repo_name | path | license | content
akseshina/dl_course
|
seminar_6/hw_tSNE.ipynb
|
gpl-3.0
|
import numpy as np
import pandas as pd
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.manifold import TSNE
family_classification_metadata = pd.read_table('../seminar_5/data/family_classification_metadata.tab')
family_classification_sequences = pd.read_table('../seminar_5/data/family_classification_sequences.tab')
family_classification_metadata.head()
family_classification_sequences.head()
"""
Explanation: Protein Family Classification
End of explanation
"""
table = pd.read_csv('data/protVec_100d_3grams_without_quotes.csv', sep='\t', header=None)
table = table.T
header = table.iloc[0] # grab the first row for the header
prot2vec = table[1:] # take the data less the header row
prot2vec.columns = header # set the header row as the df header
prot2vec["AAA"].head()
"""
Explanation: Task:
Use your ProtVec embedding from homework 5 to perform protein family classification using RNN.
use 1000 most frequent families for classification
validate your results on the train-test split
reduce the dimensionality of the protein-space using Stochastic Neighbor Embedding and visualize two most frequent classes
compare your RNN results with SVM
visualization and metrics are up to you
Let's read the embedding matrix from the original article data.
End of explanation
"""
most_common_families = Counter(family_classification_metadata['FamilyID']).most_common(2)
most_common_families = [family for (family, count) in most_common_families]
family2num = {f: i for (i, f) in enumerate(most_common_families)}
family2num
MAX_PROTEIN_LEN = 501
EMBED_LEN = 100
all_proteins = family_classification_sequences['Sequences']
all_families = family_classification_metadata['FamilyID']
selected_ids = [i for i in range(len(all_proteins))
if all_families[i] in family2num and len(all_proteins[i]) <= MAX_PROTEIN_LEN]
# Embed a protein by averaging the ProtVec vectors of its non-overlapping 3-grams.
def embedding(protein):
    res = np.zeros(100)
    n_grams = (len(protein) - 3) // 3
    for i in range(n_grams):
        try:
            res += prot2vec[protein[i*3: i*3 + 3]]
        except KeyError:
            res += prot2vec['<unk>']  # fall back to the unknown-3-gram vector
    return res / n_grams
#embedding(all_proteins[11])
selected_proteins = [embedding(p) for p in all_proteins[selected_ids]]
tsne = TSNE(n_components=2, random_state=42, angle=0.7, init='pca', n_iter=500)
XX = tsne.fit_transform(selected_proteins)
tsne_df = pd.DataFrame(XX, columns=['x0', 'x1'])
plt.figure(figsize=(10, 10))
colors = ['red', 'blue']
plt.scatter(tsne_df['x0'], tsne_df['x1'], c=[colors[family2num[f]] for f in all_families[selected_ids]], s=20);
"""
Explanation: 2 most frequent families:
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
2.2/examples/extinction_BK_binary.ipynb
|
gpl-3.0
|
!pip install -I "phoebe>=2.2,<2.3"
"""
Explanation: Extinction: B-K Binary
In this example, we'll reproduce Figures 1 and 2 in the extinction release paper (Jones et al. 2020).
"Let us begin with a rather extreme case, a synthetic binary comprised of a hot, B-type main sequence star(M=6.5 Msol,Teff=17000 K, and R=4.2 Rsol) anda cool K-type giant (M=1.8 Msol,Teff=4000 K, and R=39.5 Rsol)vin a 1000 day orbit -- a system where, while the temperature difference is large, the luminosities are similar." (Jones et al. 2020)
<img src="jones+20_fig1.png" alt="Figure 1" width="800px"/>
<img src="jones+20_fig2.png" alt="Figure 2" width="400px"/>
Setup
Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
End of explanation
"""
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
from matplotlib import gridspec
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.
End of explanation
"""
b.set_value('period', component='binary', value=1000.0*u.d)
b.set_value('teff', component='primary', value=17000*u.K)
b.set_value('teff', component='secondary', value=4000*u.K)
b.set_value('requiv', component='primary', value=4.22173036*u.solRad)
b.set_value('requiv', component='secondary', value=40.732435*u.solRad)
b.flip_constraint('mass@primary', solve_for='sma@binary')
b.set_value('mass', component='primary', value=6.5*u.solMass)
b.flip_constraint('mass@secondary', solve_for='q')
b.set_value('mass', component='secondary', value=1.9145*u.solMass)
"""
Explanation: First we'll define the system parameters
End of explanation
"""
times = phoebe.linspace(-20, 20, 101)
b.add_dataset('lc', times=times, dataset='B', passband="Johnson:B")
b.add_dataset('lc', times=times, dataset='R', passband="Cousins:R")
b.add_dataset('lc', times=times, dataset='KEP', passband="Kepler:mean")
"""
Explanation: And then create three light curve datasets at the same times, but in different passbands
End of explanation
"""
b.set_value_all('atm', 'ck2004')
b.set_value_all('gravb_bol', 0.0)
b.set_value_all('ld_mode_bol', 'manual')
b.set_value_all('ld_func_bol', 'linear')
b.set_value_all('ld_coeffs_bol', [0.0])
"""
Explanation: Now we'll set some atmosphere and limb-darkening options
End of explanation
"""
b.flip_constraints_all('ebv', solve_for='Av')
"""
Explanation: And flip all extinction constraints so we can provide E(B-V) values.
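Here $A_V$, $R_V$ and $E(B-V)$ are tied together by the standard extinction relation (with $R_V$ left at its usual default of 3.1), so flipping the constraint lets us set $E(B-V)$ directly:
$$A_V = R_V \, E(B-V)$$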
End of explanation
"""
b.set_value_all('ebv', 0.0)
b.run_compute(distortion_method='rotstar', irrad_method='none', model='noext')
"""
Explanation: For comparison, we'll run a model without extinction
End of explanation
"""
b.set_value_all('ebv', 1.0)
b.run_compute(distortion_method='rotstar', irrad_method='none', model='ext')
"""
Explanation: and then another model with extinction
End of explanation
"""
Bextmags=-2.5*np.log10(b['value@fluxes@B@ext@model'])
Bnoextmags=-2.5*np.log10(b['value@fluxes@B@noext@model'])
Bextmags_norm=Bextmags-Bextmags.min()+1
Bnoextmags_norm=Bnoextmags-Bnoextmags.min()+1
Bresid=Bextmags_norm-Bnoextmags_norm
Rextmags=-2.5*np.log10(b['value@fluxes@R@ext@model'])
Rnoextmags=-2.5*np.log10(b['value@fluxes@R@noext@model'])
Rextmags_norm=Rextmags-Rextmags.min()+1
Rnoextmags_norm=Rnoextmags-Rnoextmags.min()+1
Rresid=Rextmags_norm-Rnoextmags_norm
fig=plt.figure(figsize=(12,6))
gs=gridspec.GridSpec(2,2,height_ratios=[4,1],width_ratios=[1,1])
ax=plt.subplot(gs[0,0])
ax.plot(b['value@times@B@noext@model']/1000,Bnoextmags_norm,color='k',linestyle="--")
ax.plot(b['value@times@B@ext@model']/1000,Bextmags_norm,color='k',linestyle="-")
ax.set_ylabel('Magnitude')
ax.set_xticklabels([])
ax.set_xlim([-0.02,0.02])
ax.set_ylim([3.5,0.8])
ax.set_title('(a) Johnson B')
ax2=plt.subplot(gs[0,1])
ax2.plot(b['value@times@R@noext@model']/1000,Rnoextmags_norm,color='k',linestyle="--")
ax2.plot(b['value@times@R@ext@model']/1000,Rextmags_norm,color='k',linestyle="-")
ax2.set_ylabel('Magnitude')
ax2.set_xticklabels([])
ax2.set_xlim([-0.02,0.02])
ax2.set_ylim([3.5,0.8])
ax2.set_title('(b) Cousins Rc')
ax_1=plt.subplot(gs[1,0])
ax_1.plot(b['value@times@B@noext@model']/1000,Bresid,color='k',linestyle='-')
ax_1.set_ylabel(r'$\Delta m$')
ax_1.set_xlabel('Phase')
ax_1.set_xlim([-0.02,0.02])
ax_1.set_ylim([0.05,-0.3])
ax_1.axhline(y=0., linestyle='dashed',color='k',linewidth=0.5)
ax2_1=plt.subplot(gs[1,1])
ax2_1.plot(b['value@times@R@noext@model']/1000,Rresid,color='k',linestyle='-')
ax2_1.set_ylabel(r'$\Delta m$')
ax2_1.set_xlabel('Phase')
ax2_1.set_xlim([-0.02,0.02])
ax2_1.set_ylim([0.05,-0.3])
ax2_1.axhline(y=0., linestyle='dashed',color='k',linewidth=0.5)
plt.tight_layout()
fig.canvas.draw()
KEPextmags=-2.5*np.log10(b['value@fluxes@KEP@ext@model'])
KEPnoextmags=-2.5*np.log10(b['value@fluxes@KEP@noext@model'])
KEPextmags_norm=KEPextmags-KEPextmags.min()+1
KEPnoextmags_norm=KEPnoextmags-KEPnoextmags.min()+1
KEPresid=KEPextmags_norm-KEPnoextmags_norm
fig=plt.figure(figsize=(6,6))
gs=gridspec.GridSpec(2,1,height_ratios=[4,1])
ax=plt.subplot(gs[0])
ax.plot(b['value@times@KEP@noext@model']/1000,KEPnoextmags_norm,color='k',linestyle="--")
ax.plot(b['value@times@KEP@ext@model']/1000,KEPextmags_norm,color='k',linestyle="-")
ax.set_ylabel('Magnitude')
ax.set_xticklabels([])
ax.set_xlim([-0.02,0.02])
ax.set_ylim([3.5,0.8])
ax.set_title('Kepler K')
ax_1=plt.subplot(gs[1])
ax_1.plot(b['value@times@KEP@noext@model']/1000,KEPresid,color='k',linestyle='-')
ax_1.set_ylabel(r'$\Delta m$')
ax_1.set_xlabel('Phase')
ax_1.set_xlim([-0.02,0.02])
ax_1.set_ylim([0.05,-0.3])
ax_1.axhline(y=0., linestyle='dashed',color='k',linewidth=0.5)
plt.tight_layout()
fig.canvas.draw()
"""
Explanation: Lastly, we'll convert the model fluxes into magnitudes and format the figures.
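Concretely, each model flux $F$ is turned into a magnitude and shifted so the brightest point sits at magnitude 1,
$$m = -2.5\log_{10} F, \qquad m_{\rm norm} = m - m_{\rm min} + 1,$$
and the residual panels show $\Delta m = m_{\rm norm}^{\rm ext} - m_{\rm norm}^{\rm noext}$.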
End of explanation
"""
|
kmclaugh/fastai_courses
|
kevin_files/lesson4.ipynb
|
apache-2.0
|
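This excerpt starts mid-notebook: the imports and the path variable (the MovieLens data directory) come from earlier cells that are not shown. A plausible preamble, assuming nothing beyond what the code below uses, would be:
# Assumed imports from earlier cells not shown in this excerpt.
import numpy as np
import pandas as pd
from operator import itemgetter  # used later when sorting movie_ratings / movie_comp
# `path` (the MovieLens csv directory) is defined in an earlier cell and is left as-is here.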
ratings = pd.read_csv(path+'ratings.csv')
ratings.head()
len(ratings)
"""
Explanation: Set up data
We're working with the movielens data, which contains one rating per row, like this:
End of explanation
"""
movie_names = pd.read_csv(path+'movies.csv').set_index('movieId')['title'].to_dict()
users = ratings.userId.unique()
movies = ratings.movieId.unique()
userid2idx = {o:i for i,o in enumerate(users)}
movieid2idx = {o:i for i,o in enumerate(movies)}
"""
Explanation: Just for display purposes, let's read in the movie names too.
End of explanation
"""
ratings.movieId = ratings.movieId.apply(lambda x: movieid2idx[x])
ratings.userId = ratings.userId.apply(lambda x: userid2idx[x])
ratings.head()
user_min, user_max, movie_min, movie_max = (ratings.userId.min(),
ratings.userId.max(), ratings.movieId.min(), ratings.movieId.max())
user_min, user_max, movie_min, movie_max
n_users = ratings.userId.nunique()
n_movies = ratings.movieId.nunique()
n_users, n_movies
"""
Explanation: We update the movie and user ids so that they are contiguous integers, which we want when using embeddings.
End of explanation
"""
n_factors = 50
np.random.seed(42)
"""
Explanation: This is the number of latent factors in each embedding.
End of explanation
"""
msk = np.random.rand(len(ratings)) < 0.8
trn = ratings[msk]
val = ratings[~msk]
"""
Explanation: Randomly split into training and validation.
End of explanation
"""
g=ratings.groupby('userId')['rating'].count()
topUsers=g.sort_values(ascending=False)[:15]
g=ratings.groupby('movieId')['rating'].count()
topMovies=g.sort_values(ascending=False)[:15]
top_r = ratings.join(topUsers, rsuffix='_r', how='inner', on='userId')
top_r = top_r.join(topMovies, rsuffix='_r', how='inner', on='movieId')
pd.crosstab(top_r.userId, top_r.movieId, top_r.rating, aggfunc=np.sum)
"""
Explanation: Create subset for Excel
We create a crosstab of the most popular movies and most movie-addicted users which we'll copy into Excel for creating a simple example. This isn't necessary for any of the modeling below however.
End of explanation
"""
user_in = Input(shape=(1,), dtype='int64', name='user_in')
u = Embedding(n_users, n_factors, input_length=1, W_regularizer=l2(1e-4))(user_in)
movie_in = Input(shape=(1,), dtype='int64', name='movie_in')
m = Embedding(n_movies, n_factors, input_length=1, W_regularizer=l2(1e-4))(movie_in)
x = merge([u, m], mode='dot')
x = Flatten()(x)
model = Model([user_in, movie_in], x)
model.compile(Adam(0.001), loss='mse')
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=1,
validation_data=([val.userId, val.movieId], val.rating))
model.optimizer.lr=0.01
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=3,
validation_data=([val.userId, val.movieId], val.rating))
model.optimizer.lr=0.001
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=6,
validation_data=([val.userId, val.movieId], val.rating))
"""
Explanation: Dot product
The most basic model is a dot product of a movie embedding and a user embedding. Let's see how well that works:
End of explanation
"""
def embedding_input(name, n_in, n_out, reg):
    inp = Input(shape=(1,), dtype='int64', name=name)
    return inp, Embedding(n_in, n_out, input_length=1, W_regularizer=l2(reg))(inp)
user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4)
movie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4)
def create_bias(inp, n_in):
    x = Embedding(n_in, 1, input_length=1)(inp)
    return Flatten()(x)
ub = create_bias(user_in, n_users)
mb = create_bias(movie_in, n_movies)
x = merge([u, m], mode='dot')
x = Flatten()(x)
x = merge([x, ub], mode='sum')
x = merge([x, mb], mode='sum')
model = Model([user_in, movie_in], x)
model.compile(Adam(0.001), loss='mse')
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=1,
validation_data=([val.userId, val.movieId], val.rating))
model.optimizer.lr=0.01
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=6,
validation_data=([val.userId, val.movieId], val.rating))
model.optimizer.lr=0.001
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=10,
validation_data=([val.userId, val.movieId], val.rating))
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=5,
validation_data=([val.userId, val.movieId], val.rating))
"""
Explanation: The best benchmarks are a bit over 0.9, so this model doesn't seem to be working that well...
Bias
The problem is likely to be that we don't have bias terms - that is, a single bias for each user and each movie representing how positive or negative each user is, and how good each movie is. We can add that easily by simply creating an embedding with one output for each movie and each user, and adding it to our output.
End of explanation
"""
model.save_weights(model_path+'bias.h5')
model.load_weights(model_path+'bias.h5')
"""
Explanation: This result is quite a bit better than the best benchmarks that we could find with a quick google search - so looks like a great approach!
End of explanation
"""
model.predict([np.array([3]), np.array([6])])
"""
Explanation: We can use the model to generate predictions by passing a pair of ints - a user id and a movie id. For instance, this predicts that user #3 would really enjoy movie #6.
End of explanation
"""
g=ratings.groupby('movieId')['rating'].count()
topMovies=g.sort_values(ascending=False)[:2000]
topMovies = np.array(topMovies.index)
"""
Explanation: Analyze results
To make the analysis of the factors more interesting, we'll restrict it to the top 2000 most popular movies.
End of explanation
"""
get_movie_bias = Model(movie_in, mb)
movie_bias = get_movie_bias.predict(topMovies)
movie_ratings = [(b[0], movie_names[movies[i]]) for i,b in zip(topMovies,movie_bias)]
"""
Explanation: First, we'll look at the movie bias term. We create a 'model' - which in keras is simply a way of associating one or more inputs with one or more outputs, using the functional API. Here, our input is the movie id (a single id), and the output is the movie bias (a single float).
End of explanation
"""
sorted(movie_ratings, key=itemgetter(0))[:15]
sorted(movie_ratings, key=itemgetter(0), reverse=True)[:15]
"""
Explanation: Now we can look at the top and bottom rated movies. These ratings are corrected for different levels of reviewer sentiment, as well as different types of movies that different reviewers watch.
End of explanation
"""
get_movie_emb = Model(movie_in, m)
movie_emb = np.squeeze(get_movie_emb.predict([topMovies]))
movie_emb.shape
"""
Explanation: We can now do the same thing for the embeddings.
End of explanation
"""
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
movie_pca = pca.fit(movie_emb.T).components_
fac0 = movie_pca[0]
movie_comp = [(f, movie_names[movies[i]]) for f,i in zip(fac0, topMovies)]
"""
Explanation: Because it's hard to interpret 50 embeddings, we use PCA to simplify them down to just 3 vectors.
End of explanation
"""
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
fac1 = movie_pca[1]
movie_comp = [(f, movie_names[movies[i]]) for f,i in zip(fac1, topMovies)]
"""
Explanation: Here's the 1st component. It seems to be 'critically acclaimed' or 'classic'.
End of explanation
"""
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
fac2 = movie_pca[2]
movie_comp = [(f, movie_names[movies[i]]) for f,i in zip(fac2, topMovies)]
"""
Explanation: The 2nd is 'hollywood blockbuster'.
End of explanation
"""
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
"""
Explanation: The 3rd is 'violent vs happy'.
End of explanation
"""
import sys
stdout, stderr = sys.stdout, sys.stderr # save notebook stdout and stderr
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout, sys.stderr = stdout, stderr # restore notebook stdout and stderr
start=50; end=100
X = fac0[start:end]
Y = fac2[start:end]
plt.figure(figsize=(15,15))
plt.scatter(X, Y)
for i, x, y in zip(topMovies[start:end], X, Y):
    plt.text(x,y,movie_names[movies[i]], color=np.random.rand(3)*0.7, fontsize=14)
plt.show()
"""
Explanation: We can draw a picture to see how various movies appear on the map of these components. This picture shows the 1st and 3rd components.
End of explanation
"""
user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4)
movie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4)
x = merge([u, m], mode='concat')
x = Flatten()(x)
x = Dropout(0.3)(x)
x = Dense(70, activation='relu')(x)
x = Dropout(0.75)(x)
x = Dense(1)(x)
nn = Model([user_in, movie_in], x)
nn.compile(Adam(0.001), loss='mse')
nn.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, nb_epoch=8,
validation_data=([val.userId, val.movieId], val.rating))
"""
Explanation: Neural net
Rather than creating a special purpose architecture (like our dot-product with bias earlier), it's often both easier and more accurate to use a standard neural network. Let's try it! Here, we simply concatenate the user and movie embeddings into a single vector, which we feed into the neural net.
End of explanation
"""
|
HuanglabPurdue/NCS
|
clib/jupyter_notebooks/ncs_demo_simulation.ipynb
|
gpl-3.0
|
import matplotlib
import matplotlib.pyplot as pyplot
import numpy
import os
import time
# python3-6 NCS.
import pyNCS
import pyNCS.denoisetools as ncs
# python3 and C NCS.
import pyCNCS.ncs_c as ncsC
# Generate the same random noise each time.
numpy.random.seed(1)
py_ncs_path = os.path.dirname(os.path.abspath(pyNCS.__file__))
print(py_ncs_path)
"""
Explanation: NCS Demo Simulation.
In order for this to work you need both the reference NCS/python3-6 Python module and the NCS/clib Python module in your Python path.
End of explanation
"""
# create normalized ideal image
fpath1 = os.path.join(py_ncs_path, "../randwlcposition.mat")
imgsz = 128
zoom = 8
Pixelsize = 0.1
NA = 1.4
Lambda = 0.7
t = time.time()
res = ncs.genidealimage(imgsz,Pixelsize,zoom,NA,Lambda,fpath1)
elapsed = time.time()-t
print('Elapsed time for generating ideal image:', elapsed)
imso = res[0]
pyplot.imshow(imso,cmap="gray")
# select variance map from calibrated map data
fpath = os.path.join(py_ncs_path, "../gaincalibration_561_gain.mat")
noisemap = ncs.gennoisemap(imgsz,fpath)
varsub = noisemap[0]*10 # increase the readout noise by 10 to demonstrate the effect of NCS algorithm
gainsub = noisemap[1]
# generate simulated data
I = 100
bg = 10
offset = 100
N = 1
dataimg = ncs.gendatastack(imso,varsub,gainsub,I,bg,offset,N)
imsd = dataimg[1]
# generate noise corrected image
Rs = 8
iterationN = 15
alpha = 0.1
out_name = os.path.join(py_ncs_path, "../../out.npy")
# This is useful for debugging as it takes a long time for this approach to
# to reduce the noise of an image. Once you've done this once you can just
# load the reference result.
if not os.path.exists(out_name):
    # This takes ~24 seconds on my laptop.
    out = ncs.reducenoise(Rs,imsd[0:1],varsub,gainsub,imgsz,Pixelsize,NA,Lambda,alpha,iterationN)
    numpy.save(out_name, out)
else:
    out = numpy.load(out_name)
print(out.shape)
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,8))
ax1.imshow(imsd[0],aspect='equal',cmap="gray")
ax2.imshow(out[0],aspect ='equal',cmap="gray")
pyplot.show()
"""
Explanation: pyNCS analysis
This is basically a copy of NCS/python3-6/NCSdemo_simulation.py
End of explanation
"""
# Get the OTF mask that NCSDemo_simulation.py used.
rcfilter = ncs.genfilter(Rs+2,Pixelsize,NA,Lambda,'OTFweighted',1,0.7)
print(rcfilter.shape)
pyplot.imshow(rcfilter, cmap = "gray")
pyplot.show()
# Calculate gamma and run Python/C NCS.
gamma = varsub/(gainsub*gainsub)
# This takes ~100ms on my laptop, so ~200x faster even though it is single threaded.
out2 = ncsC.pyReduceNoise(imsd[0], gamma, rcfilter, alpha)
"""
Explanation: pyCNCS analysis
Mixed C and Python NCS analysis.
End of explanation
"""
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,8))
ax1.imshow(out[0],aspect='equal',cmap="gray")
ax2.imshow(out2,aspect ='equal',cmap="gray")
pyplot.show()
pyplot.figure(figsize = (6,6))
pyplot.imshow(out[0] - out2, cmap = "gray")
pyplot.show()
print("Maximum pixel difference is {0:.3f}e-".format(numpy.max(numpy.abs(out[0] - out2))))
"""
Explanation: Compare results to reference implementation.
End of explanation
"""
# The C library expects the OTF be shifted to match the FFT frequency convention.
otf_mask = numpy.fft.fftshift(rcfilter)
# This takes ~50ms on my laptop, so 2x faster than the mixed C/Python approach.
out3 = ncsC.cReduceNoise(imsd[0], gamma, otf_mask, alpha)
"""
Explanation: pyCNCS analysis
Pure C NCS analysis.
End of explanation
"""
f,(ax1,ax2) = pyplot.subplots(1,2,sharey=False,figsize = (8,8))
ax1.imshow(out[0],aspect='equal',cmap="gray")
ax2.imshow(out3,aspect ='equal',cmap="gray")
pyplot.show()
pyplot.figure(figsize = (6,6))
pyplot.imshow(out2 - out3, cmap = "gray")
pyplot.show()
print("C vs C/Python difference is {0:.5f}%".format(numpy.max(numpy.abs(out2 - out3))/numpy.max(out2)))
"""
Explanation: Compare results to reference implementation.
End of explanation
"""
|
rkastilani/PowerOutagePredictor
|
PowerOutagePredictor/Linear/Lasso.ipynb
|
mit
|
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
data = pd.read_csv("../../Data/2014outagesJerry.csv")
data.head()
"""
Explanation: Lasso Regression:
Performs L1 regularization, i.e. adds a penalty proportional to the sum of the absolute values of the coefficients.
Minimization objective = least-squares objective + α * (sum of absolute values of coefficients)
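In scikit-learn's parametrization (its alpha is the α above, swept as $\lambda$ below), the Lasso objective is
$$\min_w \; \frac{1}{2 n_{\rm samples}} \lVert y - Xw \rVert_2^2 + \alpha \lVert w \rVert_1$$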
End of explanation
"""
# Select input/output data
Y_tot = data['Total_outages']
X_tot = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
    regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
    regr_lasso.fit(X_tot,Y_tot)
    coefs.append(regr_lasso.coef_)
    trainerror.append(mean_squared_error(Y_tot,regr_lasso.predict(X_tot)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
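As a side note (not part of the original notebook), the regularization strength could also be chosen automatically by cross-validation instead of being read off the plot; a minimal sketch using scikit-learn's LassoCV over the same grid:
# Hedged sketch: pick alpha by 5-fold cross-validation over the lambda grid above.
# (The notebook's Lasso uses normalize=True; on recent scikit-learn versions you would
#  standardize X_tot yourself instead, which changes the selected alpha.)
from sklearn.linear_model import LassoCV
lasso_cv = LassoCV(alphas=lambdas, cv=5, max_iter=int(1e6))
lasso_cv.fit(X_tot, Y_tot)
print("CV-selected alpha:", lasso_cv.alpha_)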
# pick the best alpha value
regr_lasso_best_tot = linear_model.Lasso()
regr_lasso_best_tot.set_params(alpha=1e-4,normalize=True,max_iter=1e6)
regr_lasso_best_tot.fit(X_tot,Y_tot)
Y_tot_predict = regr_lasso_best_tot.predict(X_tot)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_tot,Y_tot_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
# calculate the train error
print("Train error",mean_squared_error(Y_tot,Y_tot_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_tot.score(X_tot,Y_tot))
"""
Explanation: Total Outages
End of explanation
"""
# Select input/output data
Y_eqp = data['Equipment']
X_eqp = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
    regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
    regr_lasso.fit(X_eqp,Y_eqp)
    coefs.append(regr_lasso.coef_)
    trainerror.append(mean_squared_error(Y_eqp,regr_lasso.predict(X_eqp)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_eqp = linear_model.Lasso()
regr_lasso_best_eqp.set_params(alpha=1e-4,normalize=True,max_iter=1e6)
regr_lasso_best_eqp.fit(X_eqp,Y_eqp)
Y_eqp_predict = regr_lasso_best_eqp.predict(X_eqp)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_eqp,Y_eqp_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
# calculate the train error
print("Train error",mean_squared_error(Y_eqp,Y_eqp_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_eqp.score(X_eqp,Y_eqp))
"""
Explanation: Equipment-caused Outages
End of explanation
"""
# Select input/output data
Y_tree = data['Trees']
#X_tree = data[['Max_temp_F','Max_humidity_percent','Min_visibility_mi','Max_windspeed_mph','Precipitation_in','Event_Hail']]
X_tree = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
    regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
    regr_lasso.fit(X_tree,Y_tree)
    coefs.append(regr_lasso.coef_)
    trainerror.append(mean_squared_error(Y_tree,regr_lasso.predict(X_tree)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_tree = linear_model.Lasso()
regr_lasso_best_tree.set_params(alpha=1e-5,normalize=True,max_iter=1e6)
regr_lasso_best_tree.fit(X_tree,Y_tree)
Y_tree_predict = regr_lasso_best_tree.predict(X_tree)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_tree,Y_tree_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
# calculate the train error
print("Train error",mean_squared_error(Y_tree,Y_tree_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_tree.score(X_tree,Y_tree))
"""
Explanation: Trees-caused Outages
End of explanation
"""
# Select input/output data
Y_ani = data['Animals']
X_ani = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
    regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
    regr_lasso.fit(X_ani,Y_ani)
    coefs.append(regr_lasso.coef_)
    trainerror.append(mean_squared_error(Y_ani,regr_lasso.predict(X_ani)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_ani = linear_model.Lasso()
regr_lasso_best_ani.set_params(alpha=1e-4,normalize=True,max_iter=1e6)
regr_lasso_best_ani.fit(X_ani,Y_ani)
Y_ani_predict = regr_lasso_best_ani.predict(X_ani)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_ani,Y_ani_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
# calculate the train error
print("Train error",mean_squared_error(Y_ani,Y_ani_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_ani.score(X_ani,Y_ani))
"""
Explanation: Animals-caused Outages
End of explanation
"""
# Select input/output data
Y_lightening = data['Lightning']
X_lightening = data[['Day_length_hr','Max_temp_F','Avg_Temp_F','Min_temp_F','Max_humidity_percent','Avg_humidity_percent','Min_humidity_percent','Max_visibility_mi','Avg_visibility_mi','Min_visibility_mi','Max_windspeed_mph','Avg_windspeed_mph','Max_windgust_mph','Precipitation_in','Event_fog','Event_rain','Event_snow','Event_thunderstorm','Event_Hail']]
# Initialize lists
coefs = []
trainerror = []
# Define lambda space
lambdas = np.logspace(-6,6,200)
# Define type of regressor
regr_lasso = linear_model.Lasso()
# loop over lambda (a) values (strength of regularization)
for a in lambdas:
    regr_lasso.set_params(alpha=a,normalize=True,max_iter=1e6)
    regr_lasso.fit(X_lightening,Y_lightening)
    coefs.append(regr_lasso.coef_)
    trainerror.append(mean_squared_error(Y_lightening,regr_lasso.predict(X_lightening)))
# Plot
plt.figure(figsize=(10,3))
# figure 1: Lasso Coef. and lambda
plt.subplot(121)
plt.plot(lambdas,coefs)
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('coefs')
plt.title('LASSO coefs vs $\lambda$')
# figure 2: Error and lambda
plt.subplot(122)
plt.plot(lambdas,trainerror,label='train error')
plt.xscale('log')
plt.xlabel('$\lambda$')
plt.ylabel('error')
plt.legend(loc='lower right')
plt.title('error vs $\lambda$')
plt.show()
# pick the best alpha value
regr_lasso_best_lightening = linear_model.Lasso()
regr_lasso_best_lightening.set_params(alpha=1e-5,normalize=True,max_iter=1e6)
regr_lasso_best_lightening.fit(X_lightening,Y_lightening)
Y_lightening_predict = regr_lasso_best_lightening.predict(X_lightening)
#make parity plot
plt.figure(figsize=(4,4))
plt.scatter(Y_lightening,Y_lightening_predict)
plt.plot([0,10],[0,10],lw=4,color='black')
plt.show()
# calculate the train error
print("Train error",mean_squared_error(Y_lightening,Y_lightening_predict))
# Returns the coefficient of determination R^2 of the prediction.
print("R^2",regr_lasso_best_lightening.score(X_lightening,Y_lightening))
"""
Explanation: Lightning-caused Outages
End of explanation
"""
|
iRipVanWinkle/ml
|
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Preparing Numeric Data.ipynb
|
mit
|
# This line lets me show plots
%matplotlib inline
#import useful modules
import numpy as np
import pandas as pd
from ggplot import mtcars
"""
Explanation: Preparing Numeric Data
There are variety of preprocessing tasks one should consider before using numeric data in analysis and predictive models.
Centering and Scaling
Numeric variables are not always directly comparable, as they are often measured on different scales and cover different ranges. Furthermore, large differences in magnitude (say, one variable taking values between 1 and 100 while another ranges from 1 to 100,000) can affect certain modeling techniques (e.g., where the values of the two variables need to be combined in some way).
Some of the issues mentioned above can be alleviated by centering and scaling the data. A common way to center data is to subtract the mean value from each data point, which centers the data around zero (and sets the new mean to zero).
End of explanation
"""
print (mtcars.head() )
colmeans = mtcars.sum()/mtcars.shape[0] # Get column means
colmeans
"""
Explanation: Let's center the mtcars dataset in the ggplot library. First, let's calculate the means for the data in each column:
End of explanation
"""
centered_mtcars = mtcars - colmeans
print(centered_mtcars.describe())
"""
Explanation: Now, subtract the column means from each row, element-wise, to zero center the data:
End of explanation
"""
# Get column standard deviations
column_deviations = centered_mtcars.std(axis=0)
centered_and_scaled_mtcars = centered_mtcars/column_deviations
print(centered_and_scaled_mtcars.describe())
"""
Explanation: Notice that in zero-centered data, negative values represent original values that were below average and positive numbers represent values that were above average.
To put all values on a common scale, we can divide all values in a column by that column's standard deviation.
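Expressed as a formula, centering and scaling replaces each value by its z-score,
$$z = \frac{x - \bar{x}}{s},$$
where $\bar{x}$ is the column mean and $s$ is the column standard deviation.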
End of explanation
"""
from sklearn import preprocessing
scaled_data = preprocessing.scale(mtcars)
#reconstruct a DataFrame from the scaled data
scaled_mtcars = pd.DataFrame(scaled_data,
index=mtcars.index,
columns=mtcars.columns)
print(scaled_mtcars.describe() )
"""
Explanation: All columns/variables/features now have a standard deviation of 1 and roughly the same mean, 0. This can also be achieved using the scale() function in the module scikit-learn. scale() returns an ndarray which can be converted into a DataFrame, if needed.
End of explanation
"""
normally_distributed = np.random.normal(size=10000) # Generate normal data*
normally_distributed = pd.DataFrame(normally_distributed) # Convert to DF
normally_distributed.hist(figsize=(8,8), # Plot histogram
bins=30)
skewed = np.random.exponential(scale=2, # Generate skewed data
size= 10000)
skewed = pd.DataFrame(skewed) # Convert to DF
skewed.hist(figsize=(8,8), # Plot histogram
bins=50)
"""
Explanation: Note that the values are not exactly the same as those calculated "manually" above: pandas' std() uses the sample standard deviation (ddof=1), while scikit-learn's scale() divides by the population standard deviation (ddof=0).
Dealing With Skewed Data
The distribution of the data can have a significant impact on analysis and modeling, as many techniques assume, or even require, that the data follow a particular distribution, e.g., Gaussian. Some data sets exhibit significant asymmetry (skewness). To illustrate, let's generate and plot a few example distributions.
End of explanation
"""
sqrt_transformed = skewed.apply(np.sqrt) # Get the square root of data points*
sqrt_transformed.hist(figsize=(8,8), # Plot histogram
bins=50)
log_transformed = (skewed+1).apply(np.log) # Get the log of the data
log_transformed.hist(figsize = (8,8), # Plot histogram
bins=50)
"""
Explanation: Data with a long right tail is called positively skewed or right skewed. In a skewed dataset, the extreme values in the long tail can have a very large influence on the tests run and the models built on the data.
Reducing skew may in some cases be appropriate. Two simple transformations that can reduce skew are taking the square root of each data point or taking the natural logarithm of each data point.
End of explanation
"""
mtcars.ix[:,0:6].corr() # Check the pairwise correlations of 6 variables
"""
Explanation: Highly Correlated Variables
In predictive modeling, each variable used to construct a model would ideally represent some unique feature of the data. In reality, variables often exhibit collinearity, and variables with strong correlations can interfere with the modeling process. We can check the pairwise correlations between numeric variables using the df.corr() function:
End of explanation
"""
from pandas.tools.plotting import scatter_matrix
scatter_matrix(mtcars.ix[:,0:6], # Make a scatter matrix of 6 columns
figsize=(10, 10), # Set plot size
diagonal='kde') # Show distribution estimates on diagonal
"""
Explanation: A positive correlation implies that when one variable goes up the other tends to go up as well, while negative correlations indicate an inverse relationship.
In the mtcars dataset, the number of cylinders a car has (cyl) and its weight (wt) have fairly strong negative correlations with gas mileage (mpg), i.e., heavier cars and cars with more cylinders tend to get lower gas mileage.
A scatter plot matrix can help visualize this. pandas' scatter_matrix() function accomplishes this:
End of explanation
"""
|
quantopian/research_public
|
research/Markowitz-Quantopian-Research.ipynb
|
apache-2.0
|
import numpy as np
import matplotlib.pyplot as plt
import cvxopt as opt
from cvxopt import blas, solvers
import pandas as pd
np.random.seed(123)
# Turn off progress printing
solvers.options['show_progress'] = False
"""
Explanation: The Efficient Frontier: Markowitz Portfolio optimization in Python
By Dr. Thomas Starke, David Edwards, Dr. Thomas Wiecki
Notebook released under the Creative Commons Attribution 4.0 License.
Introduction
In this blog post you will learn about the basic idea behind Markowitz portfolio optimization and how to do it in Python. We will then show a simple backtest that rebalances its portfolio in a Markowitz-optimal way. We hope you enjoy it and find it enlightening.
We will start by using random data and save actual stock data for later. This will hopefully help you get a sense of how to use modelling and simulation to improve your understanding of the theoretical concepts. Don't forget that the skill of an algo-trader is to put mathematical models into code, and this example is great practice.
Let's start with importing a few modules which we need later, and producing a series of normally distributed returns. cvxopt is a convex solver which we will use for the optimization of the portfolio.
Simulations
End of explanation
"""
## NUMBER OF ASSETS
n_assets = 4
## NUMBER OF OBSERVATIONS
n_obs = 1000
return_vec = np.random.randn(n_assets, n_obs)
plt.plot(return_vec.T, alpha=.4);
plt.xlabel('time')
plt.ylabel('returns');
"""
Explanation: Assume that we have 4 assets, each with a return series of length 1000. We can use numpy.random.randn to sample returns from a normal distribution.
End of explanation
"""
def rand_weights(n):
    ''' Produces n random weights that sum to 1 '''
    k = np.random.rand(n)
    return k / sum(k)
print rand_weights(n_assets)
print rand_weights(n_assets)
"""
Explanation: These return series can be used to create a wide range of portfolios. We will produce random weight vectors and plot those portfolios. As we want all our capital to be invested, the weights will have to sum to one.
End of explanation
"""
def random_portfolio(returns):
    '''
    Returns the mean and standard deviation of returns for a random portfolio
    '''
    p = np.asmatrix(np.mean(returns, axis=1))
    w = np.asmatrix(rand_weights(returns.shape[0]))
    C = np.asmatrix(np.cov(returns))
    mu = w * p.T
    sigma = np.sqrt(w * C * w.T)
    # This recursion reduces outliers to keep plots pretty
    if sigma > 2:
        return random_portfolio(returns)
    return mu, sigma
"""
Explanation: Next, let's evaluate how these random portfolios would perform by calculating the mean returns and the volatility (here we are using standard deviation). You can see that there is
a filter so that we only plot portfolios with a standard deviation of < 2 for better illustration.
End of explanation
"""
n_portfolios = 500
means, stds = np.column_stack([
random_portfolio(return_vec)
for _ in xrange(n_portfolios)
])
"""
Explanation: We calculate the return using
$$ R = p^T w $$
where $R$ is the expected return, $p^T$ is the transpose of the vector for the mean
returns for each time series and w is the weight vector of the portfolio. $p$ is a $N \times 1$
column vector, so $p^T$ becomes a $1 \times N$ row vector which can be multiplied with the
$N \times 1$ weight (column) vector w to give a scalar result. This is equivalent to the dot
product used in the code. Keep in mind that Python has a reversed definition of
rows and columns and the accurate NumPy version of the previous equation would
be R = w * p.T
Next, we calculate the standard deviation
$$\sigma = \sqrt{w^T C w}$$
where $C$ is the $N \times N$ covariance matrix of the returns. Please
note that if we simply calculated the simple standard deviation with the appropriate weighting using std(array(ret_vec).T*w) we would get a slightly different
’bullet’. This is because the simple standard deviation calculation would not take
covariances into account. In the covariance matrix, the values on the diagonal
represent the simple variances of each asset, while the off-diagonal entries are the covariances between pairs of assets. By using ordinary std() we effectively only regard the
diagonal and miss the rest. A small but significant difference.
Lets generate the mean returns and volatility for 500 random portfolios:
End of explanation
"""
plt.plot(stds, means, 'o', markersize=5)
plt.xlabel('std')
plt.ylabel('mean')
plt.title('Mean and standard deviation of returns of randomly generated portfolios');
"""
Explanation: Upon plotting these you will observe that they form a characteristic parabolic
shape called the "Markowitz bullet" whose upper boundary is called the "efficient
frontier", where we have the lowest variance for a given expected return.
End of explanation
"""
def optimal_portfolio(returns):
    n = len(returns)
    returns = np.asmatrix(returns)
    N = 100
    mus = [10**(5.0 * t/N - 1.0) for t in range(N)]
    # Convert to cvxopt matrices
    S = opt.matrix(np.cov(returns))
    pbar = opt.matrix(np.mean(returns, axis=1))
    # Create constraint matrices
    G = -opt.matrix(np.eye(n))   # negative n x n identity matrix
    h = opt.matrix(0.0, (n ,1))
    A = opt.matrix(1.0, (1, n))
    b = opt.matrix(1.0)
    # Calculate efficient frontier weights using quadratic programming
    portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x']
                  for mu in mus]
    ## CALCULATE RISKS AND RETURNS FOR FRONTIER
    returns = [blas.dot(pbar, x) for x in portfolios]
    risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]
    ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE
    m1 = np.polyfit(returns, risks, 2)
    x1 = np.sqrt(m1[2] / m1[0])
    # CALCULATE THE OPTIMAL PORTFOLIO
    wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']
    return np.asarray(wt), returns, risks
weights, returns, risks = optimal_portfolio(return_vec)
plt.plot(stds, means, 'o')
plt.ylabel('mean')
plt.xlabel('std')
plt.plot(risks, returns, 'y-o');
"""
Explanation: Markowitz optimization and the Efficient Frontier
We can now calculate the efficient frontier Markowitz-style. This is done by minimizing
$$ w^T C w$$
for fixed expected portfolio return $R^T w$ while keeping the sum of all the
weights equal to 1:
$$ \sum_{i}{w_i} = 1 $$
Here we parametrically run through $R^T w = \mu$ and find the minimum variance
for different values of $\mu$. This can be done with scipy.optimize.minimize, but we would have
to define quite a complex problem with bounds, constraints and a Lagrange multiplier. Conveniently, the cvxopt package, a convex solver, does all of that for us. We used one of their examples with some modifications as shown below. For more information on using this package please have a look at the cvxopt example.
The mus vector produces a non-linear series of expected return values $\mu$, for each of which we will find a minimum-variance portfolio. We will see later that we don't need to calculate a lot of these, as they perfectly fit a parabola which can safely be extrapolated for higher values.
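For reference, cvxopt's solvers.qp solves the standard quadratic program
$$\min_x \; \tfrac{1}{2} x^T P x + q^T x \quad \text{s.t.} \quad Gx \preceq h, \; Ax = b,$$
and optimal_portfolio above maps the frontier problem onto it with $P = \mu S$ (the scaled covariance matrix), $q = -\bar{p}$ (negated mean returns), $G = -I$ and $h = 0$ (long-only weights, $w \ge 0$), and $A = \mathbf{1}^T$, $b = 1$ (weights sum to one).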
End of explanation
"""
print weights
"""
Explanation: In yellow you can see the optimal portfolios for each of the desired returns (i.e. the mus). In addition, we get the weights for one optimal portfolio:
End of explanation
"""
tickers = ['IBM', 'SBUX', 'XOM', 'AAPL', 'MSFT', 'TLT', 'SHY']
data = get_pricing(
tickers,
start_date='2005-01-01',
end_date='2015-01-01'
)
data.loc['price', :, :].plot()
plt.ylabel('price in $')
plt.legend(tickers);
"""
Explanation: Backtesting on real market data
This is all very interesting but not very applied. We next demonstrate how you can create a simple algorithm in zipline -- the open-source backtester that powers Quantopian -- to test this optimization on actual historical stock data.
First, lets load in some historical data using Quantopian's get_pricing().
End of explanation
"""
import zipline
from zipline.api import (
set_slippage,
slippage,
set_commission,
commission,
order_target_percent,
symbols
)
from zipline import TradingAlgorithm
def initialize(context):
    '''
    Called once at the very beginning of a backtest (and live trading).
    Use this method to set up any bookkeeping variables.
    The context object is passed to all the other methods in your algorithm.
    Parameters
    context: An initialized and empty Python dictionary that has been
             augmented so that properties can be accessed using dot
             notation as well as the traditional bracket notation.
    Returns None
    '''
    # Turn off the slippage model
    set_slippage(slippage.FixedSlippage(spread=0.0))
    # Set the commission model (Interactive Brokers Commission)
    set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
    context.tick = 0
    context.assets = symbols('IBM', 'SBUX', 'XOM', 'AAPL', 'MSFT', 'TLT', 'SHY')
def handle_data(context, data):
    '''
    Called when a market event occurs for any of the algorithm's
    securities.
    Parameters
    data: A dictionary keyed by security id containing the current
          state of the securities in the algo's universe.
    context: The same context object from the initialize function.
             Stores the up to date portfolio as well as any state
             variables defined.
    Returns None
    '''
    # Allow history to accumulate 100 days of prices before trading
    # and rebalance every day thereafter.
    context.tick += 1
    if context.tick < 100:
        return
    # Get rolling window of past prices and compute returns
    prices = data.history(context.assets, 'price', 100, '1d').dropna()
    returns = prices.pct_change().dropna()
    try:
        # Perform Markowitz-style portfolio optimization
        weights, _, _ = optimal_portfolio(returns.T)
        # Rebalance portfolio accordingly
        for stock, weight in zip(prices.columns, weights):
            order_target_percent(stock, weight)
    except ValueError as e:
        # Sometimes this error is thrown
        # ValueError: Rank(A) < p or Rank([P; A; G]) < n
        pass
# Instantiate algorithm
algo = TradingAlgorithm(initialize=initialize,
                        handle_data=handle_data)
# Run algorithm
results = algo.run(data.swapaxes(2, 0, 1))
results.portfolio_value.plot();
"""
Explanation: Next, we'll create a zipline algorithm by defining two functions: initialize(), which is called once before the simulation starts, and handle_data(), which is called for every trading bar. We then instantiate the algorithm object.
If you are confused about the syntax of zipline, check out the tutorial.
End of explanation
"""
|
palrogg/foundations-homework
|
14/Homework-14-Ronga.ipynb
|
mit
|
# If you'd like to download it through the command line...
!curl -O http://www.cs.cornell.edu/home/llee/data/convote/convote_v1.1.tar.gz
# And then extract it through the command line...
!tar -zxf convote_v1.1.tar.gz
"""
Explanation: Homework 14 (or so): TF-IDF text analysis and clustering
Hooray, we kind of figured out how text analysis works! Some of it is still magic, but at least the TF and IDF parts make a little sense. Kind of. Somewhat.
No, just kidding, we're professionals now.
Investigating the Congressional Record
The Congressional Record is more or less what happened in Congress every single day. Speeches and all that. A good large source of text data, maybe?
Let's pretend it's totally secret but we just got it leaked to us in a data dump, and we need to check it out. It was leaked from this page here.
End of explanation
"""
# glob finds files matching a certain filename pattern
import glob
# Give me all the text files
paths = glob.glob('convote_v1.1/data_stage_one/development_set/*')
paths[:5]
len(paths)
"""
Explanation: You can explore the files if you'd like, but we're going to get the ones from convote_v1.1/data_stage_one/development_set/. It's a bunch of text files.
End of explanation
"""
speeches = []
for path in paths:
    with open(path) as speech_file:
        speech = {
            'pathname': path,
            'filename': path.split('/')[-1],
            'content': speech_file.read()
        }
        speeches.append(speech)
speeches_df = pd.DataFrame(speeches)
speeches_df.head()
"""
Explanation: So great, we have 702 of them. Now let's import them.
End of explanation
"""
for item in speeches_df['content'][:5]:
    print(item[:140], "\n")
"""
Explanation: In class we had the texts variable. For the homework can just do speeches_df['content'] to get the same sort of list of stuff.
Take a look at the contents of the first 5 speeches
End of explanation
"""
count_vectorizer = CountVectorizer(stop_words='english')
X = count_vectorizer.fit_transform(speeches_df['content'])
X
X_df = pd.DataFrame(X.toarray(), columns=count_vectorizer.get_feature_names())
X_df.head(10)
"""
Explanation: Doing our analysis
Use the sklearn package and a plain boring CountVectorizer to get a list of all of the tokens used in the speeches. If it won't list them all, that's ok! Make a dataframe with those terms as columns.
Be sure to include English-language stopwords
End of explanation
"""
count_vectorizer = CountVectorizer(stop_words='english', max_features=100)
X = count_vectorizer.fit_transform(speeches_df['content'])
X
pd.DataFrame(X.toarray(), columns=count_vectorizer.get_feature_names()).head()
"""
Explanation: Okay, it's far too big to even look at. Let's try to get a list of features from a new CountVectorizer that only takes the top 100 words.
End of explanation
"""
X_df = pd.DataFrame(X.toarray(), columns=count_vectorizer.get_feature_names())
"""
Explanation: Now let's push all of that into a dataframe with nicely named columns.
End of explanation
"""
no_chairman = X_df[X_df['chairman'] == 0]['chairman'].count()
no_chairman_no_mr = X_df[(X_df['chairman'] == 0) & (X_df['mr'] == 0)]['chairman'].count()
print("In a total of", len(X_df), "speeches,", no_chairman, "don't mention “chairman” and", no_chairman_no_mr, "mention neither “mr” nor “chairman”.")
"""
Explanation: Everyone seems to start their speeches with "mr chairman" - how many speeches are there in total, how many don't mention "chairman", and how many mention neither "mr" nor "chairman"?
End of explanation
"""
print("The index of this speech is", X_df['thank'].idxmax())
"""
Explanation: What is the index of the speech that is the most thankful, a.k.a. includes the word 'thank' the most times?
End of explanation
"""
china_trade = X_df.sort_values(by=['china', 'trade'], ascending=[0, 0])[['china', 'trade']].head(3)
print("These three speeches have the indexes ", *list(china_trade.index))
china_trade
"""
Explanation: If I'm searching for China and trade, what are the top 3 speeches to read according to the CountVectorizer?
End of explanation
"""
def simple_tokenizer(str_input):
    words = re.sub(r"[^A-Za-z0-9\-]", " ", str_input).lower().split()
    return words
tfidf_vectorizer = TfidfVectorizer(stop_words='english', tokenizer=simple_tokenizer, use_idf=False, norm='l1')
X = tfidf_vectorizer.fit_transform(speeches_df['content'])
TF_pd = pd.DataFrame(X.toarray(), columns=tfidf_vectorizer.get_feature_names())
china_trade = TF_pd.sort_values(by=['china', 'trade'], ascending=[0, 0])[['china', 'trade']].head(3)
print("The three top speeches have the indexes ", *list(china_trade.index))
china_trade
"""
Explanation: Now what if I'm using a TfidfVectorizer?
End of explanation
"""
# index 0 is the first speech, which was the first one imported.
paths[0]
# Pass that into 'cat' using { } which lets you put variables in shell commands
# that way you can pass the path to cat
!cat {paths[0]}
"""
Explanation: What's the content of the speeches? Here's a way to get them:
End of explanation
"""
numbers = list(range(0, 10))
numbers = list(map(str, numbers))
words_list = [i for i in list(TF_pd.columns) if i[0] not in numbers]
print(*words_list[5:100], sep='|') # to get some ideas
chaos = TF_pd.sort_values(by=['awfully', 'bacterial'], ascending=[0, 0])[['awfully', 'bacterial']].head(3)
print("The three top speeches have the indexes ", *list(chaos.index))
chaos
gun_bomb = TF_pd.sort_values(by=['gun', 'bomb'], ascending=[0, 0])[['gun', 'bomb']].head(3)
print("The three top speeches have the indexes ", *list(gun_bomb.index))
gun_bomb
"""
Explanation: Now search for something else! Another two terms that might show up. Elections and chaos? Whatever you think might be interesting.
End of explanation
"""
countingVectorizer = CountVectorizer(tokenizer=simple_tokenizer, stop_words='english')
TF_Vectorizer = TfidfVectorizer(use_idf=False, tokenizer=simple_tokenizer, stop_words='english')
TF_IDF_Vectorizer = TfidfVectorizer(use_idf=True, tokenizer=simple_tokenizer, stop_words='english')
Vectorizer_list = [countingVectorizer, TF_Vectorizer, TF_IDF_Vectorizer]
Vectorizer_names = ['', 'simple counting vectorizer', 'term frequency vectorizer', 'term frequency IDF vectorizer']
count = 1
for vectorizer in Vectorizer_list:
    print("\n[" + str(count) + "]", Vectorizer_names[count])
    X = vectorizer.fit_transform(speeches_df['content'])
    number_of_clusters = 8
    km = KMeans(n_clusters=number_of_clusters)
    km.fit(X)
    order_centroids = km.cluster_centers_.argsort()[:, ::-1]
    terms = vectorizer.get_feature_names()
    for i in range(number_of_clusters):
        top_five_words = [terms[ind] for ind in order_centroids[i, :10]]
        print("Cluster {}: {}".format(i, ' '.join(top_five_words)))
    count += 1
"""
Explanation: Enough of this garbage, let's cluster
Using a simple counting vectorizer, cluster the documents into eight categories, telling me what the top terms are per category.
Using a term frequency vectorizer, cluster the documents into eight categories, telling me what the top terms are per category.
Using a term frequency inverse document frequency vectorizer, cluster the documents into eight categories, telling me what the top terms are per category.
CountVectorizer(): Convert a collection of text documents to a matrix of token counts
TfidfVectorizer(use_idf=False): Convert a collection of raw documents to a matrix of TF-IDF features. Equivalent to CountVectorizer followed by TfidfTransformer.
TfidfVectorizer(use_idf=True) (default): Enable inverse-document-frequency reweighting.
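For reference, with scikit-learn's defaults (smooth_idf=True, followed by L2 normalization of each document vector), the reweighting applied by the IDF variant is
$$\mathrm{tfidf}(t, d) = \mathrm{tf}(t, d)\left(\ln\frac{1 + n}{1 + \mathrm{df}(t)} + 1\right),$$
where $n$ is the number of documents and $\mathrm{df}(t)$ is the number of documents containing term $t$.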
End of explanation
"""
!curl -LO https://github.com/ledeprogram/courses/raw/master/algorithms/data/hp.zip
#!unzip hp.zip
paths_potter = glob.glob('hp/*')
paths_potter[:3]
potter_texts = []
for path in paths_potter:
    with open(path) as speech_file:
        text = {
            'pathname': path,
            'filename': path.split('/')[-1],
            'content': speech_file.read()
        }
        potter_texts.append(text)
potter_df = pd.DataFrame(potter_texts)
potter_df.head(2)
#1
vectorizer = TfidfVectorizer(use_idf=True, tokenizer=simple_tokenizer, stop_words='english')
X = vectorizer.fit_transform(potter_df['content'])
#2
number_of_clusters = 2
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
#3
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(number_of_clusters):
    top_ten_words = [terms[ind] for ind in order_centroids[i, :5]]
    print("Cluster {}: {}".format(i, ' '.join(top_ten_words)))
#4
results = pd.DataFrame()
results['text'] = potter_df['content']
results['category'] = km.labels_
results.head(10)
"""
Explanation: Which one do you think works the best?
The TF-IDF is definitely the most efficient one, at least in this case!
Harry Potter time
I have a scraped collection of Harry Potter fanfiction at https://github.com/ledeprogram/courses/raw/master/algorithms/data/hp.zip.
I want you to read them in, vectorize them and cluster them. Use this process to find out the two types of Harry Potter fanfiction. What is your hypothesis?
curl -LO
End of explanation
"""
|
tkurfurst/deep-learning
|
batch-norm/Batch_Normalization_Lesson.ipynb
|
mit
|
# Import necessary packages
import tensorflow as tf
import tqdm
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Import MNIST data so we have something for our experiments
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
"""
Explanation: Batch Normalization – Lesson
What is it?
What are its benefits?
How do we add it to a network?
Let's see it work!
What are you hiding?
What is Batch Normalization?<a id='theory'></a>
Batch normalization was introduced in Sergey Ioffe's and Christian Szegedy's 2015 paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. The idea is that, instead of just normalizing the inputs to the network, we normalize the inputs to layers within the network. It's called "batch" normalization because during training, we normalize each layer's inputs by using the mean and variance of the values in the current mini-batch.
Why might this help? Well, we know that normalizing the inputs to a network helps the network learn. But a network is a series of layers, where the output of one layer becomes the input to another. That means we can think of any layer in a neural network as the first layer of a smaller network.
For example, imagine a 3 layer network. Instead of just thinking of it as a single network with inputs, layers, and outputs, think of the output of layer 1 as the input to a two layer network. This two layer network would consist of layers 2 and 3 in our original network.
Likewise, the output of layer 2 can be thought of as the input to a single layer network, consisting only of layer 3.
When you think of it like that - as a series of neural networks feeding into each other - then it's easy to imagine how normalizing the inputs to each layer would help. It's just like normalizing the inputs to any other neural network, but you're doing it at every layer (sub-network).
Beyond the intuitive reasons, there are good mathematical reasons why it helps the network learn better, too. It helps combat what the authors call internal covariate shift. This discussion is best handled in the paper and in Deep Learning, a book you can read online, written by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. Specifically, check out the batch normalization section of Chapter 8: Optimization for Training Deep Models.
Benefits of Batch Normalization<a id="benefits"></a>
Batch normalization optimizes network training. It has been shown to have several benefits:
1. Networks train faster – Each training iteration will actually be slower because of the extra calculations during the forward pass and the additional hyperparameters to train during back propagation. However, it should converge much more quickly, so training should be faster overall.
2. Allows higher learning rates – Gradient descent usually requires small learning rates for the network to converge. And as networks get deeper, their gradients get smaller during back propagation so they require even more iterations. Using batch normalization allows us to use much higher learning rates, which further increases the speed at which networks train.
3. Makes weights easier to initialize – Weight initialization can be difficult, and it's even more difficult when creating deeper networks. Batch normalization seems to allow us to be much less careful about choosing our initial starting weights.
4. Makes more activation functions viable – Some activation functions do not work well in some situations. Sigmoids lose their gradient pretty quickly, which means they can't be used in deep networks. And ReLUs often die out during training, where they stop learning completely, so we need to be careful about the range of values fed into them. Because batch normalization regulates the values going into each activation function, non-linearities that don't seem to work well in deep networks actually become viable again.
5. Simplifies the creation of deeper networks – Because of the first 4 items listed above, it is easier to build and faster to train deeper neural networks when using batch normalization. And it's been shown that deeper networks generally produce better results, so that's great.
6. Provides a bit of regularlization – Batch normalization adds a little noise to your network. In some cases, such as in Inception modules, batch normalization has been shown to work as well as dropout. But in general, consider batch normalization as a bit of extra regularization, possibly allowing you to reduce some of the dropout you might add to a network.
7. May give better results overall – Some tests seem to show batch normalization actually improves the training results. However, it's really an optimization to help train faster, so you shouldn't think of it as a way to make your network better. But since it lets you train networks faster, that means you can iterate over more designs more quickly. It also lets you build deeper networks, which are usually better. So when you factor in everything, you're probably going to end up with better results if you build your networks with batch normalization.
Batch Normalization in TensorFlow<a id="implementation_1"></a>
This section of the notebook shows you one way to add batch normalization to a neural network built in TensorFlow.
The following cell imports the packages we need in the notebook and loads the MNIST dataset to use in our experiments. However, the tensorflow package contains all the code you'll actually need for batch normalization.
End of explanation
"""
class NeuralNet:
def __init__(self, initial_weights, activation_fn, use_batch_norm):
"""
Initializes this object, creating a TensorFlow graph using the given parameters.
:param initial_weights: list of NumPy arrays or Tensors
Initial values for the weights for every layer in the network. We pass these in
so we can create multiple networks with the same starting weights to eliminate
training differences caused by random initialization differences.
The number of items in the list defines the number of layers in the network,
and the shapes of the items in the list define the number of nodes in each layer.
e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would
create a network with 784 inputs going into a hidden layer with 256 nodes,
followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes.
:param activation_fn: Callable
The function used for the output of each hidden layer. The network will use the same
activation function on every hidden layer and no activation function on the output layer.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
:param use_batch_norm: bool
Pass True to create a network that uses batch normalization; False otherwise
Note: this network will not use batch normalization on layers that do not have an
activation function.
"""
# Keep track of whether or not this network uses batch normalization.
self.use_batch_norm = use_batch_norm
self.name = "With Batch Norm" if use_batch_norm else "Without Batch Norm"
# Batch normalization needs to do different calculations during training and inference,
# so we use this placeholder to tell the graph which behavior to use.
self.is_training = tf.placeholder(tf.bool, name="is_training")
# This list is just for keeping track of data we want to plot later.
# It doesn't actually have anything to do with neural nets or batch normalization.
self.training_accuracies = []
# Create the network graph, but it will not actually have any real values until after you
# call train or test
self.build_network(initial_weights, activation_fn)
def build_network(self, initial_weights, activation_fn):
"""
Build the graph. The graph still needs to be trained via the `train` method.
:param initial_weights: list of NumPy arrays or Tensors
See __init__ for description.
:param activation_fn: Callable
See __init__ for description.
"""
self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]])
layer_in = self.input_layer
for weights in initial_weights[:-1]:
layer_in = self.fully_connected(layer_in, weights, activation_fn)
self.output_layer = self.fully_connected(layer_in, initial_weights[-1])
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
"""
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
e.g. Passing in a matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
"""
# Since this class supports both options, only use batch normalization when
# requested. However, do not use it on the final layer, which we identify
# by its lack of an activation function.
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
# (See later in the notebook for more details.)
weights = tf.Variable(initial_weights)
linear_output = tf.matmul(layer_in, weights)
# Apply batch normalization to the linear combination of the inputs and weights
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
# Now apply the activation function, *after* the normalization.
return activation_fn(batch_normalized_output)
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
return linear_output if not activation_fn else activation_fn(linear_output)
def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None):
"""
Trains the model on the MNIST training dataset.
:param session: Session
Used to run training graph operations.
:param learning_rate: float
Learning rate used during gradient descent.
:param training_batches: int
Number of batches to train.
:param batches_per_sample: int
How many batches to train before sampling the validation accuracy.
:param save_model_as: string or None (default None)
Name to use if you want to save the trained model.
"""
# This placeholder will store the target labels for each mini batch
labels = tf.placeholder(tf.float32, [None, 10])
# Define loss and optimizer
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer))
# Define operations for testing
correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if self.use_batch_norm:
# If we don't include the update ops as dependencies on the train step, the
# tf.layers.batch_normalization layers won't update their population statistics,
# which will cause the model to fail at inference time
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
# Train for the appropriate number of batches. (tqdm is only for a nice timing display)
for i in tqdm.tqdm(range(training_batches)):
# We use batches of 60 just because the original paper did. You can use any size batch you like.
batch_xs, batch_ys = mnist.train.next_batch(60)
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
# Periodically test accuracy against the 5k validation images and store it for plotting later.
if i % batches_per_sample == 0:
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
self.training_accuracies.append(test_accuracy)
# After training, report accuracy against the validation data
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy))
# If you want to use this model later for inference instead of having to retrain it,
# just construct it with the same parameters and then pass this file to the 'test' function
if save_model_as:
tf.train.Saver().save(session, save_model_as)
def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None):
"""
Tests a trained model on the MNIST testing dataset.
:param session: Session
Used to run the testing graph operations.
:param test_training_accuracy: bool (default False)
If True, perform inference with batch normalization using batch mean and variance;
if False, perform inference with batch normalization using estimated population mean and variance.
Note: in real life, *always* perform inference using the population mean and variance.
This parameter exists just to support demonstrating what happens if you don't.
:param include_individual_predictions: bool (default False)
This function always performs an accuracy test against the entire test set. But if this parameter
is True, it performs an extra test, doing 200 predictions one at a time, and displays the results
and accuracy.
:param restore_from: string or None (default None)
Name of a saved model if you want to test with previously saved weights.
"""
# This placeholder will store the true labels for each mini batch
labels = tf.placeholder(tf.float32, [None, 10])
# Define operations for testing
correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# If provided, restore from a previously saved model
if restore_from:
tf.train.Saver().restore(session, restore_from)
# Test against all of the MNIST test data
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.test.images,
labels: mnist.test.labels,
self.is_training: test_training_accuracy})
print('-'*75)
print('{}: Accuracy on full test set = {}'.format(self.name, test_accuracy))
# If requested, perform tests predicting individual values rather than batches
if include_individual_predictions:
predictions = []
correct = 0
# Do 200 predictions, 1 at a time
for i in range(200):
# This is a normal prediction using an individual test case. However, notice
# we pass `test_training_accuracy` to `feed_dict` as the value for `self.is_training`.
# Remember that will tell it whether it should use the batch mean & variance or
# the population estimates that were calculated while training the model.
pred, corr = session.run([tf.arg_max(self.output_layer,1), accuracy],
feed_dict={self.input_layer: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
self.is_training: test_training_accuracy})
correct += corr
predictions.append(pred[0])
print("200 Predictions:", predictions)
print("Accuracy on 200 samples:", correct/200)
"""
Explanation: Neural network classes for testing
The following class, NeuralNet, allows us to create identical neural networks with and without batch normalization. The code is heaviy documented, but there is also some additional discussion later. You do not need to read through it all before going through the rest of the notebook, but the comments within the code blocks may answer some of your questions.
About the code:
This class is not meant to represent TensorFlow best practices – the design choices made here are to support the discussion related to batch normalization.
It's also important to note that we use the well-known MNIST data for these examples, but the networks we create are not meant to be good for performing handwritten character recognition. We chose this network architecture because it is similar to the one used in the original paper, which is complex enough to demonstrate some of the benefits of batch normalization while still being fast to train.
End of explanation
"""
def plot_training_accuracies(*args, **kwargs):
"""
Displays a plot of the accuracies calculated during training to demonstrate
how many iterations it took for the model(s) to converge.
:param args: One or more NeuralNet objects
You can supply any number of NeuralNet objects as unnamed arguments
and this will display their training accuracies. Be sure to call `train`
on the NeuralNets before calling this function.
:param kwargs:
You can supply any named parameters here, but `batches_per_sample` is the only
one we look for. It should match the `batches_per_sample` value you passed
to the `train` function.
"""
fig, ax = plt.subplots()
batches_per_sample = kwargs['batches_per_sample']
for nn in args:
ax.plot(range(0,len(nn.training_accuracies)*batches_per_sample,batches_per_sample),
nn.training_accuracies, label=nn.name)
ax.set_xlabel('Training steps')
ax.set_ylabel('Accuracy')
ax.set_title('Validation Accuracy During Training')
ax.legend(loc=4)
ax.set_ylim([0,1])
plt.yticks(np.arange(0, 1.1, 0.1))
plt.grid(True)
plt.show()
def train_and_test(use_bad_weights, learning_rate, activation_fn, training_batches=50000, batches_per_sample=500):
"""
Creates two networks, one with and one without batch normalization, then trains them
with identical starting weights, layers, batches, etc. Finally tests and plots their accuracies.
:param use_bad_weights: bool
If True, initialize the weights of both networks to wildly inappropriate weights;
if False, use reasonable starting weights.
:param learning_rate: float
Learning rate used during gradient descent.
:param activation_fn: Callable
The function used for the output of each hidden layer. The network will use the same
activation function on every hidden layer and no activation function on the output layer.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
:param training_batches: (default 50000)
Number of batches to train.
:param batches_per_sample: (default 500)
How many batches to train before sampling the validation accuracy.
"""
# Use identical starting weights for each network to eliminate differences in
# weight initialization as a cause for differences seen in training performance
#
# Note: The networks will use these weights to define the number of and shapes of
# its layers. The original batch normalization paper used 3 hidden layers
# with 100 nodes in each, followed by a 10 node output layer. These values
# build such a network, but feel free to experiment with different choices.
# However, the input size should always be 784 and the final output should be 10.
if use_bad_weights:
# These weights should be horrible because they have such a large standard deviation
weights = [np.random.normal(size=(784,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,10), scale=5.0).astype(np.float32)
]
else:
# These weights should be good because they have such a small standard deviation
weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,10), scale=0.05).astype(np.float32)
]
# Just to make sure the TensorFlow's default graph is empty before we start another
# test, because we don't bother using different graphs or scoping and naming
# elements carefully in this sample code.
tf.reset_default_graph()
# build two versions of same network, 1 without and 1 with batch normalization
nn = NeuralNet(weights, activation_fn, False)
bn = NeuralNet(weights, activation_fn, True)
# train and test the two models
with tf.Session() as sess:
tf.global_variables_initializer().run()
nn.train(sess, learning_rate, training_batches, batches_per_sample)
bn.train(sess, learning_rate, training_batches, batches_per_sample)
nn.test(sess)
bn.test(sess)
# Display a graph of how validation accuracies changed during training
# so we can compare how the models trained and when they converged
plot_training_accuracies(nn, bn, batches_per_sample=batches_per_sample)
"""
Explanation: There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines.
We add batch normalization to layers inside the fully_connected function. Here are some important points about that code:
1. Layers with batch normalization do not include a bias term.
2. We use TensorFlow's tf.layers.batch_normalization function to handle the math. (We show lower-level ways to do this later in the notebook.)
3. We tell tf.layers.batch_normalization whether or not the network is training. This is an important step we'll talk about later.
4. We add the normalization before calling the activation function.
In addition to that code, the training step is wrapped in the following with statement:
python
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
This line actually works in conjunction with the training parameter we pass to tf.layers.batch_normalization. Without it, TensorFlow's batch normalization layer will not operate correctly during inference.
Finally, whenever we train the network or perform inference, we use the feed_dict to set self.is_training to True or False, respectively, like in the following line:
python
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
We'll go into more details later, but next we want to show some experiments that use this code and test networks with and without batch normalization.
Batch Normalization Demos<a id='demos'></a>
This section of the notebook trains various networks with and without batch normalization to demonstrate some of the benefits mentioned earlier.
We'd like to thank the author of this blog post Implementing Batch Normalization in TensorFlow. That post provided the idea of - and some of the code for - plotting the differences in accuracy during training, along with the idea for comparing multiple networks using the same initial weights.
Code to support testing
The following two functions support the demos we run in the notebook.
The first function, plot_training_accuracies, simply plots the values found in the training_accuracies lists of the NeuralNet objects passed to it. If you look at the train function in NeuralNet, you'll see that while it's training the network, it periodically measures validation accuracy and stores the results in that list. It does that just to support these plots.
The second function, train_and_test, creates two neural nets - one with and one without batch normalization. It then trains them both and tests them, calling plot_training_accuracies to plot how their accuracies changed over the course of training. The really important thing about this function is that it initializes the starting weights for the networks outside of the networks and then passes them in. This lets it train both networks from the exact same starting weights, which eliminates performance differences that might result from (un)lucky initial weights.
End of explanation
"""
train_and_test(False, 0.01, tf.nn.relu)
"""
Explanation: Comparisons between identical networks, with and without batch normalization
The next series of cells train networks with various settings to show the differences with and without batch normalization. They are meant to clearly demonstrate the effects of batch normalization. We include a deeper discussion of batch normalization later in the notebook.
The following creates two networks using a ReLU activation function, a learning rate of 0.01, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 0.01, tf.nn.relu, 2000, 50)
"""
Explanation: As expected, both networks train well and eventually reach similar test accuracies. However, notice that the model with batch normalization converges slightly faster than the other network, reaching accuracies over 90% almost immediately and nearing its max accuracy in 10 or 15 thousand iterations. The other network takes about 3 thousand iterations to reach 90% and doesn't near its best accuracy until 30 thousand or more iterations.
If you look at the raw speed, you can see that without batch normalization we were computing over 1100 batches per second, whereas with batch normalization that goes down to just over 500. However, batch normalization allows us to perform fewer iterations and converge in less time overall. (We only trained for 50 thousand batches here so we could plot the comparison.)
The following creates two networks with the same hyperparameters used in the previous example, but only trains for 2000 iterations.
End of explanation
"""
train_and_test(False, 0.01, tf.nn.sigmoid)
"""
Explanation: As you can see, using batch normalization produces a model with over 95% accuracy in only 2000 batches, and it was above 90% at somewhere around 500 batches. Without batch normalization, the model takes 1750 iterations just to hit 80% – the network with batch normalization hits that mark after around 200 iterations! (Note: if you run the code yourself, you'll see slightly different results each time because the starting weights - while the same for each model - are different for each run.)
In the above example, you should also notice that the networks trained fewer batches per second than what you saw in the previous example. That's because much of the time we're tracking is actually spent periodically performing inference to collect data for the plots. In this example we perform that inference every 50 batches instead of every 500, so generating the plot for this example requires 10 times the overhead for the same 2000 iterations.
The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 1, tf.nn.relu)
"""
Explanation: With the number of layers we're using and this small learning rate, using a sigmoid activation function takes a long time to start learning. It eventually starts making progress, but it took over 45 thousand batches just to get over 80% accuracy. Using batch normalization gets to 90% in around one thousand batches.
The following creates two networks using a ReLU activation function, a learning rate of 1, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 1, tf.nn.relu)
"""
Explanation: Now we're using ReLUs again, but with a larger learning rate. The plot shows how training started out pretty normally, with the network with batch normalization starting out faster than the other. But the higher learning rate bounces the accuracy around a bit more, and at some point the accuracy in the network without batch normalization just completely crashes. It's likely that too many ReLUs died off at this point because of the high learning rate.
The next cell shows the same test again. The network with batch normalization performs the same way, and the other suffers from the same problem again, but it manages to train longer before it happens.
End of explanation
"""
train_and_test(False, 1, tf.nn.sigmoid)
"""
Explanation: In both of the previous examples, the network with batch normalization manages to get over 98% accuracy, and gets near that result almost immediately. The higher learning rate allows the network to train extremely fast.
The following creates two networks using a sigmoid activation function, a learning rate of 1, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 1, tf.nn.sigmoid, 2000, 50)
"""
Explanation: In this example, we switched to a sigmoid activation function. It appears to handle the higher learning rate well, with both networks achieving high accuracy.
The cell below shows a similar pair of networks trained for only 2000 iterations.
End of explanation
"""
train_and_test(False, 2, tf.nn.relu)
"""
Explanation: As you can see, even though these parameters work well for both networks, the one with batch normalization gets over 90% in 400 or so batches, whereas the other takes over 1700. When training larger networks, these sorts of differences become more pronounced.
The following creates two networks using a ReLU activation function, a learning rate of 2, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 2, tf.nn.sigmoid)
"""
Explanation: With this very large learning rate, the network with batch normalization trains fine and almost immediately manages 98% accuracy. However, the network without normalization doesn't learn at all.
The following creates two networks using a sigmoid activation function, a learning rate of 2, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 2, tf.nn.sigmoid, 2000, 50)
"""
Explanation: Once again, using a sigmoid activation function with the larger learning rate works well both with and without batch normalization.
However, look at the plot below where we train models with the same parameters but only 2000 iterations. As usual, batch normalization lets it train faster.
End of explanation
"""
train_and_test(True, 0.01, tf.nn.relu)
"""
Explanation: In the rest of the examples, we use really bad starting weights. That is, normally we would use very small values close to zero. However, in these examples we choose random values with a standard deviation of 5. If you were really training a neural network, you would not want to do this. But these examples demonstrate how batch normalization makes your network much more resilient.
The following creates two networks using a ReLU activation function, a learning rate of 0.01, and bad starting weights.
End of explanation
"""
train_and_test(True, 0.01, tf.nn.sigmoid)
"""
Explanation: As the plot shows, without batch normalization the network never learns anything at all. But with batch normalization, it actually learns pretty well and gets to almost 80% accuracy. The starting weights obviously hurt the network, but you can see how well batch normalization does in overcoming them.
The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and bad starting weights.
End of explanation
"""
train_and_test(True, 1, tf.nn.relu)
"""
Explanation: Using a sigmoid activation function works better than the ReLU in the previous example, but without batch normalization it would take a tremendously long time to train the network, if it ever trained at all.
The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.<a id="successful_example_lr_1"></a>
End of explanation
"""
train_and_test(True, 1, tf.nn.sigmoid)
"""
Explanation: The higher learning rate used here allows the network with batch normalization to surpass 90% in about 30 thousand batches. The network without it never gets anywhere.
The following creates two networks using a sigmoid activation function, a learning rate of 1, and bad starting weights.
End of explanation
"""
train_and_test(True, 2, tf.nn.relu)
"""
Explanation: Using sigmoid works better than ReLUs for this higher learning rate. However, you can see that without batch normalization, the network takes a long time to train, bounces around a lot, and spends a long time stuck at 90%. The network with batch normalization trains much more quickly, seems to be more stable, and achieves a higher accuracy.
The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.<a id="successful_example_lr_2"></a>
End of explanation
"""
train_and_test(True, 2, tf.nn.sigmoid)
"""
Explanation: We've already seen that ReLUs do not do as well as sigmoids with higher learning rates, and here we are using an extremely high rate. As expected, without batch normalization the network doesn't learn at all. But with batch normalization, it eventually achieves 90% accuracy. Notice, though, how its accuracy bounces around wildly during training - that's because the learning rate is really much too high, so the fact that this worked at all is a bit of luck.
The following creates two networks using a sigmoid activation function, a learning rate of 2, and bad starting weights.
End of explanation
"""
train_and_test(True, 1, tf.nn.relu)
"""
Explanation: In this case, the network with batch normalization trained faster and reached a higher accuracy. Meanwhile, the high learning rate makes the network without normalization bounce around erratically and have trouble getting past 90%.
Full Disclosure: Batch Normalization Doesn't Fix Everything
Batch normalization isn't magic and it doesn't work every time. Weights are still randomly initialized and batches are chosen at random during training, so you never know exactly how training will go. Even for these tests, where we use the same initial weights for both networks, we still get different weights each time we run.
This section includes two examples that show runs when batch normalization did not help at all.
The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.
End of explanation
"""
train_and_test(True, 2, tf.nn.relu)
"""
Explanation: When we used these same parameters earlier, we saw the network with batch normalization reach 92% validation accuracy. This time we used different starting weights, initialized using the same standard deviation as before, and the network doesn't learn at all. (Remember, an accuracy around 10% is what the network gets if it just guesses the same value all the time.)
The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.
End of explanation
"""
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
"""
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
e.g. Passing in a matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
"""
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
weights = tf.Variable(initial_weights)
linear_output = tf.matmul(layer_in, weights)
num_out_nodes = initial_weights.shape[-1]
# Batch normalization adds additional trainable variables:
# gamma (for scaling) and beta (for shifting).
gamma = tf.Variable(tf.ones([num_out_nodes]))
beta = tf.Variable(tf.zeros([num_out_nodes]))
# These variables will store the mean and variance for this layer over the entire training set,
# which we assume represents the general population distribution.
# By setting `trainable=False`, we tell TensorFlow not to modify these variables during
# back propagation. Instead, we will assign values to these variables ourselves.
pop_mean = tf.Variable(tf.zeros([num_out_nodes]), trainable=False)
pop_variance = tf.Variable(tf.ones([num_out_nodes]), trainable=False)
# Batch normalization requires a small constant epsilon, used to ensure we don't divide by zero.
# This is the default value TensorFlow uses.
epsilon = 1e-3
def batch_norm_training():
# Calculate the mean and variance for the data coming out of this layer's linear-combination step.
# The [0] defines an array of axes to calculate over.
batch_mean, batch_variance = tf.nn.moments(linear_output, [0])
# Calculate a moving average of the training data's mean and variance while training.
# These will be used during inference.
# Decay should be some number less than 1. tf.layers.batch_normalization uses the parameter
# "momentum" to accomplish this and defaults it to 0.99
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
# The 'tf.control_dependencies' context tells TensorFlow it must calculate 'train_mean'
# and 'train_variance' before it calculates the 'tf.nn.batch_normalization' layer.
# This is necessary because those two operations are not actually in the graph
# connecting the linear_output and batch_normalization layers,
# so TensorFlow would otherwise just skip them.
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
# During inference, use our estimated population mean and variance to normalize the layer
return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)
# Use `tf.cond` as a sort of if-check. When self.is_training is True, TensorFlow will execute
# the operation returned from `batch_norm_training`; otherwise it will execute the graph
# operation returned from `batch_norm_inference`.
batch_normalized_output = tf.cond(self.is_training, batch_norm_training, batch_norm_inference)
# Pass the batch-normalized layer output through the activation function.
# The literature states there may be cases where you want to perform the batch normalization *after*
# the activation function, but it is difficult to find any uses of that in practice.
return activation_fn(batch_normalized_output)
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
return linear_output if not activation_fn else activation_fn(linear_output)
"""
Explanation: When we trained with these parameters and batch normalization earlier, we reached 90% validation accuracy. However, this time the network almost starts to make some progress in the beginning, but it quickly breaks down and stops learning.
Note: Both of the above examples use extremely bad starting weights, along with learning rates that are too high. While we've shown batch normalization can overcome bad values, we don't mean to encourage actually using them. The examples in this notebook are meant to show that batch normalization can help your networks train better. But these last two examples should remind you that you still want to try to use good network design choices and reasonable starting weights. It should also remind you that the results of each attempt to train a network are a bit random, even when using otherwise identical architectures.
Batch Normalization: A Detailed Look<a id='implementation_2'></a>
The layer created by tf.layers.batch_normalization handles all the details of implementing batch normalization. Many students will be fine just using that and won't care about what's happening at the lower levels. However, some students may want to explore the details, so here is a short explanation of what's really happening, starting with the equations you're likely to come across if you ever read about batch normalization.
In order to normalize the values, we first need to find the average value for the batch. If you look at the code, you can see that this is not the average value of the batch inputs, but the average value coming out of any particular layer before we pass it through its non-linear activation function and then feed it as an input to the next layer.
We represent the average as $\mu_B$, which is simply the sum of all of the values $x_i$ divided by the number of values, $m$
$$
\mu_B \leftarrow \frac{1}{m}\sum_{i=1}^m x_i
$$
We then need to calculate the variance, or mean squared deviation, represented as $\sigma_{B}^{2}$. If you aren't familiar with statistics, that simply means for each value $x_i$, we subtract the average value (calculated earlier as $\mu_B$), which gives us what's called the "deviation" for that value. We square the result to get the squared deviation. Sum up the results of doing that for each of the values, then divide by the number of values, again $m$, to get the average, or mean, squared deviation.
$$
\sigma_{B}^{2} \leftarrow \frac{1}{m}\sum_{i=1}^m (x_i - \mu_B)^2
$$
Once we have the mean and variance, we can use them to normalize the values with the following equation. For each value, it subtracts the mean and divides by the (almost) standard deviation. (You've probably heard of standard deviation many times, but if you have not studied statistics you might not know that the standard deviation is actually the square root of the mean squared deviation.)
$$
\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}}
$$
Above, we said "(almost) standard deviation". That's because the real standard deviation for the batch is calculated by $\sqrt{\sigma_{B}^{2}}$, but the above formula adds the term epsilon, $\epsilon$, before taking the square root. The epsilon can be any small, positive constant - in our code we use the value 0.001. It is there partially to make sure we don't try to divide by zero, but it also acts to increase the variance slightly for each batch.
Why increase the variance? Statistically, this makes sense because even though we are normalizing one batch at a time, we are also trying to estimate the population distribution – the total training set, which is itself an estimate of the larger population of inputs your network wants to handle. The variance of a population is higher than the variance for any sample taken from that population, so increasing the variance a little bit for each batch helps take that into account.
At this point, we have a normalized value, represented as $\hat{x_i}$. But rather than use it directly, we multiply it by a gamma value, $\gamma$, and then add a beta value, $\beta$. Both $\gamma$ and $\beta$ are learnable parameters of the network and serve to scale and shift the normalized value, respectively. Because they are learnable just like weights, they give your network some extra knobs to tweak during training to help it learn the function it is trying to approximate.
$$
y_i \leftarrow \gamma \hat{x_i} + \beta
$$
We now have the final batch-normalized output of our layer, which we would then pass to a non-linear activation function like sigmoid, tanh, ReLU, Leaky ReLU, etc. In the original batch normalization paper (linked in the beginning of this notebook), they mention that there might be cases when you'd want to perform the batch normalization after the non-linearity instead of before, but it is difficult to find any uses like that in practice.
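As a quick numeric illustration of the four equations above, here is a small NumPy sketch (the batch values are made up; gamma = 1 and beta = 0 mirror their initial, untrained values):
python
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0])             # values leaving one layer for a single mini-batch
epsilon = 0.001
gamma, beta = 1.0, 0.0                         # starting values, before the network learns better ones
mu = x.mean()                                  # batch mean
var = ((x - mu) ** 2).mean()                   # mean squared deviation (batch variance)
x_hat = (x - mu) / np.sqrt(var + epsilon)      # normalize
y = gamma * x_hat + beta                       # scale and shift
print(y)                                       # roughly [-1.34 -0.45  0.45  1.34]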
In NeuralNet's implementation of fully_connected, all of this math is hidden inside the following line, where linear_output serves as the $x_i$ from the equations:
python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
The next section shows you how to implement the math directly.
Batch normalization without the tf.layers package
Our implementation of batch normalization in NeuralNet uses the high-level abstraction tf.layers.batch_normalization, found in TensorFlow's tf.layers package.
However, if you would like to implement batch normalization at a lower level, the following code shows you how.
It uses tf.nn.batch_normalization from TensorFlow's neural net (nn) package.
1) You can replace the fully_connected function in the NeuralNet class with the below code and everything in NeuralNet will still work like it did before.
End of explanation
"""
def batch_norm_test(test_training_accuracy):
"""
:param test_training_accuracy: bool
If True, perform inference with batch normalization using batch mean and variance;
if False, perform inference with batch normalization using estimated population mean and variance.
"""
weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,10), scale=0.05).astype(np.float32)
]
tf.reset_default_graph()
# Train the model
bn = NeuralNet(weights, tf.nn.relu, True)
# First train the network
with tf.Session() as sess:
tf.global_variables_initializer().run()
bn.train(sess, 0.01, 2000, 2000)
bn.test(sess, test_training_accuracy=test_training_accuracy, include_individual_predictions=True)
"""
Explanation: This version of fully_connected is much longer than the original, but once again has extensive comments to help you understand it. Here are some important points:
It explicitly creates variables to store gamma, beta, and the population mean and variance. These were all handled for us in the previous version of the function.
It initializes gamma to one and beta to zero, so they start out having no effect in this calculation: $y_i \leftarrow \gamma \hat{x_i} + \beta$. However, during training the network learns the best values for these variables using back propagation, just like networks normally do with weights.
Unlike gamma and beta, the variables for population mean and variance are marked as untrainable. That tells TensorFlow not to modify them during back propagation. Instead, the lines that call tf.assign are used to update these variables directly.
TensorFlow won't automatically run the tf.assign operations during training because it only evaluates operations that are required based on the connections it finds in the graph. To get around that, we add this line: with tf.control_dependencies([train_mean, train_variance]): before we run the normalization operation. This tells TensorFlow it needs to run those operations before running anything inside the with block.
The actual normalization math is still mostly hidden from us, this time using tf.nn.batch_normalization.
tf.nn.batch_normalization does not have a training parameter like tf.layers.batch_normalization did. However, we still need to handle training and inference differently, so we run different code in each case using the tf.cond operation.
We use the tf.nn.moments function to calculate the batch mean and variance.
2) The current version of the train function in NeuralNet will work fine with this new version of fully_connected. However, it uses these lines to ensure population statistics are updated when using batch normalization:
python
if self.use_batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
Our new version of fully_connected handles updating the population statistics directly. That means you can also simplify your code by replacing the above if/else condition with just this line:
python
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
3) And just in case you want to implement every detail from scratch, you can replace this line in batch_norm_training:
python
return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)
with these lines:
python
normalized_linear_output = (linear_output - batch_mean) / tf.sqrt(batch_variance + epsilon)
return gamma * normalized_linear_output + beta
And replace this line in batch_norm_inference:
python
return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)
with these lines:
python
normalized_linear_output = (linear_output - pop_mean) / tf.sqrt(pop_variance + epsilon)
return gamma * normalized_linear_output + beta
As you can see in each of the above substitutions, the two lines of replacement code simply implement the following two equations directly. The first line calculates the following equation, with linear_output representing $x_i$ and normalized_linear_output representing $\hat{x_i}$:
$$
\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}}
$$
And the second line is a direct translation of the following equation:
$$
y_i \leftarrow \gamma \hat{x_i} + \beta
$$
We still use the tf.nn.moments operation to implement the other two equations from earlier – the ones that calculate the batch mean and variance used in the normalization step. If you really wanted to do everything from scratch, you could replace that line, too, but we'll leave that to you.
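And if you do want to replace tf.nn.moments as well, one possible equivalent is the sketch below (linear_output here is just a stand-in placeholder for illustration, not the variable from the class):
python
import tensorflow as tf
linear_output = tf.placeholder(tf.float32, [None, 100])  # stand-in for a layer's pre-activation output
# equivalent of: batch_mean, batch_variance = tf.nn.moments(linear_output, [0])
batch_mean = tf.reduce_mean(linear_output, axis=0)
batch_variance = tf.reduce_mean(tf.square(linear_output - batch_mean), axis=0)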
Why the difference between training and inference?
In the original function that uses tf.layers.batch_normalization, we tell the layer whether or not the network is training by passing a value for its training parameter, like so:
python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
And that forces us to provide a value for self.is_training in our feed_dict, like we do in this example from NeuralNet's train function:
python
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
If you looked at the low level implementation, you probably noticed that, just like with tf.layers.batch_normalization, we need to do slightly different things during training and inference. But why is that?
First, let's look at what happens when we don't. The following function is similar to train_and_test from earlier, but this time we are only testing one network and instead of plotting its accuracy, we perform 200 predictions on test inputs, 1 input at a time. We can use the test_training_accuracy parameter to test the network in training or inference modes (the equivalent of passing True or False to the feed_dict for is_training).
End of explanation
"""
batch_norm_test(True)
"""
Explanation: In the following cell, we pass True for test_training_accuracy, which performs the same batch normalization that we normally perform during training.
End of explanation
"""
batch_norm_test(False)
"""
Explanation: As you can see, the network guessed the same value every time! But why? Because during training, a network with batch normalization adjusts the values at each layer based on the mean and variance of that batch. The "batches" we are using for these predictions have a single input each time, so their values are the means, and their variances will always be 0. That means the network will normalize the values at any layer to zero. (Review the equations from before to see why a value that is equal to the mean would always normalize to zero.) So we end up with the same result for every input we give the network, because it's the value the network produces when it applies its learned weights to zeros at every layer.
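Here is a tiny numeric check of that claim (the input value is arbitrary): with a batch of one, the value equals the batch mean, the variance is zero, and the normalized output is essentially zero no matter what the input was.
python
import numpy as np
x = np.array([3.7])                      # a "batch" containing a single input
mu, var = x.mean(), x.var()              # the mean is the value itself, the variance is 0
x_hat = (x - mu) / np.sqrt(var + 0.001)  # normalizes to ~0 regardless of the input value
print(x_hat)                             # essentially [0.]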
Note: If you re-run that cell, you might get a different value from what we showed. That's because the specific weights the network learns will be different every time. But whatever value it is, it should be the same for all 200 predictions.
To overcome this problem, the network does not just normalize the batch at each layer. It also maintains an estimate of the mean and variance for the entire population. So when we perform inference, instead of letting it "normalize" all the values using their own means and variance, it uses the estimates of the population mean and variance that it calculated while training.
So in the following example, we pass False for test_training_accuracy, which tells the network that we want it to perform inference with the population statistics it calculated during training.
End of explanation
"""
|
yttty/python3-scraper-tutorial
|
Python_Spider_Tutorial_03.ipynb
|
gpl-3.0
|
import re
import urllib.request
import urllib
from collections import deque
queue = deque()
visited = set()
url = 'http://news.dbanotes.net' # entry page, feel free to change it
queue.append(url)
cnt = 0
while queue:
url = queue.popleft() # pop the element at the head of the queue
visited |= {url} # mark it as visited
print('Crawled: ' + str(cnt) + ' Now crawling <--- ' + url)
cnt += 1
urlop = urllib.request.urlopen(url)
if 'html' not in urlop.getheader('Content-Type'):
continue
# avoid the program crashing: handle exceptions with try..except
try:
data = urlop.read().decode('utf-8')
except:
continue
# use a regular expression to extract every link on the page, check whether it has been visited, then add it to the crawl queue
linkre = re.compile('href="(.+?)"')
for x in linkre.findall(data):
if 'http' in x and x not in visited:
queue.append(x)
print('Added to queue ---> ' + x)
"""
Explanation: Developing a Web Crawler with Python 3
By Terrill Yang (Github: https://github.com/yttty)
Compiled from the Zhihu column 'What you need: a collection of Python 3.x crawler learning resources'.
This part is based on 'Teaching Yourself to Build a Web Crawler with Python 3 from Scratch (3): Masquerading as a Browser'.
Developing a Web Crawler with Python 3 - Chapter 03
Last time we wrote a crude, barely working crawler, the alpha version. The alpha version has many problems. For example, when a site is unreachable, the crawler keeps waiting for the connection to return a response and does not know how to time out and skip it; some sites deliberately block crawler programs, and our crawler cannot disguise itself as a regular browser; and the content it fetches is not saved locally, so it is not of much use. This time we will solve these small problems one by one.
Also, if HTTP terms such as GET, POST and response mean nothing to you, please first read most of Chapter 2 of the book 'Computer Networking: A Top-Down Approach'.
While reading it, install a tool called Fiddler and practice as you learn: observe how a browser visits a website, how it sends requests, how it handles responses, how it follows redirects, and even how it passes login authentication. As the saying goes, the better you are with Fiddler, the deeper your understanding of the theory becomes; and the deeper your understanding of the theory, the more fluent you become with Fiddler. Later on, whenever we use the crawler to do all sorts of things, Fiddler will always be one of our most capable assistants.
Below is the alpha version of the crawler we wrote last time.
End of explanation
"""
import urllib.request
url = 'http://www.baidu.com/'
req = urllib.request.Request(url, headers = {
'Connection': 'Keep-Alive',
'Accept': 'text/html, application/xhtml+xml, */*',
'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'
})
oper = urllib.request.urlopen(req)
data = oper.read()
print(data.decode())
"""
Explanation: Adding a timeout-and-skip feature
First, I simply changed
python
urlop = urllib.request.urlopen(url)
to
python
urlop = urllib.request.urlopen(url, timeout = 2)
After running it, I found that when a timeout occurs the program is interrupted by an exception. So I put this line inside the try .. except block as well, and the problem was solved.
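A minimal sketch of how the crawl loop can combine the timeout with the existing try .. except (the 2-second timeout follows the text above; the link-extraction part is abbreviated):
python
import urllib.request
from collections import deque
queue = deque(['http://news.dbanotes.net'])
while queue:
    url = queue.popleft()
    try:
        urlop = urllib.request.urlopen(url, timeout=2)  # give up on slow hosts after 2 seconds
        data = urlop.read().decode('utf-8')
    except:
        continue  # timeouts, HTTP errors and decode errors all just skip this URL
    # ... extract links from data and extend the queue as before ...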
Supporting automatic redirects
When crawling http://baidu.com, we get back something with hardly any content, and that something tells us we should jump to http://www.baidu.com . But our crawler does not support automatic redirects, so let's add this feature now and let the crawler fetch the content of www.baidu.com when it crawls baidu.com.
First we need to know what page is returned when we crawl http://baidu.com . We can look at it with Fiddler, or write a tiny crawler to fetch it. What I got is shown below; you should also try writing a few lines of Python to fetch it yourself.
html
<html>
<meta http-equiv="refresh" content="0;url=http://www.baidu.com/">
</html>
Looking at this code, we can see it uses an HTML meta tag to refresh and redirect; the 0 means jump after waiting 0 seconds, that is, immediately. So, just like last time, we can use a regular expression to extract this URL and crawl to the right place. In fact, the crawler we wrote last time already has this capability; we single it out here just to explain HTTP meta redirects.
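For illustration, one possible regular expression to pull the redirect target out of that page (the pattern below is my own sketch, not code from the original tutorial):
python
import re
page = '<html>\n<meta http-equiv="refresh" content="0;url=http://www.baidu.com/">\n</html>'
match = re.search(r'content="\d+;\s*url=(.+?)"', page)
if match:
    print(match.group(1))  # http://www.baidu.com/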
Masquerading as a regular browser
Now let's study in detail how to make websites treat our Python crawler as a regular browser paying a visit, because without this disguise some sites simply cannot be crawled. If you have studied the theory, you know that we need to add a User-Agent to the header when we send a GET request.
If you haven't studied the theory, search for and learn about the following keywords :D
There are two kinds of HTTP messages: request messages and response messages
The request line and header lines of a request message
The GET, POST, HEAD, PUT, DELETE methods
When I visit the Baidu homepage with Internet Explorer, the request message the browser sends out looks like this:
GET http://www.baidu.com/ HTTP/1.1
Accept: text/html, application/xhtml+xml, */*
Accept-Language: en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3
User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko
Accept-Encoding: gzip, deflate
Host: www.baidu.com
DNT: 1
Connection: Keep-Alive
Cookie: BAIDUID=57F4D171573A6B88A68789EF5DDFE87:FG=1; uc_login_unique=ccba6e8d978872d57c7654130e714abd; BD_UPN=11263145; BD
Then, after Baidu receives this message, the response message it sends back to me looks like this (abridged):
```
HTTP/1.1 200 OK
Date: Mon, 29 Sep 2014 13:07:01 GMT
Content-Type: text/html; charset=utf-8
Connection: Keep-Alive
Vary: Accept-Encoding
Cache-Control: private
Cxy_all: baidu+8b13ba5a7289a37fb380e0324ad688e7
Expires: Mon, 29 Sep 2014 13:06:21 GMT
X-Powered-By: HPHP
Server: BWS/1.1
BDPAGETYPE: 1
BDQID: 0x8d15bb610001fe79
BDUSERID: 0
Set-Cookie: BDSVRTM=0; path=/
Set-Cookie: BD_HOME=0; path=/
Content-Length: 80137
<!DOCTYPE html>
<html><head><meta http-equiv="content-type" content="text/html;charset=utf-8"><meta http-equiv="X-UA-Compatible" content="IE=Edge"><title>百度一下,你就知道</title>
<!--STATUS OK--><link rel="dns-prefetch" href="//s1.bdstatic.com"/><link rel="dns-prefetch" href="//t1.baidu.com"/><link rel="dns-prefetch" href="//t2.baidu.com"/><link rel="dns-prefetch" href="//t3.baidu.com"/><link rel="dns-prefetch" href="//t10.baidu.com"/><link rel="dns-prefetch" href="//t11.baidu.com"/><link rel="dns-prefetch" href="//t12.baidu.com"/><link rel="dns-prefetch" href="//b1.bdstatic.com"/><style index="index" > ..........这里省略两万字................ </script></body></html>
```
If you can understand the first line of this, you are fine; the rest can be studied slowly later with Fiddler. So what we need to do is to write a User-Agent into the request when our Python crawler sends it to Baidu, declaring that we are a browser.
There are many ways to add a ```header``` to a GET request; two of them are introduced below.
The first method is simple and direct, but not easy to extend; the code is as follows:
End of explanation
"""
import urllib.request
import http.cookiejar
# head: dict of header
def makeMyOpener(head = {
'Connection': 'Keep-Alive',
'Accept': 'text/html, application/xhtml+xml, */*',
'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'
}):
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
header = []
for key, value in head.items():
elem = (key, value)
header.append(elem)
opener.addheaders = header
return opener
oper = makeMyOpener()
uop = oper.open('http://www.baidu.com/', timeout = 1000)
data = uop.read()
print(data.decode())
"""
Explanation: The second method uses build_opener to create a customized opener. Its advantage is that the functionality can be extended easily; for example, the code below adds automatic cookie handling.
End of explanation
"""
def saveFile(data):
save_path = './data/temp.out'
f_obj = open(save_path, 'wb') # wb 表示打开方式
f_obj.write(data)
f_obj.close()
"""
Explanation: After running the code above, the GET message captured with Fiddler looks like this:
GET http://www.baidu.com/ HTTP/1.1
Accept-Encoding: identity
Connection: close
Host: www.baidu.com
User-Agent: Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko
Accept: text/html, application/xhtml+xml, */*
Accept-Language: en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3
As you can see, everything we wrote in the code has been added to the request message.
Saving the fetched data
While we are at it, a word about file handling. File operations in Python are quite convenient. We can save the fetched data in binary form, or decode() it into a string and save it as text. By changing the file-open mode we can save files in different formats. Here is some reference code:
End of explanation
"""
import re
import urllib.request
import urllib
import http.cookiejar
from collections import deque
# head: dict of header
def makeMyOpener(head = {
'Connection': 'Keep-Alive',
'Accept': 'text/html, application/xhtml+xml, */*',
'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'
}):
cj = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
header = []
for key, value in head.items():
elem = (key, value)
header.append(elem)
opener.addheaders = header
return opener
def saveFile(data, save_path = './temp.out'):
f_obj = open(save_path, 'wb') # wb 表示打开方式
f_obj.write(data)
f_obj.close()
queue = deque()
visited = set()
url = 'http://news.dbanotes.net' # 入口页面, 可以换成别的
queue.append(url)
cnt = 0
oper = makeMyOpener()
while queue:
url = queue.popleft() # 队首元素出队
visited |= {url} # 标记为已访问
print('已经抓取: ' + str(cnt) + ' 正在抓取 <--- ' + url)
cnt += 1
urlop = oper.open(url, timeout=5)
if 'html' not in urlop.getheader('Content-Type'):
continue
# 避免程序异常中止, 用try..catch处理异常
try:
data = urlop.read().decode('utf-8')
#saveFile(data, './data/'+url+'.out')
except:
print('Error in: ', url)
continue
# 正则表达式提取页面中所有队列, 并判断是否已经访问过, 然后加入待爬队列
linkre = re.compile('href="(.+?)"')
for x in linkre.findall(data):
if 'http' in x and x not in visited:
queue.append(x)
print('加入队列 ---> ' + x)
"""
Explanation: With the improvements above, our crawler can now handle more kinds of web pages. Let's take a look at what we have at this stage.
End of explanation
"""
|
GEMScienceTools/rmtk
|
notebooks/vulnerability/derivation_fragility/hybrid_methods/N2/N2.ipynb
|
agpl-3.0
|
from rmtk.vulnerability.derivation_fragility.hybrid_methods.N2 import N2Method
from rmtk.vulnerability.common import utils
%matplotlib inline
"""
Explanation: N2 - Eurocode 8, CEN (2005)
This simplified nonlinear procedure for the estimation of the seismic response of structures uses capacity curves and inelastic spectra. This method has been developed to be used in combination with code-based response spectra, but it is also possible to employ it for the assessment of structural response subject to ground motion records. It also has the distinct aspect of assuming an elastic-perfectly plastic force-displacement relationship in the construction of the bilinear curve. This method is part of recommendations of the Eurocode 8 (CEN, 2005) for the seismic design of new structures, and the capacity curves are usually simplified by a elasto-perfectly plastic relationship.
Note: To run the code in a cell:
Click on the cell to select it.
Press SHIFT+ENTER on your keyboard or press the play button (<button class='fa fa-play icon-play btn btn-xs btn-default'></button>) in the toolbar above.
End of explanation
"""
capacity_curves_file = "../../../../../../rmtk_data/capacity_curves_Sa-Sd.csv"
capacity_curves = utils.read_capacity_curves(capacity_curves_file)
utils.plot_capacity_curves(capacity_curves)
"""
Explanation: Load capacity curves
In order to use this methodology, it is necessary to provide one (or a group) of capacity curves, defined according to the format described in the RMTK manual.
Please provide the location of the file containing the capacity curves using the parameter capacity_curves_file.
End of explanation
"""
gmrs_folder = "../../../../../../rmtk_data/accelerograms"
minT, maxT = 0.1, 2.0
gmrs = utils.read_gmrs(gmrs_folder)
#utils.plot_response_spectra(gmrs, minT, maxT)
"""
Explanation: Load ground motion records
Please indicate the path to the folder containing the ground motion records to be used in the analysis through the parameter gmrs_folder.
Note: Each accelerogram needs to be in a separate CSV file as described in the RMTK manual.
The parameters minT and maxT are used to define the period bounds when plotting the spectra for the provided ground motion fields.
End of explanation
"""
damage_model_file = "../../../../../../rmtk_data/damage_model.csv"
damage_model = utils.read_damage_model(damage_model_file)
"""
Explanation: Load damage state thresholds
Please provide the path to your damage model file using the parameter damage_model_file in the cell below.
The damage types currently supported are: capacity curve dependent, spectral displacement and interstorey drift. If the damage model type is interstorey drift the user can provide the pushover curve in terms of Vb-dfloor to be able to convert interstorey drift limit states to roof displacements and spectral displacements, otherwise a linear relationship is assumed.
End of explanation
"""
damping_ratio = 0.05
PDM, Sds = N2Method.calculate_fragility(capacity_curves, gmrs, damage_model, damping_ratio)
"""
Explanation: Obtain the damage probability matrix
The parameter damping_ratio needs to be defined in the cell below in order to calculate the damage probability matrix.
End of explanation
"""
IMT = "Sa"
period = 0.3
regression_method = "least squares"
fragility_model = utils.calculate_mean_fragility(gmrs, PDM, period, damping_ratio,
IMT, damage_model, regression_method)
"""
Explanation: Fit lognormal CDF fragility curves
The following parameters need to be defined in the cell below in order to fit lognormal CDF fragility curves to the damage probability matrix obtained above:
1. IMT: This parameter specifies the intensity measure type to be used. Currently supported options are "PGA", "Sd" and "Sa".
2. period: This parameter defines the time period of the fundamental mode of vibration of the structure.
3. regression_method: This parameter defines the regression method to be used for estimating the parameters of the fragility functions. The valid options are "least squares" and "max likelihood".
End of explanation
"""
minIML, maxIML = 0.01, 3.00
utils.plot_fragility_model(fragility_model, minIML, maxIML)
# utils.plot_fragility_stats(fragility_statistics,minIML,maxIML)
"""
Explanation: Plot fragility functions
The following parameters need to be defined in the cell below in order to plot the lognormal CDF fragility curves obtained above:
* minIML and maxIML: These parameters define the limits of the intensity measure level for plotting the functions
End of explanation
"""
taxonomy = "RC"
minIML, maxIML = 0.01, 3.00
output_type = "csv"
output_path = "../../../../../../rmtk_data/output/"
utils.save_mean_fragility(taxonomy, fragility_model, minIML, maxIML, output_type, output_path)
"""
Explanation: Save fragility functions
The derived parametric fragility functions can be saved to a file in either CSV format or in the NRML format that is used by all OpenQuake input models. The following parameters need to be defined in the cell below in order to save the lognormal CDF fragility curves obtained above:
1. taxonomy: This parameter specifies a taxonomy string for the fragility functions.
2. minIML and maxIML: These parameters define the bounds of applicability of the functions.
3. output_type: This parameter specifies the file format to be used for saving the functions. Currently, the formats supported are "csv" and "nrml".
End of explanation
"""
cons_model_file = "../../../../../../rmtk_data/cons_model.csv"
imls = [0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.35, 0.40, 0.45, 0.50,
0.60, 0.70, 0.80, 0.90, 1.00, 1.20, 1.40, 1.60, 1.80, 2.00,
2.20, 2.40, 2.60, 2.80, 3.00, 3.20, 3.40, 3.60, 3.80, 4.00]
distribution_type = "lognormal"
cons_model = utils.read_consequence_model(cons_model_file)
vulnerability_model = utils.convert_fragility_vulnerability(fragility_model, cons_model,
imls, distribution_type)
"""
Explanation: Obtain vulnerability function
A vulnerability model can be derived by combining the set of fragility functions obtained above with a consequence model. In this process, the fractions of buildings in each damage state are multiplied by the associated damage ratio from the consequence model, in order to obtain a distribution of loss ratio for each intensity measure level.
The following parameters need to be defined in the cell below in order to calculate vulnerability functions using the above derived fragility functions:
1. cons_model_file: This parameter specifies the path of the consequence model file.
2. imls: This parameter specifies a list of intensity measure levels in increasing order at which the distribution of loss ratios are required to be calculated.
3. distribution_type: This parameter specifies the type of distribution to be used for calculating the vulnerability function. The distribution types currently supported are "lognormal", "beta", and "PMF".
End of explanation
"""
utils.plot_vulnerability_model(vulnerability_model)
"""
Explanation: Plot vulnerability function
End of explanation
"""
taxonomy = "RC"
output_type = "csv"
output_path = "../../../../../../rmtk_data/output/"
utils.save_vulnerability(taxonomy, vulnerability_model, output_type, output_path)
"""
Explanation: Save vulnerability function
The derived parametric or nonparametric vulnerability function can be saved to a file in either CSV format or in the NRML format that is used by all OpenQuake input models. The following parameters need to be defined in the cell below in order to save the vulnerability function obtained above:
1. taxonomy: This parameter specifies a taxonomy string for the vulnerability function.
2. output_type: This parameter specifies the file format to be used for saving the function. Currently, the formats supported are "csv" and "nrml".
End of explanation
"""
|
AshtonIzmev/deep-learning-python-snippets
|
python/notebook/keras-digits.ipynb
|
mit
|
# Here's a Deep Dumb MLP (DDMLP)
model = Sequential()
model.add(Dense(input_dim, 128, init='lecun_uniform'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(128, 128, init='lecun_uniform'))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(128, nb_classes, init='lecun_uniform'))
model.add(Activation('relu'))
model.compile(loss='mse', optimizer='rmsprop')
model.fit(X_train, y_train, nb_epoch=30, batch_size=8, validation_split=0.05, show_accuracy=False, verbose=0)
accuracy_score(y_valid, model.predict_classes(X_valid))
"""
Explanation: Model 1
End of explanation
"""
#Yet another model
model1 = Sequential()
# first convolutional layer
model1.add(Convolution2D(32,1,2,2))
model1.add(Activation('relu'))
# second convolutional layer
model1.add(Convolution2D(48, 32, 2, 2))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(poolsize=(2,2)))
# third convolutional layer
model1.add(Convolution2D(32, 48, 2, 2))
model1.add(Activation('relu'))
model1.add(MaxPooling2D(poolsize=(2,2)))
# convert convolutional filters to flatt so they can be feed to
# fully connected layers
model1.add(Flatten())
# first fully connected layer
model1.add(Dense(32*6*6, 128, init='lecun_uniform'))
model1.add(Activation('relu'))
model1.add(Dropout(0.25))
# second fully connected layer
model1.add(Dense(128, 128, init='lecun_uniform'))
model1.add(Activation('relu'))
model1.add(Dropout(0.25))
# last fully connected layer which output classes
model1.add(Dense(128, 10, init='lecun_uniform'))
model1.add(Activation('softmax'))
# setting sgd optimizer parameters
sgd = SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
model1.compile(loss='mse', optimizer=sgd)
model1.fit(X_train_reshaped, y_train, nb_epoch=1, batch_size=1000, validation_split=0.1, show_accuracy=True,
verbose=1)
accuracy_score(y_valid, model1.predict_classes(X_valid_reshaped))
a = model1.predict_classes(X_valid_reshaped)
a
"""
Explanation: This model reaches 0.97171 with the parameters
nb_epoch=10, batch_size=16, validation_split=0.1
and 0.97400 with the parameters
nb_epoch=30, batch_size=8, validation_split=0.05
Model 2
End of explanation
"""
# Here is another model
model2 = Sequential()
model2.add(Convolution2D(32, 3, 3, 3, border_mode='full'))
model2.add(Activation('relu'))
model2.add(Convolution2D(32, 32, 3, 3))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(poolsize=(2, 2)))
model2.add(Dropout(0.25))
model2.add(Convolution2D(64, 32, 3, 3, border_mode='full'))
model2.add(Activation('relu'))
model2.add(Convolution2D(64, 64, 3, 3))
model2.add(Activation('relu'))
model2.add(MaxPooling2D(poolsize=(2, 2)))
model2.add(Dropout(0.25))
model2.add(Flatten())
model2.add(Dense(64*8*8, 256))
model2.add(Activation('relu'))
model2.add(Dropout(0.5))
model2.add(Dense(256, nb_classes))
model2.add(Activation('relu'))
#sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(loss='categorical_crossentropy', optimizer=sgd)
# we'll use MSE (mean squared error) for the loss, and RMSprop as the optimizer
#model.compile(loss='mse', optimizer='rmsprop')
model2.compile(loss='mse', optimizer='rmsprop')
model2.fit(X_train_reshaped, y_train, nb_epoch=1, batch_size=16, validation_split=0.1, show_accuracy=False,
verbose=0)
accuracy_score(y_valid, model.predict_classes(X_valid))
preds = model1.predict_classes(X_test, verbose=0)
def write_preds(preds, fname):
pd.DataFrame({"ImageId": list(range(1,len(preds)+1)), "Label": preds}).to_csv(fname, index=False, header=True)
write_preds(preds, "../../result/keras-mlp.csv")
"""
Explanation: Model 3
End of explanation
"""
|
oseledets/talks-online
|
kaiserslautern-2018/lecture-1.ipynb
|
cc0-1.0
|
import numpy as np
import matplotlib.pyplot as plt
from numpy.polynomial import Chebyshev as T
from numpy.polynomial.hermite import hermval
%matplotlib inline
def p_cheb(x, n):
"""
RETURNS T_n(x)
value of not normalized Chebyshev polynomials
$\int \frac1{\sqrt{1-x^2}}T_m(x)T_n(x) dx = \frac\pi2\delta_{nm}$
"""
return T.basis(n)(x)
def p_herm(x, n):
"""
RETURNS H_n(x)
value of non-normalized Probabilistic polynomials
"""
cf = np.zeros(n+1)
cf[n] = 1
return (2**(-float(n)*0.5))*hermval(x/np.sqrt(2.0), cf)
def system_mat(pnts, maxn, poly):
"""
RETURNS system matrix
"""
A = np.empty((pnts.size, maxn), dtype=float)
for i in range(maxn):
A[:, i] = poly(pnts, i)
return A
x = np.linspace(-1, 1, 1000)
data = []
for i in range(5):
data.append(x)
data.append(p_cheb(x, i))
plt.plot(*data)
plt.legend(["power = {}".format(i) for i in range(len(data))]);
def complex_func(x):
return np.sin(2.0*x*np.pi)*np.cos(0.75*(x+0.3)*np.pi)
plt.plot(x, complex_func(x));
"""
Explanation: Multivariate function approximation: methods and tools
Overall plan
(Today) Multivariate function approximation: curse of dimensionality, polynomial chaos, optimal experiment design, connection to linear algebra.
Tensor decompositions:
Deep learning methods.
Uncertainty quantification
<img src='entire_process.png'>
Uncertainty quantification
Numerical simulations rely on models of the real world; these models carry uncertainties
In coefficients
In right-hand sides
Models are approximate
Forward and inverse problems
UQ splits into two major branches: forward and inverse problems
Roughly speaking, UQ divides into two major branches, forward and inverse problems.
Forward problems
In the forward propagation of uncertainty, we have a known model F for a system of interest.
We model its inputs $X$ as a random variable and wish to understand the output random variable
$$Y = F(X)$$
(also denoted $Y \vert X$) and reads $Y$ given $X$.
Also, this is related to sensitivity analysis (how random variations in $X$ influence variation in $Y$).
Inverse problems
In inverse problems, $F$ is a forward model, but $Y$ is observed data, and we want to find the input data $X$
such that $F(X) = Y$, i.e. we want $X \vert Y$ instead of $Y \vert X$.
Inverse problems are typically ill-posed in the usual sense, so we need expert knowledge (a prior)
about what a good solution $X$ might be.
Bayesian perspective becomes the method of choice, but this requires the representation of high-dimensional distributions.
$$p(X \vert Y) = \frac{p(Y \vert X) p(X)}{p(Y)}.$$
Approximation of multivariate functions
If we want to do efficient UQ (not only Monte-Carlo) we need efficient tools for the approximation of multivariate functions.
Curse of dimensionality
The complexity of approximating a $d$-variate function grows exponentially with $d$.
Methods that one can use
Polynomial / Fourier type approximations
Sparse polynomial approximations / best N-term approximations
ANOVA decomposition / sparse grids
Gaussian process regression
Tensor decompositions (Friday)
Deep learning (Friday)
Consider orthogonal polynomials ${p_n}$
$$
\langle p_n,\, p_m \rangle = \int_a^bp_n(x)p_m(x)w(x)\,dx=\delta_{nm}h_n.
$$
- Chebyshev polynomials of the first kind, $(a,\,b)=(-1,\,1)$, $w(x)=\left(1-x^2\right)^{-1/2}$
- Hermite polynomials (mathematical or probabilistic), $(a,\,b)=(-\infty,\,+\infty)$, $w(x)=\frac1{\sqrt{2\pi}}\exp\left(-x^2/2\right)$
End of explanation
"""
n = 6
M = n
nodes = np.linspace(-1, 1, M)
RH = complex_func(nodes)
A = system_mat(nodes, n, p_cheb)
if n == M:
alpha = np.linalg.solve(A, RH)
else:
alpha = np.linalg.lstsq(A, RH)[0]
print("α = {}".format(alpha))
def calc_apprximant(poly, alpha, x):
"""
RETURNS values of approximant in points x
"""
n = len(alpha)
y = np.zeros_like(x)
for i in range(n):
y[...] += poly(x, i)*alpha[i]
return y
y = complex_func(x)
approx_y = calc_apprximant(p_cheb, alpha, x)
plt.plot(x, y, x, approx_y, nodes, RH, 'ro');
"""
Explanation: Now, let's approximate the function with polynomials taking different maximal power $n$ and the corresponding number of node points
$$
f(x)\approx\hat f(x)=\sum_{i=0}^n\alpha_i p_i(x)
$$
End of explanation
"""
epsilon = np.linalg.norm(y - approx_y, np.inf)
print("ε = {}".format(epsilon))
"""
Explanation: Approximate value of the error
$$
\varepsilon=
\|f-\hat f\|_\infty\approx\max_{x\in \mathcal X}\bigl|f(x)-\hat f(x)\bigr|
$$
End of explanation
"""
A = system_mat(nodes, n, p_herm)
if n == M:
alpha = np.linalg.solve(A, RH)
else:
alpha = np.linalg.lstsq(A, RH)[0]
print("α = {}".format(alpha))
approx_y = calc_apprximant(p_herm, alpha, x)
plt.plot(x, y, x, approx_y, nodes, RH, 'ro')
epsilon = np.linalg.norm(y - approx_y, np.inf)
print("ε = {}".format(epsilon))
"""
Explanation: If we take another set of polynomials, the result of the approximation will be the same (coefficients $\alpha$ will be different of course).
End of explanation
"""
nodes = np.cos((2.0*np.arange(M) + 1)/M*0.5*np.pi)
RH = complex_func(nodes)
A = system_mat(nodes, n, p_herm)
alpha = np.linalg.solve(A, RH)
print("α = {}".format(alpha))
approx_y = calc_apprximant(p_herm, alpha, x)
plt.plot(x, y, x, approx_y, nodes, RH, 'ro')
epsilon_cheb = np.linalg.norm(y - approx_y, np.inf)
print("ε_cheb = {}".format(epsilon_cheb))
# All in one. We can play with maximum polynomial power
def plot_approx(f, n, distrib='unif', poly='cheb'):
def make_nodes(n, distrib='unif'):
return {'unif' : lambda : np.linspace(-1, 1, n),
'cheb' : lambda : np.cos((2.0*np.arange(n) + 1.0)/n*0.5*np.pi)}[distrib[:4].lower()]
poly_f = {'cheb' : p_cheb, 'herm' : p_herm}[poly[:4].lower()]
#solve
nodes = make_nodes(n, distrib)()
RH = f(nodes)
A = system_mat(nodes, n, p_herm)
alpha = np.linalg.solve(A, RH)
# calc values
x = np.linspace(-1, 1, 2**10)
y = f(x)
approx_y = calc_apprximant(p_herm, alpha, x)
#plot
plt.figure(figsize=(14,6.5))
plt.plot(x, y, x, approx_y, nodes, RH, 'ro')
plt.show()
# calc error
epsilon_cheb = np.linalg.norm(y - approx_y, np.inf)
print("ε = {}".format(epsilon_cheb))
from ipywidgets import interact, fixed, widgets
interact(plot_approx,
f=fixed(complex_func),
n=widgets.IntSlider(min=1,max=15,step=1,value=4,continuous_update=True,description='# of terms (n)'),
distrib=widgets.ToggleButtons(options=['Uniform', 'Chebyshev roots'],description='Points distr.'),
poly=widgets.ToggleButtons(options=['Chebyshev polynomials', 'Hermite polynomials'],description='Poly. type')
);
"""
Explanation: Now, what will change if we take another set of node points?
End of explanation
"""
# Scale the function a little
scale = 5.0
big_x = np.random.randn(int(1e6))
big_y = complex_func(big_x/scale)
mean = np.mean(big_y)
var = np.std(big_y)**2
print ("mean = {}, variance = {}".format(mean, var))
def p_herm_snorm(n):
"""
Square norm of "math" Hermite (w = exp(-x^2/2)/sqrt(2*pi))
"""
return np.math.factorial(n)
n = 15
M = n
nodes = np.linspace(-scale, scale, M)
RH = complex_func(nodes/scale)
A = system_mat(nodes, n, p_herm)
if n == M:
alpha = np.linalg.solve(A, RH)
else:
W = np.diag(np.exp( -nodes**2*0.5))
alpha = np.linalg.lstsq(W.dot(A), W.dot(RH))[0]
h = np.array([p_herm_snorm(i) for i in range(len(alpha))])
var = np.sum(alpha[1:]**2*h[1:])
print ("mean = {}, variance = {}".format(alpha[0]*h[0], var))
"""
Explanation: Random input
Let input $x$ is random with known probability density function $\rho$.
We want to know statistical properties of the output
- mean value
- variance
- risk estimation
How to find them using polynomial expansion?
Assume the function $f$ is analytical
$$
f(x)=\sum_{i=0}^\infty \alpha_i p_i(x).
$$
The mean value of $f$ is
$$
\mathbb E f = \int_a^bf(\tau)\rho(\tau)\,d\tau =
\sum_{i=0}^\infty \int_a^b\alpha_i p_i(\tau)\rho(\tau)\,d\tau.
$$
If the set of orthogonal polynomials $\{p_n\}$ has the same weight function as $\rho$,
and the first polynomial is constant $p_0(x)=h_0$,
then $\mathbb Ef=\alpha_0h_0$.
Usually, $h_0=1$ and we get a simple relation
$$
\mathbb Ef = \alpha_0
$$
The variance is equal to
$$
\text{Var } f=\mathbb E\bigl(f-\mathbb E f\bigr)^2=
\int_a^b \left(\sum_{i=1}^\infty\alpha_ip_i(\tau)\right)^2\rho(\tau)\,d\tau ,
$$
note, that the summation begins with 1. Assume we can interchange the sum and the integral, then
$$
\text{Var } f=
\sum_{i=1}^\infty\sum_{j=1}^\infty\int_a^b \alpha_ip_i(\tau)\,\alpha_jp_j(\tau)\,\rho(\tau)\,d\tau =
\sum_{i=1}^\infty \alpha_i^2h_i.
$$
The formula is very simple if all the coefficients ${h_i}$ are equal to 1
$$
\text{Var } f = \sum_{i=1}^\infty \alpha_i^2
$$
Let us check the formulas for the mean and variance by calculating them using the Monte-Carlo method.
Normal distribution
First, consider the case of a normal distribution of the input, $x\sim\mathcal N(0,1)$,
$\rho(x)=\frac1{\sqrt{2\pi}}\exp(-x^2/2)$,
so we take Hermite polynomials.
End of explanation
"""
ex = 2
x = np.linspace(-scale - ex, scale + ex, 10000)
y = complex_func(x/scale)
approx_y = calc_apprximant(p_herm, alpha, x)
plt.plot(x, y, x, approx_y, nodes, RH, 'ro');
"""
Explanation: Note, that the precise values are
$$
\mathbb E f = -0.16556230699\ldots,
\qquad
\text{Var }f= 0.23130350880\ldots
$$
so, the method based on polynomial expansion is more precise than Monte-Carlo.
End of explanation
"""
# 1-D example
from ipywidgets import interactive, interact, widgets
import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial as SP
# defining Squared Exponential Kernel and plot it
def k(length_scale):
x = np.arange(0., 5., 0.1)
plt.figure(figsize=(10, 7))
plt.ylim([0, 1.05])
plt.xlabel('$x$', fontsize=16)
plt.ylabel('$k(x,0)$', fontsize=16)
plt.plot(x, np.exp(-.5 * x**2/length_scale**2), 'b-')
plt.show()
controls = {r'length_scale': widgets.FloatSlider(
min=0.01, max=5.0, step=0.1, value=1., continuous_update=False, description=r'$\ell$')}
from ipywidgets import interactive
import matplotlib.pyplot as plt
import numpy as np
def GP(length_scale, Test, Training, sigma):
np.random.seed(100)
""" This is code for simple GP regression. It assumes a zero mean GP Prior """
# This is the true unknown function we are trying to approximate
def f(x): return np.sin(0.9*x.flatten())
# Define the kernel
def kernel(a, b):
sqdist = SP.distance.cdist(a, b, 'sqeuclidean')
return np.exp(-.5 * sqdist/(length_scale**2))
N = Training # number of training points.
n = Test # number of test points.
s = sigma # noise variance.
# Sample some input points and noisy versions of the function evaluated at
# these points.
X = np.random.uniform(-5, 5, size=(N, 1))
y = f(X) + s*np.random.randn(N)
K = kernel(X, X)
L = np.linalg.cholesky(K + s*np.eye(N))
# points we're going to make predictions at.
Xtest = np.linspace(-5, 5, n)[:, None]
# compute the mean at our test points.
Lk = np.linalg.solve(L, kernel(X, Xtest))
mu = np.dot(Lk.T, np.linalg.solve(L, y))
# compute the variance at our test points.
K_ = kernel(Xtest, Xtest)
s2 = np.diag(K_) - np.sum(Lk**2, axis=0)
s = np.sqrt(s2)
# PLOTS:
plt.figure(figsize=(9, 7))
plt.clf()
plt.plot(X, y, 'r+', ms=18, label="Training points")
plt.plot(Xtest, f(Xtest), 'b-', label="Function")
plt.gca().fill_between(Xtest.flat, mu-s, mu+s,
color="#dddddd", label="Confidence interval")
plt.plot(Xtest, mu, 'r--', lw=2, label="Approximation")
plt.title(r'Mean prediction plus-minus one s.d.')
plt.xlabel('$x$', fontsize=16)
plt.ylabel('$f(x)$', fontsize=16)
plt.axis([-5, 5, -3, 3])
plt.legend()
print("Error (inf. norm) = ", np.linalg.norm(f(Xtest)-mu, ord=np.inf)/np.linalg.norm(f(Xtest), ord=np.inf))
plt.show()
controls = {r'sigma': widgets.FloatSlider(min=5e-4, max=5e-1, step=1e-3, value=1e-3, continuous_update=True, description=r'$\sigma$'),
r'length_scale': widgets.FloatSlider(min=0.1, max=2.0, step=0.05, value=0.7, continuous_update=True, description=r'$\ell$'),
r'Training': widgets.IntSlider(min=1, max=50, step=1, value=10, continuous_update=True, description=r'$N$ of $f$ evals'),
r'Test': widgets.IntSlider(min=1, max=100, step=1, value=50, continuous_update=True, description=r'$N$ of GP samples')}
interact(GP, **controls);
"""
Explanation: Linear model
The model described above is a special case of linear model: we fix a basis set and obtain
$$f(x) \approx \sum_{k=1}^M c_k \phi_k(x).$$
For $x \in \mathbb{R}^d$ what basis set to choose?
Why tensor-product polynomials are bad for large $d$ ?
What are the alternatives to tensor-product basis?
Good approach: sparse polynomial bases
Instead of taking all possible $\mathbf{x}^{\mathbf{\alpha}}$, we take only a subset, such as:
Total degree: $\vert \mathbf{\alpha} \vert \leq T$
Hyperbolic cross scheme
For very smooth functions, such approximations work really well (and simple to use!).
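A minimal sketch (my own illustration, not from the lecture) of building a total-degree index set $\{\mathbf{\alpha} : \vert \mathbf{\alpha} \vert \leq T\}$ for $d$ variables, showing how much smaller it is than the tensor-product set:

```python
from itertools import product

def total_degree_indices(d, T):
    # All multi-indices alpha in N^d with |alpha| = alpha_1 + ... + alpha_d <= T
    return [alpha for alpha in product(range(T + 1), repeat=d)
            if sum(alpha) <= T]

print(len(total_degree_indices(d=3, T=2)))  # 10 basis functions instead of 3**3 = 27
```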
Experiment design
Given a linear model,
$$f(x) \approx \sum_{k=1}^M c_k \phi_k(x).$$
How to find coefficients?
Sampling
Answer: do sampling,
and solve linear least squares
$$f(x_i) \approx \sum_{k=1}^M c_k \phi_k(x_i).$$
How to sample?
Sampling methods
$$f(x_i) \approx \sum_{k=1}^M c_k \phi_k(x_i).$$
Non-adaptive schemes: Monte-Carlo, Quasi-Monte Carlo, Latin Hypercube Sampling (LHS)
Adaptive: optimize for points $x_1, \ldots, x_N$.
There are many criteria.
D-optimality
If we select $N = M$ and select points such that
$$\vert \det M \vert \rightarrow \max,$$
where
$$M_{ik} = \phi_k(x_i)$$ is the design matrix.
Why is this good?
Linear algebra: maximum volume
Let $A \in \mathbb{R}^{n \times r}$, $n \gg r$.
Let $\widehat{A}$ be the submatrix of maximum volume.
Then, all coefficients in $A \widehat{A}^{-1}$ are less than $1$ in modulus.
As a simple consequence,
we have
$$E_{D} \leq (r+1) E_{best},$$
where $E_D$ is the approximation error from the optimal design, and $E_{best}$ is the best possible approximation error in the Chebyshev norm.
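A rough sketch of the idea of D-optimal point selection (a simple greedy exchange of my own, not the maxvol algorithm referenced above): pick $r$ rows of a candidate design matrix so that $\vert \det \widehat{A} \vert$ is locally maximal.

```python
import numpy as np

def greedy_d_optimal(Phi, n_sweeps=20, seed=0):
    """Select r rows of the n-by-r candidate matrix Phi so that |det| of the
    chosen square submatrix is (locally) maximal."""
    rng = np.random.default_rng(seed)
    n, r = Phi.shape
    idx = list(rng.choice(n, size=r, replace=False))
    best = abs(np.linalg.det(Phi[idx]))
    for _ in range(n_sweeps):
        improved = False
        for pos in range(r):
            for cand in range(n):
                if cand in idx:
                    continue
                trial = idx.copy()
                trial[pos] = cand
                val = abs(np.linalg.det(Phi[trial]))
                if val > best:
                    idx, best, improved = trial, val, True
        if not improved:
            break
    return idx

# Example: cubic polynomial basis evaluated on a grid of candidate points
x = np.linspace(-1, 1, 50)
Phi = np.vander(x, 4, increasing=True)   # columns 1, x, x^2, x^3
print(sorted(greedy_d_optimal(Phi)))     # the chosen points concentrate near the interval ends
```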
Problem setting
We have an unknown multivariate function $f(\mathbf{x})$ that we would like to approximate on some specified domain.<br>
We have a dataset $\mathcal{D}$ of $n$ function observations, $\mathcal{D} = {(\mathbf{x}_i,y_i),i = 1,\ldots,n}$.
Given $\mathcal{D}$ we wish to make predictions for new inputs $\mathbf{x}_*$ within the domain.
To solve this problem we must make assumptions about the characteristics of $f(\mathbf{x})$.
Two common approaches
restrict the class of functions that we consider (e.g. considering only linear functions)
Problem: we have to decide upon the richness of the class of functions considered; $f(\mathbf{x})$ may not be well modelled by this class, so the predictions will be poor.
the second approach is (speaking rather loosely) to give a prior probability to every possible function, where higher probabilities are given to functions that we consider to be more likely.
<span style="color:red">Problem: there are an uncountably infinite set of possible functions.</span>
Second approach
This is where the Gaussian process (GP) arise to cope with the problem mentioned above
Typically, there is some knowledge about a function of interest $f(\mathbf{x})$ even before observing it anywhere.
For example, $f(\mathbf{x})$ cannot exceed, or be smaller than, certain values or that it is periodic or that it shows translational invariance.<br>
Such knowledge is called as the prior knowledge.
Prior knowledge may be precise (e.g., $f(\mathbf{x})$ is twice differentiable), or it may be vague (e.g., the probability that the periodicity is $T$ is $p(T)$). When we have a deal with vague prior knowledge, we refer to it as prior belief.
Prior beliefs about $f(\mathbf{x})$ can be modeled by a probability measure on the space of functions from $\mathcal{F}$ to $\mathbb{R}$. A GP is a great way to represent this probability measure.
Definition of GP
A Gaussian process is a collection of random variables, any finite number of which have a joint Gaussian distribution (in other words GP is a generalization of a multivariate Gaussian distribution to infinite dimensions).
A GP defines a probability measure on $\mathcal{F}$. When we say that $f(\mathbf{x})$ is a GP, we mean that it is a random variable that is actually a function.
Analytically, it can be written as
$$
f(\mathbf{x}) \sim \mbox{GP}\left(m(\mathbf{x}), k(\mathbf{x},\mathbf{x'}) \right),
$$ where
* $m:\mathbb{R}^d \rightarrow \mathbb{R}$ is the mean function;
* $k:\mathbb{R}^d \times \mathbb{R}^d \rightarrow \mathbb{R}$ is the covariance function.
Connection to the multivariate Gaussian distribution
Let $\mathbf{x}_{1:n}=\{\mathbf{x}_1,\dots,\mathbf{x}_n\}$ be $n$ points in $\mathbb{R}^d$. Let $\mathbf{f}\in\mathbb{R}^n$ be the outputs of $f(\mathbf{x})$ on each one of the elements of $\mathbf{x}_{1:n}$,
$$
\mathbf{f} =
\left(
\begin{array}{c}
f(\mathbf{x}_1)\\
\vdots\\
f(\mathbf{x}_n)
\end{array}
\right).
$$
The fact that $f(\mathbf{x})$ is a GP with mean and covariance function $m(\mathbf{x})$ and $k(\mathbf{x},\mathbf{x'})$ means that the vector of outputs $\mathbf{f}$ at the arbitrary inputs is the following multivariate normal:
$$
\mathbf{f} \sim \mathcal{N}\bigl(\mathbf{m}(\mathbf{x}_{1:n}), \mathbf{K}(\mathbf{x}_{1:n}, \mathbf{x}_{1:n})\bigr),
$$
with mean vector
$$
\mathbf{m}(\mathbf{x}_{1:n}) =
\left(
\begin{array}{c}
m(\mathbf{x}_1)\\
\vdots\\
m(\mathbf{x}_n)
\end{array}
\right),
$$
and covariance matrix
$$
\mathbf{K}(\mathbf{x}_{1:n},\mathbf{x}_{1:n}) = \left(
\begin{array}{ccc}
k(\mathbf{x}_1,\mathbf{x}_1) & \dots & k(\mathbf{x}_1, \mathbf{x}_n)\\
\vdots & \ddots & \vdots\\
k(\mathbf{x}_n, \mathbf{x}_1) & \dots & k(\mathbf{x}_n, \mathbf{x}_n)
\end{array}
\right).
$$
Now, since we have defined a GP, let us talk about how do we encode our prior beliefs into a GP.
We do so through the mean and covariance functions.
Interpretation of the mean function
For any point $\mathbf{x}\in\mathbb{R}^d$, $m(\mathbf{x})$ is the expected value of the r.v. $f(\mathbf{x})$:
$$
m(\mathbf{x}) = \mathbb{E}[f(\mathbf{x})].
$$
The mean function can be any arbitrary function. Essentially, it tracks generic trends in the response as the input is varied.<br>
In practice, we try and make a suitable choice for the mean function that is easy to work with. Such choices include:
* a constant, $m(\mathbf{x}) = c,$ where $c$ is a parameter (in many cases $c=0$).
* linear, $m(\mathbf{x}) = c_0 + \sum_{i=1}^dc_ix_i,$ where $c_i, i=0,\dots,d$ are parameters.
* using a set of $m$ basis functions (generalized linear model), $m(\mathbf{x}) = \sum_{i=1}^mc_i\phi_i(\mathbf{x})$, where $c_i$ and $\phi_i(\cdot)$ are parameters and basis functions.
* generalized polynomial chaos (gPC), using a set of $d$ polynomial basis functions up to a given degree $\rho$, $m(\mathbf{x}) = \sum_{i=1}^{d}c_i\phi_i(\mathbf{x})$ where the basis functions $\phi_i$ are mutually orthonormal: $$
\int \phi_{i}(\mathbf{x}) \phi_{j}(\mathbf{x}) dF(\mathbf{x}) = \delta_{ij}.
$$
Squared exponential covariance function
Squared exponential (SE) is widely used covariance function. Its has the form:
$$
k(\mathbf{x}, \mathbf{x}') = v\exp\left\{-\frac{1}{2}\sum_{i=1}^d\frac{(x_i - x_i')^2}{\ell_i^2}\right\},
$$
where
* $v>0$ – signal strength. The bigger it is, the more the GP $f(\mathbf{x})$ will vary about the mean.
* $\ell_i>0, i=1,\dots,d$ – length-scale of the $i$-th input dimension of the GP. The bigger it is, the smoother the samples of $f(\mathbf{x})$ appear along the $i$-th input dimension.
End of explanation
"""
|
diegocavalca/Studies
|
programming/Python/tensorflow/exercises/Graph.ipynb
|
cc0-1.0
|
# Q1. Create a graph
g = ...
with g.as_default():
# Define inputs
with tf.name_scope("inputs"):
a = tf.constant(2, tf.int32, name="a")
b = tf.constant(3, tf.int32, name="b")
# Ops
with tf.name_scope("ops"):
c = tf.multiply(a, b, name="c")
d = tf.add(a, b, name="d")
e = tf.subtract(c, d, name="e")
# Q2. Start a session
sess = ...
# Q3. Fetch c, d, e
_c, _d, _e = ...
print("c =", _c)
print("d =", _d)
print("e =", _e)
# Close the session
sess.close()
"""
Explanation: Q1-3. You are to implement the graph below. Complete the code.
<img src="figs/fig1.png",width=500>
End of explanation
"""
tf.reset_default_graph()
# Define inputs
a = tf.Variable(tf.random_uniform([]))
b_pl = tf.placeholder(tf.float32, [None])
# Ops
c = a * b_pl
d = a + b_pl
e = tf.reduce_sum(c)
f = tf.reduce_mean(d)
g = e - f
# initialize variable(s)
init = tf.global_variables_initializer()
# Update variable
update_op = tf.assign(a, a + g)
# Q4. Create a (summary) writer to `asset`
writer = ...
#Q5. Add `a` to summary.scalar
...
#Q6. Add `c` and `d` to summary.histogram
...
#Q7. Merge all summaries.
summaries = ...
# Start a session
sess = tf.Session()
# Initialize Variable(s)
sess.run(init)
# Fetch the value of c, d, and e.
for step in range(5):
_b = np.arange(10, dtype=np.float32)
_, summaries_proto = sess.run([update_op, summaries], {b_pl:_b})
# Q8. Attach summaries_proto to TensorBoard.
...
# Close the session
sess.close()
"""
Explanation: Q4-8. You are to implement the graph below. Complete the code.
<img src="figs/fig3.png",width=500>
End of explanation
"""
|
tjwei/HackNTU_Data_2017
|
Week11/DIY_AI/Softmax.ipynb
|
mit
|
# Weight
W = Matrix([1,2],[3,4], [5,6])
W
# Bias
b = Vector(1,0,-1)
b
# 輸入
x = Vector(2,-1)
x
"""
Explanation: Supervised learning for classification
Given a set of $x$ together with their class labels, we want to find a way to compute the class of $x$.
One-hot encoding
If we have three classes, we can encode them as
* $(1,0,0)$
* $(0,1,0)$
* $(0,0,1)$
Question
Why not simply encode the classes as 1, 2, 3?
The softmax regression model looks like this
Our input $x=\begin{pmatrix} x_0 \\ x_1 \\ x_2 \\ x_3 \end{pmatrix}$ is a vector, which we view as a column vector.
The weight: $W = \begin{pmatrix} W_0 \\ W_1 \\ W_2 \end{pmatrix} =
\begin{pmatrix} W_{0,0} & W_{0,1} & W_{0,2} & W_{0,3}\\
W_{1,0} & W_{1,1} & W_{1,2} & W_{1,3} \\
W_{2,0} & W_{2,1} & W_{2,2} & W_{2,3} \end{pmatrix}$
The bias: $b=\begin{pmatrix} b_0 \\ b_1 \\ b_2 \end{pmatrix}$
We first compute the "linear output" $ c = \begin{pmatrix} c_0 \\ c_1 \\ c_2 \end{pmatrix} = Wx+b =
\begin{pmatrix} W_0 x + b_0 \\ W_1 x + b_1 \\ W_2 x + b_2 \end{pmatrix}$, then take $\exp$ element-wise, which gives a vector
$d = \begin{pmatrix} d_0 \\ d_1 \\ d_2 \end{pmatrix} = e^{W x + b} = \begin{pmatrix} e^{c_0} \\ e^{c_1} \\ e^{c_2} \end{pmatrix}$
Finally we divide these values by their sum.
Given an input x, we want the resulting number q_i to match the probability that the class of x is i:
$q_i = Predict_{W,b}(Y=i|x) = \frac {e^{W_i x + b_i}} {\sum_j e^{W_j x + b_j}} = \frac {d_i} {\sum_j d_j}$
Putting it together, $q = \frac {d} {\sum_j d_j}$
Question
Why use $\exp$?
Let us first build an arbitrary $\mathbb{R}^2 \rightarrow \mathbb{R}^3$ network.
End of explanation
"""
# 請在這裡計算
# 參考答案
#%load solutions/softmax_compute_q.py
%run -i solutions/softmax_compute_q.py
# 顯示 q
q
"""
Explanation: Task: compute the final predicted probabilities $q$
Hint: np.exp computes $\exp$
End of explanation
"""
# Hint 下面產生數字 i 的 2 進位向量
i = 13
x = Vector(i%2, (i>>1)%2, (i>>2)%2, (i>>3)%2)
x
# 請在這裡計算
# 參考答案
#%load solutions/softmax_mod4.py
"""
Explanation: Exercise
Design a network:
* the input is the binary encoding of 0 ~ 15
* the output classifies the number into four classes according to its remainder modulo 4
Hint: you can refer to the way W and b were set up above
End of explanation
"""
# 請在這裡計算
# 參考答案
#%load solutions/softmax_mod3.py
"""
Explanation: Exercise
Design a network:
* the input is the binary encoding of 0 ~ 15
* the output classifies the number into three classes according to its remainder modulo 3
Hint: it does not have to be fully correct; guessing is fine, as long as the accuracy beats random guessing. You can use simple statistics of the data to guide the guess.
End of explanation
"""
# 先產生隨機的 W 和 b
W = Matrix(np.random.normal(size=(3,4)))
b = Vector(np.random.normal(size=(3,)))
W
b
"""
Explanation: Gradient descent
Loss function
To judge the quality of our predictions we need a way to measure the error.
Suppose the true class of the input $x$ is $y$; we define the loss function
$ loss = -\log(q_y)=- \log(Predict_{W,b}(Y=y|x)) $
This is called cross entropy.
The more general (and slightly more complicated) form is
$ loss = - \sum_i p_i\log(q_i) = - p \cdot \log q$
where $i$ runs over all classes and $ p_i = \Pr(Y=i|x) $ is the true probability.
In our case the true class of $x$ is $y$, so simply $p_y = 1$ (and the other $p_i = 0$).
How to improve
We use a method called gradient descent to reduce the error.
The gradient is the direction of steepest ascent, so if we take a small step in the opposite direction of the gradient (the direction of steepest descent), the function value should decrease a little.
Remember that our variables are $W$ and $b$ (a whole collection of $W_{i,j}$ and $b_i$), so we need the partial derivative of $loss$ with respect to every parameter in $W$ and $b$.
Fortunately these partial derivatives can be worked out by hand, and the resulting expressions are not complicated.
Writing $i$ for the true class, $loss$ expands to
$loss = -\log(q_y) = \log(\sum_j d_j) - c_i \\
= \log(\sum_j e^{W_j x + b_j}) - W_i x - b_i$
Note that $d_j = e^{W_j x + b_j}$ only involves the variables $b_j, W_j$.
For $k \neq i$, the partial derivative of $loss$ with respect to $b_k$ is
$$ \frac{e^{W_k x + b_k}}{\sum_j e^{W_j x + b_j}} = q_k$$
For $k = i$, the partial derivative of $loss$ with respect to $b_k$ is
$$ q_k - 1$$
The partial derivatives with respect to $W$ are not hard either.
For $k \neq i$, the partial derivative of $loss$ with respect to $W_{k,t}$ is
$$ \frac{e^{W_k x + b_k} x_t}{\sum_j e^{W_j x + b_j}} = q_k x_t$$
For $k = i$, the partial derivative of $loss$ with respect to $W_{k,t}$ is
$$ q_k x_t - x_t$$
Implementation
End of explanation
"""
i = 14
x = Vector(i%2, (i>>1)%2, (i>>2)%2, (i>>3)%2)
y = i%3
"""
Explanation: Question
Why are the sizes of W and b set this way?
Task: pick an arbitrary pair x, y and run gradient descent on it
End of explanation
"""
# 請在這裡計算
# 參考答案(跟前面一樣)¶
#%load solutions/softmax_compute_q.py
%run -i solutions/softmax_compute_q.py
#顯示 q
q
"""
Explanation: Step: compute q
End of explanation
"""
# 請在這裡計算
# 參考答案(跟前面一樣)
%run -i solutions/softmax_compute_loss1.py
#顯示 loss
loss
"""
Explanation: Step: compute the loss
End of explanation
"""
# 請在這裡計算 grad_b
#參考答案
%run -i solutions/softmax_compute_grad_b.py
grad_b
"""
Explanation: Step: compute the gradient with respect to b
End of explanation
"""
# 請在這裡計算
#參考答案
%run -i solutions/softmax_compute_grad_W.py
grad_W
"""
Explanation: Step: compute the gradient with respect to W
End of explanation
"""
# 請在這裡計算
# 參考答案
%run -i solutions/softmax_update_Wb.py
# 原先的 q
q
# 原先的 loss
loss
# 現在的 loss
%run -i solutions/softmax_compute_q.py
%run -i solutions/softmax_compute_loss1.py
loss
q
"""
Explanation: Step: update W and b by subtracting 0.5 * gradient from each, then check whether the new loss has improved
End of explanation
"""
X = np.array([Vector(i%2, (i>>1)%2, (i>>2)%2, (i>>3)%2) for i in range(16)])
for i in range(4):
print("i=", i)
display(X[i])
X
# 對應的組別
y = np.array([i%3 for i in range(16)])
y
"""
Explanation: Training on several samples at once
Above we trained on a single x (i=14). What if we train on all x at once?
By convention the sample index goes on axis 0.
End of explanation
"""
# 請在這裡計算
# 參考解答如後
"""
Explanation: Task: vectorize the training
End of explanation
"""
d = np.exp(W @ X + b)
q = d/d.sum(axis=(1,2), keepdims=True)
q
"""
Explanation: Compare with the single-sample version
python
d = np.exp(W @ x + b)
q = d/d.sum()
q
End of explanation
"""
loss = -np.log(q[range(len(y)), y])
loss
# 用平均當成我們真正的 loss
loss.mean()
"""
Explanation: Compare with the single-sample version
python
loss = -np.log(q[y])
loss
End of explanation
"""
# fancy indexing :p
one_y = np.eye(3)[y][..., None]
grad_b_all = q - one_y
grad_b = grad_b_all.mean(axis=0)
grad_b
"""
Explanation: Compare with the single-sample version
python
grad_b = q - np.eye(3)[y][:, None]
End of explanation
"""
grad_W_all = grad_b_all @ X.swapaxes(1,2)
grad_W = grad_W_all.mean(axis=0)
grad_W
W -= 0.5 * grad_W
b -= 0.5 * grad_b
# 之前的 loss
loss.mean()
d = np.exp(W @ X + b)
q = d/d.sum(axis=(1,2), keepdims=True)
loss = -np.log(q[range(len(y)), y])
loss.mean()
"""
Explanation: Compare with the single-sample version
python
grad_W = grad_b @ x.T
End of explanation
"""
# 在這裡計算
# 參考答案
%run -i solutions/softmax_train.py
# 畫出 loss 的曲線
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(loss_history);
# 對答案
display((W @ X + b).argmax(axis=1).ravel())
display(y)
"""
Explanation: Task: put everything together
Set up W, b
Set up X
Train for thirty iterations
Compute q and the loss
Compute grad_b and grad_W
Update W, b
Check the accuracy
End of explanation
"""
|
sdpython/ensae_teaching_cs
|
_doc/notebooks/td1a_soft/td1a_cython_edit.ipynb
|
mit
|
from jyquickhelper import add_notebook_menu
add_notebook_menu()
"""
Explanation: 1A.soft - Numerical computing and Cython
Python is very slow. It is possible to write some parts in C, but the dialogue between the two languages is tedious. Cython offers a blend of C and Python that speeds up development.
End of explanation
"""
def racine_carree1(x) :
return x**0.5
%timeit -r 10 [ racine_carree1(x) for x in range(0,1000) ]
import math
def racine_carree2(x) :
return math.sqrt(x)
%timeit -r 10 [ racine_carree2(x) for x in range(0,1000) ]
"""
Explanation: Numerical computing
We can measure how long a program takes like this (it only works with IPython...timeit):
End of explanation
"""
%timeit -r 10 [ x**0.5 for x in range(0,1000) ]
%timeit -r 10 [ math.sqrt(x) for x in range(0,1000) ]
"""
Explanation: The second function is faster. A second check:
End of explanation
"""
%load_ext cython
"""
Explanation: We also note that calling a function to then perform the computation cost about 100 $\mu s$ for 1000 calls. The timeit instruction runs 10 loops, each computing a square root 1000 times.
Cython
The Cython module is a way to speed up computations by inserting, inside a Python program, code written in a syntax close to C. There are several approaches to speed up a Python program:
Cython: we insert [C](http://fr.wikipedia.org/wiki/C_(langage) code into the Python program; a factor of 10 can be gained on functions that use loops intensively.
other alternatives:
cffi, you need to know C (it does not handle C++)
pythran
numba
...
PyPy: the Python program is compiled statically instead of being interpreted as it runs. This solution is only practical if you have already programmed in a compiled language, or more precisely a language with strong typing. Because Python allows a variable to change type, this can create type-inference problems.
module implemented in C: this is the most frequent case and one of the reasons Python was adopted so quickly; many libraries became available in Python this way. However, Python's C API requires a significant investment to avoid mistakes. It is preferable to go through tools such as
boost python: easy to get started with, the module will be available in compiled form,
SWIG: a bit harder, the module will either be compiled by the library or packaged so that it is compiled at installation time.
Among the three solutions, the first is the most accessible and under constant development (Cython changes).
The example that follows cannot run directly in a notebook because Cython compiles a module (a *.pyd file) before using it. If the compilation fails with a message containing unable to find file vcvarsall.bat, you should read the article Build a Python 64 bit extension on Windows 8 after noting the version of Visual Studio you use. Having programmed in C/C++ helps, even if it is not required.
Cython in a notebook
The IPython module offers a simplified way to use Cython, illustrated here: Some Linear Algebra with Cython. Further down you will find how to do it without IPython, which we will not use in this session. We start with the preliminaries, to be executed only once:
End of explanation
"""
%%cython --annotate
cimport cython
def cprimes(int kmax):
cdef int n, k, i
cdef int p[1000]
result = []
if kmax > 1000:
kmax = 1000
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
i = i + 1
if i == k:
p[k] = n
k = k + 1
result.append(n)
n = n + 1
return result
"""
Explanation: Then we write the function using Cython syntax:
End of explanation
"""
%timeit [ cprimes (567) for i in range(10) ]
"""
Explanation: We finish by estimating its execution time. Note also that this code cannot be moved into the previous cell, which must be written entirely in Cython.
End of explanation
"""
def distance_edition(mot1, mot2):
dist = { (-1,-1): 0 }
for i,c in enumerate(mot1) :
dist[i,-1] = dist[i-1,-1] + 1
dist[-1,i] = dist[-1,i-1] + 1
for j,d in enumerate(mot2) :
opt = [ ]
if (i-1,j) in dist :
x = dist[i-1,j] + 1
opt.append(x)
if (i,j-1) in dist :
x = dist[i,j-1] + 1
opt.append(x)
if (i-1,j-1) in dist :
x = dist[i-1,j-1] + (1 if c != d else 0)
opt.append(x)
dist[i,j] = min(opt)
return dist[len(mot1)-1,len(mot2)-1]
%timeit distance_edition("idstzance","distances")
"""
Explanation: Exercise: Python/C applied to an edit distance
The Levenshtein distance, also called edit distance, computes a distance between two sequences of elements. In particular it applies to two words, as illustrated by "Distance d'édition et programmation dynamique". The goal is to modify the following function so that it uses Cython, and then to compare execution times.
End of explanation
"""
code = """
def primes(int kmax):
cdef int n, k, i
cdef int p[1000]
result = []
if kmax > 1000:
kmax = 1000
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
i = i + 1
if i == k:
p[k] = n
k = k + 1
result.append(n)
n = n + 1
return result
"""
name = "example_cython"
with open(name + ".pyx","w") as f : f.write(code)
setup_code = """
from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules = cythonize("__NAME__.pyx",
compiler_directives={'language_level' : "3"})
)
""".replace("__NAME__",name)
with open("setup.py","w") as f:
f.write(setup_code)
"""
Explanation: Beforehand, it is probably necessary to follow these hints (see the sketch after this cell):
If you want to replace the dictionary with a two-dimensional array, since C does not allow creating arrays of variable length, you would have to allocate a pointer (that is C, not C++). However, I advise against this solution:
Cython does not accept double pointers: How to declare 2D list in Cython; simple pointers are fine: Python list to Cython.
Cython is not necessarily compiled with the same version as your Visual Studio C++ compiler. Because of this, you might get the error warning C4273: 'round' : inconsistent dll linkage. After reading the article BUILDING PYTHON 3.3.4 WITH VISUAL STUDIO 2013, you will understand that it is not so simple to fix.
I therefore suggest replacing dist with an array cdef int dist [500][500]. The signature of the function is: def cdistance_edition(str mot1, str mot2). Finally, Cython has been optimized to be used together with numpy; whenever you have the choice, it is better to use numpy containers rather than allocate large arrays on the function stack or manage your own pointers.
Cython without notebooks
This part is only useful if you intend to use Cython without IPython. The following lines again implement with Cython the primes function, which returns the prime numbers between 1 and $N$. We now follow the method recommended in the Cython tutorial. First, two files must be created:
example_cython.pyx which contains the code of the function
setup.py which compiles the module with the Visual Studio C++ compiler
End of explanation
"""
import os
import sys
cmd = "{0} setup.py build_ext --inplace".format(sys.executable)
from pyquickhelper.loghelper import run_cmd
out,err = run_cmd(cmd)
if err != '' and err is not None:
raise Exception(err)
[ _ for _ in os.listdir(".") if "cython" in _ or "setup.py" in _ ]
"""
Explanation: Then we compile the .pyx file we created by running setup.py with specific parameters:
End of explanation
"""
import pyximport
pyximport.install()
import example_cython
"""
Explanation: Then we import the module:
End of explanation
"""
%timeit [ example_cython.primes (567) for i in range(10) ]
"""
Explanation: If your last modification does not show up, you have to restart the kernel. When Python imports the module example_cython for the first time, it loads the file example_cython.pyd. When the module is modified, this file is locked and cannot be replaced, yet that is required because the module must be recompiled. For this reason, it is more practical to implement your function in a text editor that does not use IPython.
We measure the time taken by the primes function:
End of explanation
"""
def py_primes(kmax):
p = [ 0 for _ in range(1000) ]
result = []
if kmax > 1000:
kmax = 1000
k = 0
n = 2
while k < kmax:
i = 0
while i < k and n % p[i] != 0:
i = i + 1
if i == k:
p[k] = n
k = k + 1
result.append(n)
n = n + 1
return result
%timeit [ py_primes (567) for i in range(10) ]
"""
Explanation: Then we compare with the version written in pure Python:
End of explanation
"""
|
tensorflow/docs-l10n
|
site/ja/tensorboard/get_started.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
# Load the TensorBoard notebook extension
%load_ext tensorboard
import tensorflow as tf
import datetime
# Clear any logs from previous runs
!rm -rf ./logs/
"""
Explanation: Get started with TensorBoard
<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https://www.tensorflow.org/tensorboard/get_started"> <img src="https://www.tensorflow.org/images/tf_logo_32px.png"> View on TensorFlow.org</a>
</td>
<td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/tensorboard/get_started.ipynb"> <img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Run in Google Colab</a>
</td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/tensorboard/get_started.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png"> View source on GitHub</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/tensorboard/get_started.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">Download notebook</a></td>
</table>
In machine learning, to improve something you need to be able to measure it. TensorBoard is a tool that provides the measurements and visualizations needed during the machine learning workflow. It lets you track experiment metrics such as loss and accuracy, visualize the model graph, project embeddings into a lower-dimensional space, and much more.
This quickstart shows how to get started quickly with TensorBoard. The other guides on this website explain specific features in more detail that are not covered here.
End of explanation
"""
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
def create_model():
return tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
"""
Explanation: Using the MNIST dataset as an example, normalize the data and write a function that creates a simple Keras model for classifying the images into 10 classes.
End of explanation
"""
model = create_model()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train,
y=y_train,
epochs=5,
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
"""
Explanation: Using TensorBoard with Keras Model.fit()
When training with Keras's Model.fit(), adding the tf.keras.callbacks.TensorBoard callback ensures that logs are created and stored. Additionally, histogram_freq=1 (disabled by default) enables histogram computation every epoch.
Place the logs in a timestamped subdirectory to make it easy to select different training runs.
End of explanation
"""
%tensorboard --logdir logs/fit
"""
Explanation: Start TensorBoard either from the command line or within the notebook experience. The two interfaces are mostly the same. In notebooks, use the %tensorboard line magic; on the command line, run the same command without "%".
End of explanation
"""
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
train_dataset = train_dataset.shuffle(60000).batch(64)
test_dataset = test_dataset.batch(64)
"""
Explanation: <!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/quickstart_model_fit.png?raw=1"/> -->
A brief overview of the dashboards (tabs in the top navigation bar):
The Scalars dashboard shows how the loss and metrics change with every epoch. You can also use it to track training speed, learning rate, and other scalar values.
The Graphs dashboard visualizes your model. In this case, the Keras graph of layers is shown, which can help you ensure it is built correctly.
The Distributions and Histograms dashboards show the distribution of a tensor over time. This can be useful to visualize weights and biases and verify that they are changing in the expected way.
Additional TensorBoard plugins are automatically enabled when you log other types of data. For example, the Keras TensorBoard callback also lets you log images and embeddings. You can see what other plugins are available in TensorBoard by clicking on the "inactive" dropdown towards the top right.
Using TensorBoard with other methods
When training with methods such as tf.GradientTape(), use tf.summary to log the required information.
Use the same dataset as above, but convert it to tf.data.Dataset to take advantage of batching capabilities.
End of explanation
"""
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
"""
Explanation: The training code follows the advanced quickstart tutorial, but shows how to log metrics to TensorBoard. Choose a loss and an optimizer.
End of explanation
"""
# Define our metrics
train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')
test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy')
"""
Explanation: Create stateful metrics that accumulate values during training and can be logged at any point.
End of explanation
"""
def train_step(model, optimizer, x_train, y_train):
with tf.GradientTape() as tape:
predictions = model(x_train, training=True)
loss = loss_object(y_train, predictions)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss(loss)
train_accuracy(y_train, predictions)
def test_step(model, x_test, y_test):
predictions = model(x_test)
loss = loss_object(y_test, predictions)
test_loss(loss)
test_accuracy(y_test, predictions)
"""
Explanation: Define the training and test functions.
End of explanation
"""
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
test_log_dir = 'logs/gradient_tape/' + current_time + '/test'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
"""
Explanation: Set up summary writers to write the summaries to disk in a logs directory.
End of explanation
"""
model = create_model() # reset our model
EPOCHS = 5
for epoch in range(EPOCHS):
for (x_train, y_train) in train_dataset:
train_step(model, optimizer, x_train, y_train)
with train_summary_writer.as_default():
tf.summary.scalar('loss', train_loss.result(), step=epoch)
tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)
for (x_test, y_test) in test_dataset:
test_step(model, x_test, y_test)
with test_summary_writer.as_default():
tf.summary.scalar('loss', test_loss.result(), step=epoch)
tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print (template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
# Reset metrics every epoch
train_loss.reset_states()
test_loss.reset_states()
train_accuracy.reset_states()
test_accuracy.reset_states()
"""
Explanation: Start training. Use tf.summary.scalar() to log metrics (loss and accuracy) during training/testing within the scope of the summary writers so that the summaries are written to disk. You have control over which metrics to log and how often to do it. Other tf.summary functions enable logging other types of data.
End of explanation
"""
%tensorboard --logdir logs/gradient_tape
"""
Explanation: Open TensorBoard again, this time pointing it at the new log directory. TensorBoard could also have been launched while training was in progress to monitor it.
End of explanation
"""
!tensorboard dev upload \
--logdir logs/fit \
--name "(optional) My latest experiment" \
--description "(optional) Simple comparison of several hyperparameters" \
--one_shot
"""
Explanation: <!-- <img class="tfo-display-only-on-site" src="https://github.com/tensorflow/tensorboard/blob/master/docs/images/quickstart_gradient_tape.png?raw=1"/> -->
That's it! You have now seen how to use TensorBoard both through the Keras callback and through tf.summary for more custom scenarios.
TensorBoard.dev: Host and share your ML experiment results
TensorBoard.dev is a free public service that lets you upload your TensorBoard logs and get a permalink that can be shared with everyone in academic papers, blog posts, social media, etc. This can enable better reproducibility and collaboration.
To use TensorBoard.dev, run the following command:
End of explanation
"""
|
Oli4/lsi-material
|
Foundations of Information Management/Sheet 3 - RA operators in SQL and SQL queries.ipynb
|
mit
|
import sqlite3
conn = sqlite3.connect('movie.db')
cur = conn.cursor()
"""
Explanation: Questions 1
(RA operators in SQL). Transform the following relational algebra expressions from the first exercise sheet into equivalent SQL queries.
Question 2
End of explanation
"""
cur.execute('''SELECT genre
FROM film
WHERE title="Titanic"''')
for row in cur.fetchall():
print(row[0])
"""
Explanation: a) What is the genre of “Titanic”?
End of explanation
"""
cur.execute('''SELECT id, firstname, name
FROM person
WHERE name='Jolie'
AND firstname='Angelina' ''')
for row in cur.fetchall():
print(row[0], row[1], row[2])
"""
Explanation: b) What is the ID of Angelina Jolie?
End of explanation
"""
cur.execute('''SELECT title
FROM film
WHERE year=2010
AND genre LIKE "%Action%"''')
for row in cur.fetchall():
print(row[0])
"""
Explanation: c) Which action movies were produced in 2010?
End of explanation
"""
cur.execute('''SELECT name
FROM cinema
WHERE city="Bonn"''')
for row in cur.fetchall():
print(row[0])
"""
Explanation: d) List all cinemas located in Bonn.
End of explanation
"""
cur.execute('''SELECT firstname, name
FROM person, participation
WHERE participation.person = person.id
AND function="actor"''')
for row in cur.fetchall()[:10]:
print(row[0], row[1])
"""
Explanation: e) List all actors who acted in at least one movie.
End of explanation
"""
cur.execute('''SELECT firstname, name
FROM person, participation, film
WHERE participation.person = person.id
AND participation.function="director"
AND participation.film = film.id
AND film.title="Titanic"''')
for row in cur.fetchall():
print(row[0], row[1])
"""
Explanation: f) Who directed the film “Titanic”?
End of explanation
"""
cur.execute('''SELECT cinema.name, cinema.city
FROM cinema, show, film
WHERE show.date > '1999-'
AND film.title="Inferno"
AND show.film = film.id
AND show.cinema = cinema.id''')
for row in cur.fetchall():
print(row[0], row[1])
"""
Explanation: g) Where can you watch the new Dan Brown film “Inferno”?
As the dates in the SHOW table are randomly chosen between 1980 and 2016, every cinema shows each film only once in this period, and since the CINEMA table only contains cinemas from Bonn and Cologne, there are not too many matches.
End of explanation
"""
cur.execute('''SELECT person.firstname, person.name
FROM person, participation, film
WHERE film.title = "Inferno"
AND participation.film = film.id
AND participation.person = person.id''')
for row in cur.fetchall():
print(row[0], row[1])
conn.close()
"""
Explanation: h) Who participated in “Inferno”?
End of explanation
"""
|
namiszh/fba
|
notebooks/yahoo-api-example.ipynb
|
mit
|
from rauth import OAuth2Service
import webbrowser
import json
"""
Explanation: Yahoo API Example
This notebook is an example of using yahoo api to get fantasy sports data.
End of explanation
"""
clientId= "dj0yJmk9M3gzSWJZYzFmTWZtJmQ9WVdrOU9YcGxTMHB4TXpnbWNHbzlNQS0tJnM9Y29uc3VtZXJzZWNyZXQmeD1kZg--"
clientSecret="dbd101e179b3d129668965de65d05c02df42333d"
"""
Explanation: Prerequisite
First we need to create a Yahoo APP at https://developer.yahoo.com/apps/, and select Fantasy Sports - Read for API Permissions. Then we can get the Client ID (Consumer Key) and Client Secret (Consumer Secret)
End of explanation
"""
oauth = OAuth2Service(client_id = clientId,
                      client_secret = clientSecret,
name = "yahoo",
access_token_url = "https://api.login.yahoo.com/oauth2/get_token",
authorize_url = "https://api.login.yahoo.com/oauth2/request_auth",
base_url = "http://fantasysports.yahooapis.com/fantasy/v2/")
"""
Explanation: Step 1: Create an OAuth object
End of explanation
"""
params = {
'response_type': 'code',
'redirect_uri': 'oob'
}
authorize_url = oauth.get_authorize_url(**params)
webbrowser.open(authorize_url)
code = input('Enter code: ')
"""
Explanation: Step 2: Generate authorize url, and then get the verify code
For this script, the redirect_uri is set to 'oob', and a page is opened in the browser to get the verify code.
For a web app server, we can instead set the redirect URI to the callback domain registered during Yahoo APP creation.
End of explanation
"""
data = {
'code': code,
'grant_type': 'authorization_code',
'redirect_uri': 'oob'
}
oauth_session = oauth.get_auth_session(data=data,
decoder= lambda payload : json.loads(payload.decode('utf-8')))
"""
Explanation: Step 3: Get session with the code
End of explanation
"""
user_url='https://fantasysports.yahooapis.com/fantasy/v2/users;use_login=1'
resp = oauth_session.get(user_url, params={'format': 'json'})
resp.json()
user_guid=resp.json()['fantasy_content']['users']['0']['user'][0]['guid']
user_guid
"""
Explanation: Example to get user Info
End of explanation
"""
team_url = 'https://fantasysports.yahooapis.com/fantasy/v2/users;use_login=1/games;game_keys=nba/teams'
resp = oauth_session.get(team_url, params={'format': 'json'})
teams = resp.json()['fantasy_content']['users']['0']['user'][1]['games']['0']['game'][1]['teams']
teams
team_count = int(teams['count'])
team_count
for idx in range(0,team_count):
team = teams[str(idx)]['team'][0][19]['managers']
print(team, '\n')
"""
Explanation: Example to query nba teams of logged user.
End of explanation
"""
league_url = 'https://fantasysports.yahooapis.com/fantasy/v2/users;use_login=1/games;game_keys=nba/leagues'
resp = oauth_session.get(league_url, params={'format': 'json'})
leagues = resp.json()['fantasy_content']['users']['0']['user'][1]['games']['0']['game'][1]['leagues']
leagues
league_count = int(leagues['count'])
league_count
for idx in range(0,league_count):
league = leagues[str(idx)]['league'][0]
print(league, '\n')
"""
Explanation: Example to get nba leagues of logged user
End of explanation
"""
settings_url = 'https://fantasysports.yahooapis.com/fantasy/v2/game/nba/leagues;league_keys=375.l.1039/settings'
resp = oauth_session.get(settings_url, params={'format': 'json'})
settings = resp.json()['fantasy_content']['game'][1]['leagues']['0']['league'][1]['settings'][0]
settings
stat_categories = settings['stat_categories']['stats']
for category in stat_categories:
print(category['stat'], '\n')
"""
Explanation: Example to get league metadata
End of explanation
"""
teams_url = 'https://fantasysports.yahooapis.com/fantasy/v2/league/375.l.573/teams'
resp = oauth_session.get(teams_url, params={'format': 'json'})
league_teams = resp.json()['fantasy_content']['league'][1]['teams']
league_teams
league_team_count = int(league_teams['count'])
league_team_count
for idx in range(0,league_team_count):
league_team = league_teams[str(idx)]['team'][0]
print(league_team, '\n')
team_logo = league_team[5]['team_logos'][0]['team_logo']['url']
# print('team_log', team_logo)
"""
Explanation: Get all teams of a league
End of explanation
"""
stat_url = 'https://fantasysports.yahooapis.com/fantasy/v2/team/375.l.1039.t.17/stats;type=week;week=2'
resp = oauth_session.get(stat_url, params={'format': 'json'})
team_stats = resp.json()['fantasy_content']['team'][1]['team_stats']['stats']
team_stats
"""
Explanation: Example to get team stats of week 2
End of explanation
"""
stat_url = 'https://fantasysports.yahooapis.com/fantasy/v2/team/375.l.1039.t.17/stats'
resp = oauth_session.get(stat_url, params={'format': 'json'})
team_stats = resp.json()['fantasy_content']['team'][1]['team_stats']['stats']
team_stats
"""
Explanation: Example to get team stats of whole season
End of explanation
"""
stat_url = 'https://fantasysports.yahooapis.com/fantasy/v2/game/nba/stat_categories'
resp = oauth_session.get(stat_url, params={'format': 'json'})
stat_categories = resp.json()['fantasy_content']['game'][1]['stat_categories']['stats']
stat_categories
"""
Explanation: Example to get game stat categories
End of explanation
"""
|
azubiolo/itstep
|
it_step/ml_from_scratch/4_least-squares_continued/least-squares.ipynb
|
mit
|
X = [[1., 50.], [1., 76.], [1., 26.], [1., 102.]]
Y = [30., 48., 12., 90.]
# Y[3] = 300 # Outlier. Uncomment this line if you want to introduce an outlier.
"""
Explanation: Ordinary Least Squares -- Part II
Course recap
This lab consists in implementing the Ordinary Least Squares (OLS) algorithm, which is a linear regression with a least-squares penalty. Given a training set $ D = \left{ \left(x^{(i)}, y^{(i)}\right), x^{(i)} \in \mathcal{X}, y^{(i)} \in \mathcal{Y}, i \in {1, \dots, n } \right}$, recall (from lectures 1 and 2) OLS aims at minimizing the following cost function $J$:
$$J(\theta) = \dfrac{1}{2} \sum_{i = 1}^{n} \left( h\left(x^{(i)}\right) - y^{(i)} \right)^2$$
where
$$h(x) = \sum_{j = 0}^{d} \theta_j x_j = \theta^T x.$$
For the sake of simplicity, we will be working on a small training set (the one we used in lectures 1 and 2):
| living area (m$^2$) | price (1000's BGN)|
|--------------------:|------------------:|
| 50 | 30 |
| 76 | 48 |
| 26 | 12 |
| 102 | 90 |
Defining the training set
Exercise 1: Define variables X and Y that will contain the features $\mathcal{X}$ and labels $\mathcal{Y}$ of the training set.
Hint: Do not forget the intercept!
End of explanation
"""
1/2
"""
Explanation: In this simple example, the dimensionality is $d = 1$ (which means 2 features: don't forget the intercept!) and the number of samples is $n = 4$.
Remark: 1. is written instead of 1 in order to avoid integer operations. For example, in some languages (including Python 2), the result of 1/2 is 0 and not 0.5:
End of explanation
"""
1./2
"""
Explanation: Instead, writing 1./2 forces a float operation and gives 0.5 as a result, which is what we want:
End of explanation
"""
def predict(x, theta):
y_hat = x[0] * theta[0] + x[1] * theta[1] # compute the dot product between 'theta' and 'x'
return y_hat
"""
Explanation: Prediction function
Exercise: Define a function predict that takes as parameter the feature vector $x$ and the model $\theta$ and returns the predicted label:
$$ \hat{y} = h(x) = \theta^T x = \sum_{j = 0}^d \theta_j x_j$$
End of explanation
"""
def cost_function(y, y_hat):
loss = (y - y_hat) ** 2 / 2
return loss
"""
Explanation: Defining the cost function
Cost function on a single sample
Exercise: Define a function cost_function that takes as parameters the actual label $y$ and the predicted label $\hat{y}$ of a single sample and returns the value of the cost function for this pair. Recall from lectures 1 and 2 that it is given by:
$$ \ell \left( y, \hat{y} \right) = \dfrac{1}{2}\left( y - \hat{y} \right)^2$$
End of explanation
"""
def cost_function_total(X, Y, theta):
cost = 0 # initialize the cost with 0
n = len(Y)
for i in range(n): # iterate over the training set (n = 4 in our case)
x = X[i] # get the ith feature vector
y = Y[i] # get the ith label
y_hat = predict(x, theta) # predict the ith label
cost += cost_function(y, y_hat) # add the cost of the current sample to the total cost
return cost
"""
Explanation: Cost function on the whole training set
We are now able to compute the cost function for a single sample. We can easily compute the cost function for the whole training set by summing the cost function values for all the samples in the training set. Recall that the total cost function is given by:
$$J(\theta) = \dfrac{1}{2} \sum_{i = 1}^{n} \left( h\left(x^{(i)}\right) - y^{(i)} \right)^2$$
where, for all $i \in { 1, \dots, n }$
$$h\left(x^{(i)}\right) = \sum_{j = 0}^{d} \theta_j x^{(i)}_j = \theta^T x^{(i)}$$
is the prediction of $x$ given the model $\theta$.
End of explanation
"""
theta_0 = [0, 0]
cost_function_total(X, Y, theta_0)
"""
Explanation: Let's now test the code written above and check the total cost function we would have when $\theta = [0, 0]$.
End of explanation
"""
def gradient(x, y, theta):
grad = [0, 0]
grad[0] = (predict(x, theta) - y) * x[0] # first value of the gradient
grad[1] = (predict(x, theta) - y) * x[1] # second value of the gradient
return grad
"""
Explanation: Note that this error is big, which is expectable because having $\theta = [0, 0]$ means always predicting $\hat{y} = 0$.
Defining the gradient of the cost function
Gradient on a single sample
Exercise: Define a function gradient that implements the gradient of the cost function for a given sample $(x, y)$. Recall from the lectures 1 and 2 that the gradient is given by:
$$\nabla J(\theta) = \left[ \dfrac{\partial}{\partial \theta_1} J(\theta), \dots, \dfrac{\partial}{\partial \theta_d} J(\theta) \right]^T$$
where, for all $j \in {0, \dots, d }$:
$$ \dfrac{\partial}{\partial \theta_j} J(\theta) = \left( h\left(x\right) - y \right) x_j. $$
Hint: Recall that $d = 1$, hence the gradient is of size $2$ (one value for $j = 0$, and another one for $j = 1$). Its two values are given by:
$$ \dfrac{\partial}{\partial \theta_0} J(\theta) = \left( h\left(x\right) - y \right) x_0 \quad \text{and} \quad
\dfrac{\partial}{\partial \theta_1} J(\theta) = \left( h\left(x\right) - y \right) x_1. $$
End of explanation
"""
gradient(X[0], Y[0], theta_0)
"""
Explanation: Let's try the gradient function on a simple example ($\theta = [0, 0]$ on the first sample of the training set, i.e. $\left(x^{(0)}, y^{(0)}\right)$).
End of explanation
"""
def gradient_total(X, Y, theta):
grad_total = [0, 0] # initialize the gradient with zeros
n = len(Y)
for i in range(n): # iterate over the training set
x = X[i] # get the ith feature vector
y = Y[i] # get the ith label
grad = gradient(x, y, theta) # predict the ith label given 'theta'
grad_total[0] += grad[0] # add the gradient corresponding to theta[0]
grad_total[1] += grad[1] # add the gradient corresponding to theta[1]
return grad_total
"""
Explanation: Gradient on the whole training set
Now that we are able to compute the gradient of the cost function on a single sample, we can easily compute gradient_total, the gradient of the cost function on the whole training set, by summing the gradients for all the samples in the training set.
End of explanation
"""
gradient_total(X, Y, theta_0)
"""
Explanation: Let's now test the code written above and check the total gradient we would have when $\theta = [0, 0]$
End of explanation
"""
def gradient_descent_step(X, Y, theta, alpha):
theta_updated = [0, 0]
grad = gradient_total(X, Y, theta)
theta_updated[0] = theta[0] - alpha * grad[0]
theta_updated[1] = theta[1] - alpha * grad[1]
return theta_updated
"""
Explanation: Question: What is the sign of the gradient values? What would it mean if we had such a gradient when applying a gradient descent?
Hint: Recall the gradient descent update:
$$\theta_j := \theta_j - \alpha \dfrac{\partial}{\partial \theta_j} J(\theta) \quad \text{for all } j \in {0, \dots, d }$$
Answer: Both values are negative, which means this gradient step would increase the value of $\theta$, due to the fact that we subtract the gradient. This makes sense, because:
- we start with $\theta = [0, 0]$,
- we expect $\theta_0 > 0$ and $\theta_1 > 0$ because otherwise we could predict a negative price.
Applying a gradient descent
Gradient descent step implementation
We now have all the building blocs needed for the gradient descent algorithm, that is:
- The loss function
- The gradient
Indeed, the iterative update scheme of this algorithm is given by the following formula:
$$\theta_j := \theta_j - \alpha \dfrac{\partial}{\partial \theta_j} J(\theta)$$
for all $j \in {0, \dots, d }$. Recall that $\alpha$ is a parameter called the learning rate (or step size).
Exercise: Define a function called gradient_descent_step that performs an update on theta by applying the formula above.
End of explanation
"""
alpha = 0.0001
theta_1 = gradient_descent_step(X, Y, theta_0, alpha)
theta_2 = gradient_descent_step(X, Y, theta_1, alpha)
theta_2
"""
Explanation: Try to run a few iterations manually. Play with the value of $\alpha$ to see how it impacts the algorithm.
End of explanation
"""
def gradient_descent(X, Y, alpha):
theta = [0, 0] # initializing theta with zeros (it can be initialized in another manner)
n_iteration_max = 100
loss_history = []
for i_iteration in range(n_iteration_max):
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
print("Iteration {:>2}. Current loss = {}".format(i_iteration, loss))
theta = gradient_descent_step(X, Y, theta, alpha)
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
print("Optimization complete. Final loss = {}".format(loss))
return theta, loss_history
"""
Explanation: Iterating gradient descent steps
The gradient_descent_step function implements a single step of the gradient descent algorithm. Implement a function called gradient_descent that starts from a given $\theta$ (for example $\theta = [0, 0]$) and applies 100 gradient descent iterations. Display the total cost function $J(\theta)$ at each iteration.
End of explanation
"""
theta_trained_gd, loss_history = gradient_descent(X, Y, alpha)
"""
Explanation: Play with the code you've just run. Try different values of $\alpha$ and see the impact it has.
Note: $\alpha = 0.0001$ works well in this case.
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(loss_history)
plt.ylabel('loss')
plt.show()
theta_trained_gd
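# Optional cross-check (not part of the original notebook): the OLS problem also has a
# closed-form solution through the normal equations, which should be close to the
# gradient-descent estimate above.
import numpy as np
X_arr = np.array(X)
Y_arr = np.array(Y)
theta_closed_form = np.linalg.solve(X_arr.T @ X_arr, X_arr.T @ Y_arr)
print(theta_closed_form)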
"""
Explanation: Let's have a more visual interpretation by plotting the loss over iterations.
End of explanation
"""
houses_sizes = range(150)
estimated_prices = [theta_trained_gd[0] + house_size * theta_trained_gd[1] for house_size in houses_sizes]
x1s = [x[1] for x in X]
plt.plot(houses_sizes, estimated_prices)
plt.ylabel("price")
plt.xlabel("house size")
plt.scatter(x1s, Y, color="red")
plt.show()
"""
Explanation: Let us see what the trained regression looks like.
End of explanation
"""
def stochastic_gradient_descent_step(x, y, theta, alpha):
theta_updated = [0, 0]
grad = gradient(x, y, theta)
theta_updated[0] = theta[0] - alpha * grad[0]
theta_updated[1] = theta[1] - alpha * grad[1]
return theta_updated
"""
Explanation: Question: Look at the evolution of the cost function over time. What comment can you make?
Answer: The loss function constantly drops over time with this initial value of $\theta$ and this $\alpha$. It ends up reaching a plateau at around 186. It seems like the algorithm has converged to the optimal value of the cost function.
Question: What does the value theta_trained_gd represent?
Answer: Recall that the model $\theta$ has two values, $\theta_0$ and $\theta_1$. Hence, with the model theta_trained_gd we've learnt, price prediction would be:
$$ \text{price} = \theta_0 + \theta_1 \times \text{area}.$$
Batch gradient descent vs. stochastic gradient descent
As we have seen during the lecture 1, the gradient descent methods are often split into 2 different subfamilies:
- Batch methods update $\theta$ after having computed the gradient on the whole training set
- Stochastic methods update $\theta$ after having computed the gradient on a single sample
The gradient descent we have implemented above (gradient_descent_step and gradient_descent) corresponds to the batch version because it sums the gradient of all the samples in the training set.
Exercise: Try to implement the stochastic version of the gradient descent algorithm. You will need to define a function stochastic_gradient_descent_step that compute a stochastic gradient step (on a single $(x, y)$ sample) and a function stochastic_gradient_descent iterates 100 stochastic gradient descent steps and returns the trained model $\theta$.
Solution: We first define the function stochastic_gradient_descent_step that implements a stochastic gradient step.
End of explanation
"""
def stochastic_gradient_descent(X, Y, alpha):
theta = [0, 0] # initializing theta with zeros (it can be initialized in another manner)
n_iteration_max = 100
loss_history = []
n_samples = len(Y)
for i_iteration in range(n_iteration_max):
i_sample = i_iteration % n_samples
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
print("Iteration {:>2}. Current loss = {}".format(i_iteration, loss))
theta = stochastic_gradient_descent_step(X[i_sample], Y[i_sample], theta, alpha) # run the gradient update on a single sample
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
print("Optimization complete. Final loss = {}".format(loss))
return theta, loss_history
"""
Explanation: Then the stochastic_gradient_descent function that will do the iterations.
End of explanation
"""
theta_trained_sgd, loss_history_sgd = stochastic_gradient_descent(X, Y, alpha)
"""
Explanation: Let's now apply the algorithm with the same parameters.
End of explanation
"""
plt.plot(loss_history_sgd)
plt.ylabel('loss')
plt.show()
"""
Explanation: Here again, let's plot the loss history over iterations.
End of explanation
"""
houses_sizes = range(150)
estimated_prices = [theta_trained_sgd[0] + house_size * theta_trained_sgd[1] for house_size in houses_sizes]
x1s = [x[1] for x in X]
plt.plot(houses_sizes, estimated_prices)
plt.ylabel("price")
plt.xlabel("house size")
plt.scatter(x1s, Y, color="red")
plt.show()
"""
Explanation: Again, let's look at the regression we obtain.
End of explanation
"""
import numpy as np
dtheta = 0.001
theta0_vector = np.arange(-1,+1, dtheta)
theta1_vector = np.arange(-1,+1, dtheta)
"""
Explanation: Question: Compare the results obtained when solving the OLS problem with stochastic gradient descent and batch gradient descent. Are the results the same? Why?
Hint: Plot the loss function.
Answer: The results are not the same; the loss function values differ a lot: more than 200 for the stochastic gradient descent, and about 186 for the batch gradient descent version.
To understand what happens, let's look at what the function looks like. It is a function of 2 variables ($\theta_0$ and $\theta_1$), so we can use either a 3D plot or a contour plot for visualize it.
The following code shows the contour plot. First, we need to define a grid of $\theta$ values on which we will evaluate the loss function. theta0_vector and theta1_vector define the values of $\theta_0$ and $\theta_1$ on which the function will be evaluated.
End of explanation
"""
n0 = len(theta0_vector)
n1 = len(theta1_vector)
loss = np.zeros([n0, n1])
for i0 in range(n0):
for i1 in range(n1):
loss[i0, i1] = cost_function_total(X, Y, [theta0_vector[i0], theta1_vector[i1]])
fig = plt.figure()
plt.contourf(theta0_vector, theta1_vector, loss)
plt.colorbar()
plt.show()
"""
Explanation: Let's compute the value of the loss function for each pair $(\theta_0, \theta_1)$.
End of explanation
"""
dtheta = 1.
# from -100 to +100 on both axis
theta0_vector = np.arange(-100,100, dtheta)
theta1_vector = np.arange(-100,100, dtheta)
n0 = len(theta0_vector)
n1 = len(theta1_vector)
loss = np.zeros([n0, n1])
for i0 in range(n0):
for i1 in range(n1):
loss[i0, i1] = cost_function_total(X, Y, [theta0_vector[i0], theta1_vector[i1]])
fig = plt.figure()
plt.contourf(theta0_vector, theta1_vector, loss)
plt.colorbar()
plt.show()
"""
Explanation: We can also look at the function from "further away":
End of explanation
"""
x_min = X[0][1]
x_max = X[0][1]
for i in range(len(X)):
x_min = min(x_min, X[i][1])
x_max = max(x_max, X[i][1])
def rescale(x):
return (x - x_min)/(x_max - x_min)
"""
Explanation: It appears that the function behaves very differently along the two axes:
- It varies very fast on the horizontal axis
- It varies very slowly on the vertical axis
Question: What could this be due to? How could we solve this issue?
Answer: This is due to the fact that the two features (the size and the intercept) are on very different scales: the intercept is constantly equal to 1, while the size of the house varies on a much wider range (up to 102). In our algorithm, we make no difference between $\theta_0$ and $\theta_1$: the same step-size is used for both coordinates, even though the cost function is much more sensitive to one of them.
A way to address this issue is to normalize (or rescale) the features. There are several ways to do it:
1. The first way consists in, for each feature, dividing all the feature values by its maximal value. In the case of the house size, it would consist in dividing all the house sizes by 102, so that the biggest house would have a size of 1.
2. Another way consists in min-max scaling the feature (mapping it to the $[0, 1]$ range):
$$ z = \dfrac{x - x_{\min}}{x_{\max} - x_{\min}} $$
so that the feature respectively has 0 and 1 as minimum and maximum value. Note that this does not apply to the intercept $x_0$, because it is constantly equal to $1$.
Homeworks:
1. Implement the feature normalization of your choice. Run the OLS algorithm on it, and compare the result with the non-normalized regression.
2. The gradient descent we have implemented seems to lower the loss smoothly. The implementation we proposed did 100 iterations, and we can see that the loss function value is not changing much after 40-50 iterations. Could you think of a way to stop the algorithm to avoid having too many iterations giving a very marginal gain in the loss function?
3. Add an outlier to the training set (for example, a house with a normal size but a very small or very big price), and run the OLS algorithm on it. What impact does the outlier have on the quality of the regression? How to correct this issue?
In the next session, we will talk about regularization and how to define a more complex model when the data is not linearly separable.
Let's apply the following transformation:
$$ z = \dfrac{x - x_{\min}}{x_{\max} - x_{\min}} $$
To do so, we need to compute the min and max values of $X$. Let's also define a function that computes $z$ given $x$
End of explanation
"""
Z = [[x[0], rescale(x[1])] for x in X]
print(Z)
"""
Explanation: Let's now apply this transformation to all the element of $X$.
Remark: We keep the intercept as it is, without applying the transformation to it.
End of explanation
"""
dtheta = 1.
# from -100 to +100 on both axis
theta0_vector = np.arange(-100,100, dtheta)
theta1_vector = np.arange(-100,100, dtheta)
n0 = len(theta0_vector)
n1 = len(theta1_vector)
loss = np.zeros([n0, n1])
for i0 in range(n0):
for i1 in range(n1):
loss[i0, i1] = cost_function_total(Z, Y, [theta0_vector[i0], theta1_vector[i1]])
fig = plt.figure()
plt.contourf(theta0_vector, theta1_vector, loss)
plt.colorbar()
plt.show()
"""
Explanation: The intercept remains unchanged, and the second value of each instance (which corresponds to the area of the house) is between 0 and 1.
End of explanation
"""
def gradient_descent(X, Y, alpha):
theta = [0, 0] # initializing theta with zeros (it can be initialized in another manner)
n_iteration_max = 100
loss_history = []
theta_history = [theta]
for i_iteration in range(n_iteration_max):
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
# print("Iteration {:>2}. Current loss = {}".format(i_iteration, loss))
theta = gradient_descent_step(X, Y, theta, alpha)
theta_history.append(theta)
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
print("Optimization complete. Final loss = {}".format(loss))
plt.plot(loss_history)
plt.ylabel('loss')
plt.xlabel('iterations')
plt.show()
return theta_history, loss_history
alpha = 0.0001
theta_history_gd, loss_history_gd = gradient_descent(Z, Y, alpha)
"""
Explanation: We can now apply the gradient algorithm we've defined previously.
End of explanation
"""
alpha = 1.
theta_history_gd, loss_history_gd = gradient_descent(Z, Y, alpha)
"""
Explanation: The loss is constantly going down, and does not seem to have reached its minimal value. This is because we chose $\alpha = 0.0001$, which isn't appropriate in this case. $\alpha$ is way too small, hence we are doing tiny steps and it would take a long time for the algorithm to converge.
Note: As previously, the chosen value for $\alpha$ is still important, if we take $\alpha = 1$, the algorithm would diverge:
End of explanation
"""
alpha = 0.1
theta_history_gd, loss_history = gradient_descent(Z, Y, alpha)
"""
Explanation: Instead, let us try with a higher value, say $\alpha = 0.1$.
End of explanation
"""
dtheta = 1.
# from -100 to +100 on both axis
theta0_vector = np.arange(0,100, dtheta)
theta1_vector = np.arange(0,100, dtheta)
n0 = len(theta0_vector)
n1 = len(theta1_vector)
loss = np.zeros([n0, n1])
for i0 in range(n0):
for i1 in range(n1):
loss[i0, i1] = cost_function_total(Z, Y, [theta0_vector[i0], theta1_vector[i1]])
fig = plt.figure()
plt.contourf(theta0_vector, theta1_vector, loss)
plt.colorbar()
for theta in theta_history_gd:
plt.scatter(theta[1], theta[0], color="red")
plt.show()
"""
Explanation: $\alpha = 0.1$ seems to work much better, the loss function is decreasing fast at the beginning and seems to reach a loss function value of about 77.
Note: This loss function value is different than the one we used to have without rescaling the $x$ value (77 vs. 186). This is completely normal, because rescaling $X$ into $Z$ changes the loss function value, which becomes
$$J(\theta) = \dfrac{1}{2} \sum_{i = 1}^{n} \left( h\left(z^{(i)}\right) - y^{(i)} \right)^2$$
instead of:
$$J(\theta) = \dfrac{1}{2} \sum_{i = 1}^{n} \left( h\left(x^{(i)}\right) - y^{(i)} \right)^2$$
($x^{(i)}$ is replaced by $z^{(i)}$ in the formula). As a consequence, having 77 rather than 186 doesn't mean we're doing a better job than before!
End of explanation
"""
def stochastic_gradient_descent(X, Y, alpha):
theta = [0, 0] # initializing theta with zeros (it can be initialized in another manner)
n_iteration_max = 100
loss_history = []
n_samples = len(Y)
theta_history = [theta]
for i_iteration in range(n_iteration_max):
i_sample = i_iteration % n_samples
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
print("Iteration {:>2}. Current loss = {}".format(i_iteration, loss))
theta = stochastic_gradient_descent_step(X[i_sample], Y[i_sample], theta, alpha) # run the gradient update on a single sample
theta_history.append(theta)
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
print("Optimization complete. Final loss = {}".format(loss))
return theta_history, loss_history
alpha = .5
theta_history_sgd, loss_history_sgd = stochastic_gradient_descent(Z, Y, alpha)
plt.plot(loss_history_sgd)
plt.ylabel('loss')
plt.show()
"""
Explanation: Let's do the same for stochastic gradient descent:
End of explanation
"""
dtheta = 1.
# from -100 to +100 on both axis
theta0_vector = np.arange(0,100, dtheta)
theta1_vector = np.arange(0,100, dtheta)
n0 = len(theta0_vector)
n1 = len(theta1_vector)
loss = np.zeros([n0, n1])
for i0 in range(n0):
for i1 in range(n1):
loss[i0, i1] = cost_function_total(Z, Y, [theta0_vector[i0], theta1_vector[i1]])
fig = plt.figure()
plt.contourf(theta0_vector, theta1_vector, loss)
plt.colorbar()
for theta in theta_history_sgd:
plt.scatter(theta[1], theta[0], color="red")
plt.show()
"""
Explanation: As was the case previously, stochastic gradient descent seems to oscillate around the optimal position. Let us see how the weight vector $\theta$ evolves.
End of explanation
"""
from math import sqrt
def stochastic_gradient_descent(X, Y, alpha):
theta = [0, 0] # initializing theta with zeros (it can be initialized in another manner)
n_iteration_max = 100
loss_history = []
n_samples = len(Y)
theta_history = [theta]
for i_iteration in range(n_iteration_max):
i_sample = i_iteration % n_samples
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
# change the learning rate
alpha_sgd = alpha / sqrt(i_iteration + 1)
theta = stochastic_gradient_descent_step(X[i_sample], Y[i_sample], theta, alpha_sgd) # run the gradient update on a single sample
theta_history.append(theta)
loss = cost_function_total(X, Y, theta)
loss_history.append(loss)
print("Optimization complete. Final loss = {}".format(loss))
return theta_history, loss_history
alpha = 1.
theta_history_sgd, loss_history_sgd = stochastic_gradient_descent(Z, Y, alpha)
plt.plot(loss_history_sgd)
plt.ylabel('loss')
plt.show()
dtheta = 1.
# from -100 to +100 on both axis
theta0_vector = np.arange(0,100, dtheta)
theta1_vector = np.arange(0,100, dtheta)
n0 = len(theta0_vector)
n1 = len(theta1_vector)
loss = np.zeros([n0, n1])
for i0 in range(n0):
for i1 in range(n1):
loss[i0, i1] = cost_function_total(Z, Y, [theta0_vector[i0], theta1_vector[i1]])
fig = plt.figure()
plt.contourf(theta0_vector, theta1_vector, loss)
plt.colorbar()
for theta in theta_history_sgd:
plt.scatter(theta[1], theta[0], color="red")
plt.show()
"""
Explanation: Let's have the learning rate decrease over time and put everything together:
End of explanation
"""
|
atcemgil/notes
|
LogisticRegression.ipynb
|
mit
|
%matplotlib inline
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
from IPython.display import clear_output, display, HTML
from matplotlib import rc
import scipy as sc
import scipy.optimize as opt
mpl.rc('font',**{'size': 20, 'family':'sans-serif','sans-serif':['Helvetica']})
mpl.rc('text', usetex=True)
def sigmoid(x):
return 1/(1+np.exp(-x))
def dsigmoid(x):
s = sigmoid(x)
return s*(1-s)
def inv_sigmoid(p=0.5):
xs = opt.bisect(lambda x: sigmoid(x)-p, a=-100, b=100)
return xs
def inv_sigmoid1D(w, b, p=0.5):
xs = opt.bisect(lambda x: sigmoid(w*x+b)-p, a=-100, b=100)
return xs
"""
Explanation: Logistic Regression
Logistic regression is a classification method. Its main goal is learning a function that returns a yes or no answer
when presented with a so-called feature vector as input.
As an example, suppose we are given a dataset, such as the one below:
| Class | Feature1 | Feature2 |
|---|---|---|
| 0 | 5.7 | 3.1 |
| 1 | -0.3 | 2 |
| ... | ... | ... |
| $y_i$ | $x_{i,1}$ | $x_{i,2}$ |
| ... | ... | ... |
| 1 | 0.4 | 5 |
The goal is learning to predict the labels of a future dataset, where we are given only the features but not the labels:
| Class | Feature1 | Feature2 |
|---|---|---|
| ? | 4.8 | 3.2 |
| ? | -0.7 | 2.4 |
| ... | ... | ... |
More formally, the dataset consists of $N$ feature vectors $x_i$ and the associated labels $y_i$ for each example $i=1\dots N$. The entries of $y$ are referred typically as class labels -- but in reality $y$ could model any answer to a true-false question, such as 'is object $i$ a flower?' or 'will customer $i$ buy product $j$ during the next month?'. We can arrange the features in a matrix $X$ and the labels in a vector $y$:
\begin{eqnarray}
X & = & \begin{pmatrix}
x_{1,1} & x_{1,2} & \dots & x_{1,D} \
x_{2,1} & x_{2,2} & \dots & x_{2,D} \
\vdots & \vdots & \vdots & \vdots \
x_{i,1} & x_{i,2} & \dots & x_{i,D} \
\vdots & \vdots & \vdots & \vdots \
x_{N,1} & x_{N,2} & \dots & x_{N,D} \
\end{pmatrix} =
\begin{pmatrix}
x_1^\top \
x_2^\top \
\dots \
x_i^\top \
\dots \
x_N^\top
\end{pmatrix}
\
{y} & = & \begin{pmatrix}
y_1 \
y_2 \
\vdots \
y_i \
\vdots \
y_N
\end{pmatrix}
\end{eqnarray}
where $x_{i,j}$ denotes the $j$'th feature of the $i$'th data point.
It is common to set a column of $X$ entirely to $1$'s; for example, we take $x_{i,D}=1$ for all $i$. This 'feature' is artificially added to the dataset to allow a slightly more flexible model -- even if we don't measure any feature, the relative numbers of ones and zeros in a dataset can provide a crude estimate of the probability of a true or false answer.
Logistic Regression is a method that can be used to solve binary classification problems, like the one above.
We will encode the two classes as $y_i \in {0,1}$. The key idea is learning a mapping from a feature vector $x$ to a probability, a number between $0$ and $1$.
The generative model is
$$
\Pr{y_i = 1} = \pi_i = \sigma(x_i^\top w)
$$
Here,
$\sigma(x)$ is the sigmoid function defined as
\begin{eqnarray}
\sigma(x) & = & \frac{1}{1+e^{-x}}
\end{eqnarray}
To understand logistic regression as a generative model, consider the following metaphor: assume that for each data instance $x_i$, we select a biased coin with probability $p(y_i = 1| w, x^\top_i) = \pi_i = \sigma(x_i^\top w)$, throw the coin and label the data item with class $y_i$ accordingly.
Mathematically, we assume that each label $y_i$, or more precisely the answer to our yes-no question regarding the object $i$ with feature vector $x_i$, is drawn from a Bernoulli distribution. That is:
\begin{eqnarray}
\pi_i & = & \sigma(x_i^\top w) \
y_i & \sim &\mathcal{BE}(\pi)
\end{eqnarray}
Here, we think of a biased coin with two sides denoted as $H$ (head) and $T$ (tail) with probability of side $H$ as $\pi$, and consequently the probability of side $T$ with $1-\pi$.
We denote the outcome of the coin toss with the random variable $y \in {0, 1}$.
For each throw $i$, $y_i$ is the answer to the question 'Is the outcome heads?'. We write the probability as $p(y = 1) = \pi$ and probability of tails is $p(y = 0) = 1-\pi$. More compactly, the probability of the outcome of a toss, provided we know $\pi$, is written as
\begin{eqnarray}
p(y|\pi) = \pi^y(1-\pi)^{1-y}
\end{eqnarray}
Maximum Likelihood
Maximum likelihood (ML) is a method for choosing the unknown parameters of a probability distribution, given some data that is assumed to be drawn from this distribution. The distribution itself is referred as the probability model, or often just the model.
Example
Suppose we are given only $5$ outcomes when a coin is thrown:
$$
H, T, H, T, T
$$
What is the probability that the outcome is, say, heads $H$, if we know that the coin is biased?
One reasonable answer may be the frequency of heads, $2/5$.
The ML solution coincides with this answer. For a derivation,
we define $y_i$ for $i = 1,2,\dots, 5$ as
$$
y_i = \left\{ \begin{array}{cc} 1 & \text{coin $i$ is H} \\ 0 & \text{coin $i$ is T} \end{array} \right.
$$
hence
$$
y = [1,0,1,0,0]^\top
$$
If we assume that the outcomes were independent, the probability of observing the above sequence as a function of the parameter $\pi$ is the product of each individual probability
$$
\Pr{y = [1,0,1,0,0]^\top} = \pi \cdot (1-\pi) \cdot \pi \cdot (1-\pi) \cdot(1-\pi)
$$
We could try finding the $\pi$ value that maximizes this function. We will call the corresponding value the maximum likelihood solution, and denote it as $\pi^*$.
It is often more convenient to work with the logarithm of this function, known as the loglikelihood function.
$$
\mathcal{L}(\pi) = 2 \log \pi + 3 \log (1-\pi)
$$
For finding the maximum, we take the derivative with respect to $\pi$ and set to zero.
$$
\frac{d \mathcal{L}(\pi)}{d \pi} = \frac{2}{\pi^*} - \frac{3}{1-\pi^*} = 0
$$
When we solve we obtain $$ \pi^* = \frac{2}{5} $$
More generally, when we observe $y_i$ for $i=1 \dots N$, the loglikelihood is
\begin{eqnarray}
\mathcal{L}(\pi)& = & \log \left(\prod_{i : y_i=1} \pi \right) \left(\prod_{i : y_i=0}(1- \pi) \right) \
& = & \log \prod_{i = 1}^N \pi^{y_i} (1- \pi)^{1-y_i} \
& = & \log \pi^{ \sum_i y_i} (1- \pi)^{\sum_i (1-y_i) } \
& = & \left(\sum_i y_i\right) \log \pi + \left(\sum_i (1-y_i) \right) \log (1- \pi)
\end{eqnarray}
If we define the number of observed $0$'s and $1$'s by $c_0$ and $c_1$ respectively, we have
\begin{eqnarray}
\mathcal{L}(\pi)& = & c_1 \log \pi + c_0 \log (1- \pi)
\end{eqnarray}
Taking the derivative and setting to $0$ results in
$$
\pi^* = \frac{c_1}{c_0+c_1} = \frac{c_1}{N}
$$
End of explanation
"""
fig = plt.figure(figsize=(10,6))
ax = fig.gca()
ax.set_ylim([-0.1,1.1])
x = np.linspace(-10,10,100)
ax.set_xlim([-10,10])
ln = plt.Line2D(x, sigmoid(x))
ln2 = plt.axvline([0], ls= ':', color='k')
ln_left = plt.axvline([0], ls= ':', color='b')
ln_right = plt.axvline([0], ls= ':', color='r')
ax.add_line(ln)
plt.close(fig)
ax.set_xlabel('$x$')
ax.set_ylabel('$\sigma(wx + b)$')
def plot_fun(w=1, b=0):
ln.set_ydata(sigmoid(w*x+b))
if np.abs(w)>0.00001:
ln2.set_xdata(inv_sigmoid1D(w,b,0.5))
ln_left.set_xdata(inv_sigmoid1D(w,b,0.25))
ln_right.set_xdata(inv_sigmoid1D(w,b,0.75))
display(fig)
res = interact(plot_fun, w=(-5, 5, 0.1), b=(-10.0,10.0,0.1))
def LR_loglikelhood(X, y, w):
tmp = X.dot(w)
return y.T.dot(tmp) - np.sum(np.log(np.exp(tmp)+1))
w = np.array([0.5, 2, 3])
D = 3
N = 20
# Some random features
X = 2*np.random.randn(N,D)
X[:,0] = 1
# Generate class labels
pi = sigmoid(np.dot(X, w))
y = np.array([1 if u else 0 for u in np.random.rand(N) < pi]).reshape((N))
xl = -5.
xr = 5.
yl = -5.
yr = 5.
fig = plt.figure(figsize=(5,5))
plt.plot(X[y==1,1],X[y==1,2],'xr')
plt.plot(X[y==0,1],X[y==0,2],'ob')
ax = fig.gca()
ax.set_ylim([yl, yr])
ax.set_xlim([xl, xr])
ln = plt.Line2D([],[],color='k')
ln_left = plt.Line2D([],[],ls= ':', color='b')
ln_right = plt.Line2D([],[],ls= ':', color='r')
ax.add_line(ln)
ax.add_line(ln_left)
ax.add_line(ln_right)
plt.close(fig)
ax.set_xlabel('$x_1$')
#ax.grid(xdata=np.linspace(xl,xr,0.1))
#ax.grid(ydata=np.linspace(yl,yr,0.1))
ax.set_ylabel('$x_2$')
ax.set_xticks(np.arange(xl,xr))
ax.set_yticks(np.arange(yl,yr))
ax.grid(True)
def plot_boundry(w0,w1,w2):
if w1 != 0:
xa = -(w0+w2*yl)/w1
xb = -(w0+w2*yr)/w1
ln.set_xdata([xa, xb])
ln.set_ydata([yl, yr])
xa = -(-inv_sigmoid(0.25) + w0+w2*yl)/w1
xb = -(-inv_sigmoid(0.25) + w0+w2*yr)/w1
ln_left.set_xdata([xa, xb])
ln_left.set_ydata([yl, yr])
xa = -(-inv_sigmoid(0.75) + w0+w2*yl)/w1
xb = -(-inv_sigmoid(0.75) + w0+w2*yr)/w1
ln_right.set_xdata([xa, xb])
ln_right.set_ydata([yl, yr])
elif w2!=0:
ya = -(w0+w1*xl)/w2
yb = -(w0+w1*xr)/w2
ln.set_xdata([xl, xr])
ln.set_ydata([ya, yb])
ya = -(-inv_sigmoid(0.25) + w0+w1*xl)/w2
yb = -(-inv_sigmoid(0.25) + w0+w1*xr)/w2
ln_left.set_xdata([xl, xr])
ln_left.set_ydata([ya, yb])
ya = -(-inv_sigmoid(0.75) + w0+w1*xl)/w2
yb = -(-inv_sigmoid(0.75) + w0+w1*xr)/w2
ln_right.set_xdata([xl, xr])
ln_right.set_ydata([ya, yb])
else:
ln.set_xdata([])
ln.set_ydata([])
ax.set_title('$\mathcal{L}(w) = '+str(LR_loglikelhood(X, y, np.array([w0, w1, w2])))+'$')
display(fig)
res = interact(plot_boundry, w0=(-3.5, 3, 0.1), w1=(-3.,4,0.1), w2=(-3.,4,0.1))
"""
Explanation: Plotting the Sigmoid
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
def logsumexp(a,b):
m = np.max([a,b])
return m + np.log(np.exp(a-m) + np.exp(b-m))
def hinge(x):
return x if x>0 else 0
xx = np.arange(-5,3,0.1)
plt.figure(figsize=(12,10))
for i,alpha in enumerate([1,2,5,10]):
f = [logsumexp(0, alpha*z)/alpha for z in xx]
h = [hinge(z) for z in xx]
plt.subplot(2,2,i+1)
plt.plot(xx, f, 'r')
plt.plot(xx, h, 'k:')
plt.xlabel('z')
#plt.title('a = '+ str(alpha))
if alpha==1:
plt.legend([ 'logsumexp(0,z)','hinge(z)' ], loc=2 )
else:
plt.legend([ 'logsumexp(0,{a} z)/{a}'.format(a=alpha),'hinge(z)' ], loc=2 )
plt.show()
"""
Explanation: Logistic Regression: Learning the parameters
The logistic regression model is very similar to the coin model. The main difference is that for each example $i$, we use a specific coin with a probability $\sigma(x_i^\top w)$ that depends on the specific feature vector $x_i$ and the parameter vector $w$ that is shared by all examples. The likelihood of the observations, that is the probability of observing the class sequence is
$\begin{eqnarray}
p(y_1, y_2, \dots, y_N|w, X ) &=& \left(\prod_{i : y_i=1} \sigma(x_i^\top w) \right) \left(\prod_{i : y_i=0}(1- \sigma(x_i^\top w)) \right)
\end{eqnarray}
$
Here, the left product is the expression for examples from class $1$ and the right product is for examples from class $0$.
We will look for the particular setting of the weight vector, the maximum likelihood solution, denoted by $w^*$.
$
\begin{eqnarray}
w^* & = & \arg\max_{w} {\cal L}(w)
\end{eqnarray}
$
where the loglikelihood function
$
\begin{eqnarray}
{\cal L}(w) & = & \log p(y_1, y_2, \dots, y_N|w, x_1, x_2, \dots, x_N ) \
& = & \sum_{i : y_i=1} \log \sigma(x_i^\top w) + \sum_{i : y_i=0} \log (1- \sigma(x_i^\top w)) \
& = & \sum_{i : y_i=1} x_i^\top w - \sum_{i : y_i=1} \log(1+e^{x_i^\top w}) - \sum_{i : y_i=0}\log({1+e^{x_i^\top w}}) \
& = & \sum_i y_i x_i^\top w - \sum_{i} \log(1+e^{x_i^\top w}) \
& = & y^\top X w - \mathbf{1}^\top \text{logsumexp}(0, X w)
\end{eqnarray}
$
$\mathbf{1}$ is a vector of ones; note that when we premultiply a vector $v$ by $\mathbf{1}^T$ we get the sum of the entries of $v$, i.e. $\mathbf{1}^T v = \sum_i v_i$.
We define the function $\text{logsumexp}(a, b)$ as follows: When $a$ and $b$ are scalars,
$$
f = \text{logsumexp}(a, b) \equiv \log(e^a + e^b)
$$
When $a$ and $b$ are vectors of the same size, $f$ is the same size as $a$ and $b$ where each entry of $f$ is
$$
f_i = \text{logsumexp}(a_i, b_i) \equiv \log(e^{a_i} + e^{b_i})
$$
Unlike the least-squares problem, an expression for direct evaluation of $w^*$ is not known so we need to resort to numerical optimization.
Before we proceed, it is informative to look at the shape of $f(x) = \text{logsumexp}(0, x)$.
When $x$ is negative and far smaller than zero, $f = 0$ and for large values of $x$, $f(x) = x$. Hence it looks like a so-called hinge function $h$
$$
h(x) = \left\{ \begin{array}{cc} 0 & x < 0 \\ x & x \geq 0 \end{array} \right.
$$
We define
$$
f_\alpha(x) = \frac{1}{\alpha}\text{logsumexp}(0, \alpha x)
$$
When $\alpha = 1$, we have the original logsumexp function. For larger $\alpha$, it becomes closer to the hinge loss.
End of explanation
"""
xx = np.arange(-10,10,0.1)
y = 1
f = [-y*z + logsumexp(0, z) for z in xx]
f0 = [logsumexp(0, z) for z in xx]
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
plt.plot(xx, f, 'r')
plt.xlabel('$z_i$')
plt.ylabel('$-l_i$')
plt.title('Cost for examples with $y = $'+str(y))
plt.subplot(1,2,2)
plt.plot(xx, f0, 'r')
plt.xlabel('$z_i$')
plt.ylabel('$-l_i$')
plt.title('Cost for examples with $y = 0$')
plt.show()
"""
Explanation: The resemblance of the logsumexp function to a hinge function provides a nice interpretation of the log likelihood. Consider the negative log likelihood written in terms of the contributions of each single item:
$$
- \mathcal{L}(\pi) = - \sum_i l_i(w)
$$
We denote the inner product of the features of item $i$ and the parameters as $z_i = x_i^\top w$.
Then define the 'error' made on a single item as the minus likelihood
$$
E_i(w) \equiv -l_i(w) = - y_i x_i^\top w + \text{logsumexp}(0, x_i^\top w) = - y_i z_i + \text{logsumexp}(0, z_i)
$$
Suppose the target class is $y_i = 1$. When $z_i \gg 0$, the item $i$ will be classified correctly and won't contribute to the total error as $-l_i(w) \approx 0$. However, when $z_i \ll 0$, the $\text{logsumexp}$ term will be zero and this will incur an error of $-z_i$. If instead the true target had been $y_i = 0$, the error reduces to
$E_i(w) \approx \text{logsumexp}(0, z_i)$, incurring no error when $z_i \ll 0$ and incurring an error of approximately $z_i$ when $z_i \gg 0$.
Below, we show the error for a range of outputs $z_i = x_i^\top w$ when the target is $1$ or $0$. When the target is $y=1$, we penalize each negative output, if the target is $y =0$ positive outputs are penalized.
End of explanation
"""
%matplotlib inline
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
# Generate a random logistic regression problem
def sigmoid(t):
return np.exp(t)/(1+np.exp(t))
def generate_toy_dataset(number_of_features=3, number_of_datapoints=20, styles = ['ob', 'xr']):
D = number_of_features
N = number_of_datapoints
# Some random features
X = 2*np.random.rand(N,D)-1
X[:,0] = 1
# Generate a random paramater vector
w_true = np.random.randn(D,1)
# Generate class labels
pi = sigmoid(np.dot(X, w_true))
y = np.array([1 if u else 0 for u in np.random.rand(N,1) < pi]).reshape((N))
return X, y, w_true, D, N
styles = ['ob', 'xr']
X, y, w_true, D, N = generate_toy_dataset(number_of_features=3, number_of_datapoints=20, styles=styles)
xl = -1.5; xr = 1.5; yl = -1.5; yr = 1.5
fig = plt.figure(figsize=(5,5))
plt.plot(X[y==1,1],X[y==1,2],styles[1])
plt.plot(X[y==0,1],X[y==0,2],styles[0])
ax = fig.gca()
ax.set_ylim([yl, yr])
ax.set_xlim([xl, xr])
plt.show()
# Implement Gradient Descent
w = np.random.randn(D)
# Learning rate
eta = 0.05
W = []
MAX_ITER = 200
for epoch in range(MAX_ITER):
W.append(w)
dL = np.dot(X.T, y-sigmoid(np.dot(X,w)))
w = w + eta*dL
# Implement Gradient Descent
w = np.random.randn(D)
# Learning rate
eta = 0.05
MAX_ITER = 200
for epoch in range(MAX_ITER):
dL = 0
for i in range(X.shape[0]):
dL = dL + X[i,:].T*(y[i]-sigmoid(X[i,:].dot(w)))
w = w + eta*dL
xl = -1.5
xr = 1.5
yl = -1.5
yr = 1.5
fig = plt.figure(figsize=(5,5))
ax = fig.gca()
ax.set_ylim([yl, yr])
ax.set_xlim([xl, xr])
plt.plot(X[y==1,1],X[y==1,2],styles[1])
plt.plot(X[y==0,1],X[y==0,2],styles[0])
ln = plt.Line2D([],[],color='k')
ln_left = plt.Line2D([],[],ls= ':', color=styles[0][1])
ln_right = plt.Line2D([],[],ls= ':', color=styles[1][1])
ax.add_line(ln)
ax.add_line(ln_left)
ax.add_line(ln_right)
plt.close(fig)
ax.set_xlabel('$x_1$')
ax.set_ylabel('$x_2$')
ax.set_xticks(np.arange(xl,xr))
ax.set_yticks(np.arange(yl,yr))
ax.grid(True)
def plot_boundry(w0,w1,w2):
if w1 != 0:
xa = -(w0+w2*yl)/w1
xb = -(w0+w2*yr)/w1
ln.set_xdata([xa, xb])
ln.set_ydata([yl, yr])
xa = -(-inv_sigmoid(0.25) + w0+w2*yl)/w1
xb = -(-inv_sigmoid(0.25) + w0+w2*yr)/w1
ln_left.set_xdata([xa, xb])
ln_left.set_ydata([yl, yr])
xa = -(-inv_sigmoid(0.75) + w0+w2*yl)/w1
xb = -(-inv_sigmoid(0.75) + w0+w2*yr)/w1
ln_right.set_xdata([xa, xb])
ln_right.set_ydata([yl, yr])
elif w2!=0:
ya = -(w0+w1*xl)/w2
yb = -(w0+w1*xr)/w2
ln.set_xdata([xl, xr])
ln.set_ydata([ya, yb])
ya = -(-inv_sigmoid(0.25) + w0+w1*xl)/w2
yb = -(-inv_sigmoid(0.25) + w0+w1*xr)/w2
ln_left.set_xdata([xl, xr])
ln_left.set_ydata([ya, yb])
ya = -(-inv_sigmoid(0.75) + w0+w1*xl)/w2
yb = -(-inv_sigmoid(0.75) + w0+w1*xr)/w2
ln_right.set_xdata([xl, xr])
ln_right.set_ydata([ya, yb])
else:
ln.set_xdata([])
ln.set_ydata([])
display(fig)
def plot_boundry_of_weight(iteration=0):
i = iteration
w = W[i]
plot_boundry(w[0],w[1],w[2])
interact(plot_boundry_of_weight, iteration=(0,len(W)-1))
plot_boundry_of_weight(-1)
"""
Explanation: Properties of the logsumexp function
If
$$
f(z) = \text{logsumexp}(0, z) = \log(1 + \exp(z))
$$
The derivative is
$$
\frac{df(z)}{dz} = \frac{\exp(z)}{1 + \exp(z)} = \sigma(z)
$$
When $z$ is a vector, $f(z)$ is a vector. The derivative of
$$
\sum_i f(z_i) = \mathbf{1}^\top f(z)
$$
$$
\frac{d \mathbf{1}^\top f(z)}{dz} = \left(\begin{array}{c} \sigma(z_1) \ \vdots \ \sigma(z_N) \end{array} \right) \equiv \sigma(z)
$$
where the sigmoid function $\sigma$ is applied elementwise to $z$.
Properties of the sigmoid function
Note that
\begin{eqnarray}
\sigma(x) & = & \frac{e^x}{(1+e^{-x})e^x} = \frac{e^x}{1+e^{x}} \
1 - \sigma(x) & = & 1 - \frac{e^x}{1+e^{x}} = \frac{1+e^{x} - e^x}{1+e^{x}} = \frac{1}{1+e^{x}}
\end{eqnarray}
\begin{eqnarray}
\sigma'(x) & = & \frac{e^x(1+e^{x}) - e^{x} e^x}{(1+e^{x})^2} = \frac{e^x}{1+e^{x}}\frac{1}{1+e^{x}} = \sigma(x) (1-\sigma(x))
\end{eqnarray}
\begin{eqnarray}
\log \sigma(x) & = & -\log(1+e^{-x}) = x - \log(1+e^{x}) \
\log(1 - \sigma(x)) & = & -\log({1+e^{x}})
\end{eqnarray}
Exercise: Plot the sigmoid function and its derivative.
Exercise: Show that
$\tanh(z) = 2\sigma(2z) - 1$
Solve
$$
\text{maximize}\; \mathcal{L}(w)
$$
Optimization via gradient ascent
One way to perform the optimization is gradient ascent:
\begin{eqnarray}
w^{(\tau)} & \leftarrow & w^{(\tau-1)} + \eta \nabla_w {\cal L}
\end{eqnarray}
where
\begin{eqnarray}
\nabla_w {\cal L} & = &
\begin{pmatrix}
{\partial {\cal L}}/{\partial w_1} \
{\partial {\cal L}}/{\partial w_2} \
\vdots \
{\partial {\cal L}}/{\partial w_{D}}
\end{pmatrix}
\end{eqnarray}
is the gradient vector and $\eta$ is a learning rate.
Evaluating the gradient (Short Derivation)
$$
\mathcal{L}(w) = y^\top X w - \mathbf{1}^\top \text{logsumexp}(0, X w)
$$
$$
\frac{d\mathcal{L}(w)}{dw} = X^\top y - X^\top \sigma(X w) = X^\top (y -\sigma(X w))
$$
Evaluating the gradient (Long Derivation)
The partial derivative of the loglikelihood with respect to the $k$'th entry of the weight vector is given by the chain rule as
\begin{eqnarray}
\frac{\partial{\cal L}}{\partial w_k} & = & \frac{\partial{\cal L}}{\partial \sigma(u)} \frac{\partial \sigma(u)}{\partial u} \frac{\partial u}{\partial w_k}
\end{eqnarray}
\begin{eqnarray}
{\cal L}(w) & = & \sum_{i : y_i=1} \log \sigma(w^\top x_i) + \sum_{i : y_i=0} \log (1- \sigma(w^\top x_i))
\end{eqnarray}
\begin{eqnarray}
\frac{\partial{\cal L}(\sigma)}{\partial \sigma} & = & \sum_{i : y_i=1} \frac{1}{\sigma(w^\top x_i)} - \sum_{i : y_i=0} \frac{1}{1- \sigma(w^\top x_i)}
\end{eqnarray}
\begin{eqnarray}
\frac{\partial \sigma(u)}{\partial u} & = & \sigma(w^\top x_i) (1-\sigma(w^\top x_i))
\end{eqnarray}
\begin{eqnarray}
\frac{\partial w^\top x_i }{\partial w_k} & = & x_{i,k}
\end{eqnarray}
So the gradient is
\begin{eqnarray}
\frac{\partial{\cal L}}{\partial w_k} & = & \sum_{i : y_i=1} \frac{\sigma(w^\top x_i) (1-\sigma(w^\top x_i))}{\sigma(w^\top x_i)} x_{i,k} - \sum_{i : y_i=0} \frac{\sigma(w^\top x_i) (1-\sigma(w^\top x_i))}{1- \sigma(w^\top x_i)} x_{i,k} \
& = & \sum_{i : y_i=1} {(1-\sigma(w^\top x_i))} x_{i,k} - \sum_{i : y_i=0} {\sigma(w^\top x_i)} x_{i,k}
\end{eqnarray}
We can write this expression more compactly by noting
\begin{eqnarray}
\frac{\partial{\cal L}}{\partial w_k} & = & \sum_{i : y_i=1} {(\underbrace{1}_{y_i}-\sigma(w^\top x_i))} x_{i,k} + \sum_{i : y_i=0} {(\underbrace{0}_{y_i} - \sigma(w^\top x_i))} x_{i,k} \\
& = & \sum_i (y_i - \sigma(w^\top x_i)) x_{i,k}
\end{eqnarray}
$\newcommand{\diag}{\text{diag}}$
Test on a synthetic problem
We generate a random dataset and then try to learn to classify it.
End of explanation
"""
#x = np.matrix('[-2,1; -1,2; 1,5; -1,1; -3,-2; 1,1] ')
x = np.matrix('[-0.5,0.5;2,-1;-1,-1;1,1;1.5,0.5]')
#y = np.matrix('[0,0,1,0,0,1]').T
y = np.matrix('[0,0,1,1,1]').T
N = x.shape[0]
#A = np.hstack((np.power(x,0), np.power(x,1), np.power(x,2)))
#X = np.hstack((x, np.ones((N,1)) ))
X = x
def sigmoid(x):
return 1/(1+np.exp(-x))
idx = np.nonzero(y)[0]
idxc = np.nonzero(1-y)[0]
fig = plt.figure(figsize=(8,4))
plt.plot(x[idx,0], x[idx,1], 'rx')
plt.plot(x[idxc,0], x[idxc,1], 'bo')
fig.gca().set_xlim([-1.1,2.1])
fig.gca().set_ylim([-1.1,1.1])
print(idxc)
print(idx)
plt.show()
from itertools import product
def ellipse_line(A, mu, col='b'):
'''
Creates an ellipse from short line segments y = A x + \mu
where x is on the unit circle.
'''
N = 18
th = np.arange(0, 2*np.pi+np.pi/N, np.pi/N)
X = np.mat(np.vstack((np.cos(th),np.sin(th))))
Y = A*X
ln = plt.Line2D(mu[0]+Y[0,:],mu[1]+Y[1,:],markeredgecolor='w', linewidth=1, color=col)
return ln
left = -5
right = 3
bottom = -5
top = 7
step = 0.1
W0 = np.arange(left,right, step)
W1 = np.arange(bottom,top, step)
LLSurf = np.zeros((len(W1),len(W0)))
# y^\top X w - \mathbf{1}^\top \text{logsumexp}(0, X w)
vmax = -np.inf
vmin = np.inf
for i,j in product(range(len(W1)), range(len(W0))):
w = np.matrix([W0[j], W1[i]]).T
p = X*w
ll = y.T*p - np.sum(np.log(1+np.exp(p)))
vmax = np.max((vmax, ll))
vmin = np.min((vmin, ll))
LLSurf[i,j] = ll
fig = plt.figure(figsize=(10,10))
plt.imshow(LLSurf, interpolation='nearest',
vmin=vmin, vmax=vmax,origin='lower',
extent=(left,right,bottom,top),cmap=plt.cm.jet)
plt.xlabel('w0')
plt.ylabel('w1')
plt.colorbar()
W0 = np.arange(left+2,right-5, 12*step)
W1 = np.arange(bottom+1,top-10, 12*step)
for i,j in product(range(len(W1)), range(len(W0))):
w = np.matrix([W0[j], W1[i]]).T
#w = np.mat([-1,1]).T
p = sigmoid(X*w)
dw = 0.2*X.T*(y-p)
#print(p)
S = np.mat(np.diag(np.asarray(np.multiply(p,1-p)).flatten()))
H = X.T*S*X
dw_nwt = 0.08*H.I*X.T*(y-p)
C = np.linalg.cholesky(H.I)
# plt.hold(True)
ln = ellipse_line(C/3., w, 'w')
ax = fig.gca()
ax.add_line(ln)
ln2 = plt.Line2D((float(w[0]), float(w[0]+dw[0])), (float(w[1]), float(w[1]+dw[1])),color='y')
ax.add_line(ln2)
ln3 = plt.Line2D((float(w[0]), float(w[0]+dw_nwt[0])), (float(w[1]), float(w[1]+dw_nwt[1])),color='w')
ax.add_line(ln3)
plt.plot(w[0,0],w[1,0],'.w')
#print(C)
#print(S)
ax.set_xlim((left,right))
ax.set_ylim((bottom,top))
plt.show()
print(y)
print(X)
#w = np.random.randn(3,1)
w = np.mat('[1;2]')
print(w)
print(sigmoid(X*w))
eta = 0.1
for i in range(10000):
pr = sigmoid(X*w)
w = w + eta*X.T*(y-pr)
print(np.hstack((y,pr)))
print(w)
"""
Explanation: Second order optimization
Newton's method
Evaluating the Hessian
The Hessian is
\begin{eqnarray}
\frac{\partial^2{\cal L}}{\partial w_k \partial w_r} & = & - \sum_i (1-\sigma(w^\top x_i)) \sigma(w^\top x_i) x_{i,k} x_{i,r} \\
\pi & \equiv & \sigma(X w) \\
\nabla \nabla^\top \mathcal{L} & = & -X^\top \diag(\pi(1 - \pi)) X
\end{eqnarray}
For reference, the plain gradient-ascent update rule is
\begin{eqnarray}
w^{(\tau)} = w^{(\tau-1)} + \eta X^\top (y-\sigma(X w))
\end{eqnarray}
Newton's method replaces the fixed step size $\eta$ with the inverse of the negative Hessian, giving
\begin{eqnarray}
w^{(\tau)} = w^{(\tau-1)} + \left(X^\top \diag(\pi(1 - \pi)) X\right)^{-1} X^\top (y-\sigma(X w))
\end{eqnarray}
End of explanation
"""
%matplotlib inline
from cvxpy import *
import numpy as np
import matplotlib as mpl
import matplotlib.pylab as plt
"""
Explanation: Optimization Frameworks
CVX -- Convex Optimization
CVX is a framework that can be used for solving convex optimization problems.
Convex optimization includes many problems of interest; for example, minimizing the negative loglikelihood of logistic regression is a convex problem.
Unfortunately, many important and interesting problems are not convex.
End of explanation
"""
def sigmoid(x):
return 1/(1+np.exp(-x))
# Number of data points
N = 1000
# Number of relevant features
K = 10
# Number of irrelevant features
Ke = 30
# Generate random features
X = np.matrix(np.random.randn(N, K + Ke))
# Generate parameters and set the irrelevant ones to zero
w_true = np.random.randn(K + Ke,1)
w_true[K:] = 0
p = sigmoid(X*w_true)
u = np.random.rand(N,1)
y = (u < p)
y = y.astype(np.float64)
# Regularization coefficient
lam = 100.
zero_vector = np.zeros((N,1))
# Construct the problem.
w = Variable(K+Ke)
objective = Minimize(lam*norm(w, np.inf ) -y.T*X*w + sum_entries(log_sum_exp(hstack(zero_vector, X*w),axis=1)))
prob = Problem(objective)
# The optimal objective is returned by prob.solve().
result = prob.solve()
# The optimal value for x is stored in x.value.
#print(w.value)
plt.figure(figsize=(10,4))
plt.stem(w.value, markerfmt='ob')
plt.stem(w_true, markerfmt='xr')
plt.gca().set_xlim((-1, K+Ke))
plt.legend(['Estimated', 'True'])
plt.show()
"""
Explanation: Selecting relevant features with regularization
Below we generate a dataset with some irrelevant features that are not informative for classification
Maximize the penalized loglikelihood
$$
\mathcal{L}(w) - \lambda \|w\|_p
$$
(the code above uses $p = \infty$ and minimizes the equivalent negated objective).
End of explanation
"""
X_np, y_np, w_true_np, M, N = generate_toy_dataset(number_of_features=3, number_of_datapoints=20)
"""
Explanation: Optimization with pytorch
End of explanation
"""
# Initialization
w_np = np.ones(M)
# Learnig rate
eta = 0.01
MAX_ITER = 100
for epoch in range(MAX_ITER):
sig = sigmoid(np.dot(X_np,w_np))
# Gradient dLL/dw -- symbolically derived and hard coded
w_grad = np.dot(X_np.T, y_np-sig)
# Gradient ascent step
w_np = w_np + eta*w_grad
print(w_np)
"""
Explanation: Gradient Descent for Logistic Regression: Reference implementation in numpy
End of explanation
"""
import torch
import torch.autograd
from torch.autograd import Variable
#sigmoid_f = torch.nn.Sigmoid()
def sigmoid_f(x):
return 1./(1. + torch.exp(-x))
X = Variable(torch.from_numpy(X_np).double())
y = Variable(torch.from_numpy(y_np.reshape(N,1)).double())
# Implementation
w = Variable(torch.ones(M,1).double(), requires_grad=True)
eta = 0.01
MAX_ITER = 100
for epoch in range(MAX_ITER):
sig = sigmoid_f(torch.matmul(X, w))
# Compute the loglikelihood
LL = torch.sum(y*torch.log(sig) + (1-y)*torch.log(1-sig))
# Compute the gradients by automated differentiation
LL.backward()
# The gradient ascent step
w.data.add_(eta*w.grad.data)
# Reset the gradients, as otherwise they are accumulated in w.grad
w.grad.zero_()
print(w.data.numpy())
%connect_info
"""
Explanation: Gradient Descent for Logistic Regression: First implementation in pytorch
End of explanation
"""
|
cloudmesh/book
|
notebooks/scikit-learn/scikit-learn-k-means.ipynb
|
apache-2.0
|
! pip install numpy
! pip install scipy -U
! pip install -U scikit-learn
"""
Explanation: Installation
Source: ...
Scikit-learn requires:
Python (>= 2.6 or >= 3.3),
NumPy (>= 1.6.1),
SciPy (>= 0.9).
If you already have a working installation of numpy and scipy, the easiest way to install scikit-learn is using pip
End of explanation
"""
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
"""
Explanation: Import
End of explanation
"""
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_,
n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
"""
Explanation: Create samples
End of explanation
"""
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
"""
Explanation: Visualize
End of explanation
"""
|
aattaran/Machine-Learning-with-Python
|
titanic/titanic_survival_exploration[1].ipynb
|
bsd-3-clause
|
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
"""
Explanation: Machine Learning Engineer Nanodegree
Introduction and Foundations
Project: Titanic Survival Exploration
In 1912, the ship RMS Titanic struck an iceberg on its maiden voyage and sank, resulting in the deaths of most of its passengers and crew. In this introductory project, we will explore a subset of the RMS Titanic passenger manifest to determine which features best predict whether someone survived or did not survive. To complete this project, you will need to implement several conditional predictions and answer the questions below. Your project submission will be evaluated based on the completion of the code and your responses to the questions.
Tip: Quoted sections like this will provide helpful instructions on how to navigate and use an iPython notebook.
Getting Started
To begin working with the RMS Titanic passenger data, we'll first need to import the functionality we need, and load our data into a pandas DataFrame.
Run the code cell below to load our data and display the first few entries (passengers) for examination using the .head() function.
Tip: You can run a code cell by clicking on the cell and using the keyboard shortcut Shift + Enter or Shift + Return. Alternatively, a code cell can be executed using the Play button in the hotbar after selecting it. Markdown cells (text cells like this one) can be edited by double-clicking, and saved using these same shortcuts. Markdown allows you to write easy-to-read plain text that can be converted to HTML.
End of explanation
"""
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
"""
Explanation: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship:
- Survived: Outcome of survival (0 = No; 1 = Yes)
- Pclass: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)
- Name: Name of passenger
- Sex: Sex of the passenger
- Age: Age of the passenger (Some entries contain NaN)
- SibSp: Number of siblings and spouses of the passenger aboard
- Parch: Number of parents and children of the passenger aboard
- Ticket: Ticket number of the passenger
- Fare: Fare paid by the passenger
- Cabin Cabin number of the passenger (Some entries contain NaN)
- Embarked: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)
Since we're interested in the outcome of survival for each passenger or crew member, we can remove the Survived feature from this dataset and store it as its own separate variable outcomes. We will use these outcomes as our prediction targets.
Run the code cell below to remove Survived as a feature of the dataset and store it in outcomes.
End of explanation
"""
def accuracy_score(truth, pred):
""" Returns accuracy score for input truth and predictions. """
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print(accuracy_score(outcomes[:5], predictions))
"""
Explanation: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcomes[i].
To measure the performance of our predictions, we need a metric to score our predictions against the true outcomes of survival. Since we are interested in how accurate our predictions are, we will calculate the proportion of passengers where our prediction of their survival is correct. Run the code cell below to create our accuracy_score function and test a prediction on the first five passengers.
Think: Out of the first five passengers, if we predict that all of them survived, what would you expect the accuracy of our predictions to be?
End of explanation
"""
def predictions_0(data):
""" Model with no features. Always predicts a passenger did not survive. """
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
"""
Explanation: Tip: If you save an iPython Notebook, the output from running code blocks will also be saved. However, the state of your workspace will be reset once a new session is started. Make sure that you run all of the code blocks from your previous session to reestablish variables and functions before picking up where you last left off.
Making Predictions
If we were asked to make a prediction about any passenger aboard the RMS Titanic whom we knew nothing about, then the best prediction we could make would be that they did not survive. This is because we can assume that a majority of the passengers (more than 50%) did not survive the ship sinking.
The predictions_0 function below will always predict that a passenger did not survive.
End of explanation
"""
print(accuracy_score(outcomes, predictions))
"""
Explanation: Question 1
Using the RMS Titanic data, how accurate would a prediction be that none of the passengers survived?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
"""
vs.survival_stats(data, outcomes, 'Sex')
"""
Explanation: Answer: Replace this text with the prediction accuracy you found above.
Predictions have an accuracy of 61.62%.
Let's take a look at whether the feature Sex has any indication of survival rates among passengers using the survival_stats function. This function is defined in the visuals.py Python script included with this project. The first two parameters passed to the function are the RMS Titanic data and passenger survival outcomes, respectively. The third parameter indicates which feature we want to plot survival statistics across.
Run the code cell below to plot the survival outcomes of passengers based on their sex.
End of explanation
"""
def predictions_1(data):
""" Model with one feature:
- Predict a passenger survived if they are female. """
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
#pass
if passenger['Sex']=="female":
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
"""
Explanation: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction: If a passenger was female, then we will predict that they survived. Otherwise, we will predict the passenger did not survive.
Fill in the missing code below so that the function will make this prediction.
Hint: You can access the values of each feature for a passenger like a dictionary. For example, passenger['Sex'] is the sex of the passenger.
End of explanation
"""
print(accuracy_score(outcomes, predictions))
"""
Explanation: Question 2
How accurate would a prediction be that all female passengers survived and the remaining passengers did not survive?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
"""
vs.survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
"""
Explanation: Answer: Replace this text with the prediction accuracy you found above.
Predictions have an accuracy of 78.68%.
Using just the Sex feature for each passenger, we are able to increase the accuracy of our predictions by a significant margin. Now, let's consider using an additional feature to see if we can further improve our predictions. For example, consider all of the male passengers aboard the RMS Titanic: Can we find a subset of those passengers that had a higher rate of survival? Let's start by looking at the Age of each male, by again using the survival_stats function. This time, we'll use a fourth parameter to filter out the data so that only passengers with the Sex 'male' will be included.
Run the code cell below to plot the survival outcomes of male passengers based on their age.
End of explanation
"""
def predictions_2(data):
""" Model with two features:
- Predict a passenger survived if they are female.
- Predict a passenger survived if they are male and younger than 10. """
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
#pass
if passenger["Sex"]=="female":
predictions.append(1)
#elif passenger["Sex"]=="male":
# predictions.append(0)
elif passenger["Sex"]=="male" and passenger["Age"] < 10:
predictions.append(1)
#elif passenger["Sex"]=="male" and passenger["Age"] > 10:
# predictions.append(0)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
"""
Explanation: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction: If a passenger was female, then we will predict they survive. If a passenger was male and younger than 10, then we will also predict they survive. Otherwise, we will predict they do not survive.
Fill in the missing code below so that the function will make this prediction.
Hint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_1.
End of explanation
"""
print(accuracy_score(outcomes, predictions))
"""
Explanation: Question 3
How accurate would a prediction be that all female passengers and all male passengers younger than 10 survived?
Hint: Run the code cell below to see the accuracy of this prediction.
End of explanation
"""
vs.survival_stats(data, outcomes, 'Sex', [ "Pclass == 3" ])
"""
Explanation: Predictions have an accuracy of 79.35%.
Answer: Replace this text with the prediction accuracy you found above.
Adding the feature Age as a condition in conjunction with Sex improves the accuracy by a small margin over simply using the feature Sex alone. Now it's your turn: Find a series of features and conditions to split the data on to obtain an outcome prediction accuracy of at least 80%. This may require multiple features and multiple levels of conditional statements to succeed. You can use the same feature multiple times with different conditions.
Pclass, Sex, Age, SibSp, and Parch are some suggested features to try.
Use the survival_stats function below to examine various survival statistics.
Hint: To use multiple filter conditions, put each condition in the list passed as the last argument. Example: ["Sex == 'male'", "Age < 18"]
End of explanation
"""
vs.survival_stats(data, outcomes, 'Age', ["Sex == 'female'", "Embarked == 'C'"])
"""
Explanation: vs.survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Age < 18"])
End of explanation
"""
def predictions_3(data):
""" Model with multiple features. Makes a prediction with an accuracy of at least 80%. """
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
#pass
#if passenger["Sex"] == "female" :
if passenger["Sex"] == "female":
if passenger["Pclass"] ==3 :
predictions.append(0)
else:
predictions.append(1)
else:
if passenger['Age'] < 10 and passenger['Pclass'] in (1, 2):
predictions.append(1)
elif passenger['Age'] < 18 and passenger['Pclass'] == 1:
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
"""
Explanation: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Make sure to keep track of the various features and conditions you tried before arriving at your final prediction model.
Hint: You can start your implementation of this function using the prediction code you wrote earlier from predictions_2.
End of explanation
"""
print(accuracy_score(outcomes, predictions))
"""
Explanation: Question 4
Describe the steps you took to implement the final prediction model so that it got an accuracy of at least 80%. What features did you look at? Were certain features more informative than others? Which conditions did you use to split the survival outcomes in the data? How accurate are your predictions?
Hint: Run the code cell below to see the accuracy of your predictions.
End of explanation
"""
|
eggie5/ipython-notebooks
|
avengers/Avengers.ipynb
|
mit
|
import pandas as pd
avengers = pd.read_csv("avengers.csv")
avengers.head(5)
"""
Explanation: Avengers Data
You can also see this notebook rendered on github: https://github.com/eggie5/ipython-notebooks/blob/master/avengers/Avengers.ipynb
Life and Death of the Avengers
The Avengers are a well-known and widely loved team of superheroes in the Marvel universe that were introduced in the 1960's in the original comic book series. They've since become popularized again through the recent Disney movies as part of the new Marvel Cinematic Universe.
The team at FiveThirtyEight wanted to dissect the deaths of the Avengers in the comics over the years. The writers were known to kill off and revive many of the superheroes so they were curious to know what data they could grab from the Marvel Wikia site, a fan-driven community site, to explore further. To learn how they collected their data, available on their Github repo, read the writeup they published on their site.
Exploring the Data
While the FiveThirtyEight team has done a wonderful job acquiring this data, the data still has some inconsistencies. Your mission, if you choose to accept it, is to clean up their dataset so it can be more useful for analysis in Pandas. First things first, let's read our dataset into Pandas as a DataFrame and preview the first 5 rows to get a better sense of our data.
End of explanation
"""
import matplotlib.pyplot as plt
%matplotlib inline
true_avengers = pd.DataFrame()
avengers['Year'].hist()
# This is obviously a mistake in the data, so we remove all Avengers introduced before 1960 from the DataFrame.
# Keep only the Avengers who were introduced in 1960 or later and store them in `true_avengers`.
selector = avengers['Year'] >= 1960
true_avengers = avengers[selector]
true_avengers['Year'].hist()
"""
Explanation: Filter out the bad years
Since the data was collected from a community site, where most of the contributions came from individual users, there's room for errors to surface in the dataset. If you plot a histogram of the values in the Year column, which describe the year that Avenger was introduced, you'll immediately notice some oddities. There are quite a few Avengers who look like they were introduced in 1900, which we know is a little fishy. The Avengers weren't introduced in the comic series until the 1960's!
End of explanation
"""
pd.options.mode.chained_assignment = None # default='warn'
columns = ['Death1', 'Death2', 'Death3', 'Death4', 'Death5']
true_avengers[columns]
def clean_row(row):
val = 0
for column in columns:
if(row[column] == "YES"):
val += 1
return val
death_column_vector = true_avengers.apply(lambda row: clean_row(row), axis=1)
true_avengers['Deaths']=death_column_vector
true_avengers.sort_values("Deaths", ascending=False)
"""
Explanation: Consolidating deaths
We are interested in the number of total deaths each character experienced and we'd like a field containing that distilled information. Right now, there are 5 fields (Death1 to Death5) that each contain a binary value representing if a superhero experienced that death or not. For example, a superhero can experience Death1, then Death2, etc. until they were no longer brought back to life by the writers.
We'd like to coalesce that information into just one field so we can do numerical analysis more easily.
Create a new column, Deaths, that contains the number of times each superhero died. The possible values for each death field are YES, NO, and the Pandas NaN value used to represent missing data. Keep all of the the original columns (including Death1 to Death5) and update true_avengers with the new Deaths column.
End of explanation
"""
joined_accuracy_count = int()
correct_joined_years = true_avengers[true_avengers['Years since joining'] == (2015 - true_avengers['Year'])]
joined_accuracy_count = len(correct_joined_years)
joined_accuracy_count
"""
Explanation: I sorted the output by the new Deaths column and it looks like some character "Jocasta" has died 5 times! Followed by Mar-Vell with 4 deaths.
Years since joining
For the final task, we want to know if the Years since joining field accurately reflects the Year column. If an Avenger was introduced in Year 1960, is the Years since joined value for that Avenger 55?
Calculate the number of rows where Years since joined is accurate. This challenge was created in 2015, so use that as the reference year. We want to know for how many rows Years since joined was correctly calculated as Year value subtracted from 2015.
End of explanation
"""
|
dedx/cpalice
|
training/05_LinearFits.ipynb
|
mit
|
%pylab inline
import numpy as np
import matplotlib.pyplot as plt
#Import the curve fitter from the scipy optimize package
from scipy.optimize import curve_fit
"""
Explanation: 05 Linear fits to some data
When you have a set of data that you would like to fit with some theoretical curve, you can use the SciPy optimize library to do it. When your data is linear or a subset of the data is linear, you can use a straight line fit. Here is an example of fitting a straight line to a portion of a sine curve.
End of explanation
"""
#create the data to be plotted
x = np.linspace(0, 2*np.pi, 300)
y = np.sin(x)
"""
Explanation: Create an array of points that represent a sine curve between 0 and 2$\pi$.
End of explanation
"""
#Now plot it
plt.plot(x,y,'b--')
plt.plot(x[110:180], y[110:180]) #subset of points that we will fit
plt.show()
"""
Explanation: Plot the data over the full range as a dashed line and then overlay the section of the data that looks roughly linear, which we will try to fit with a straight line.
End of explanation
"""
#Define the fit function
def func(x, m, b):
return (m*x + b)
"""
Explanation: We need to define the function that we will try to fit to this data. In this example, we will use the equation for a straight line, which has two parameters, the slope $m$ and the y-intercept $b$.
End of explanation
"""
# Make initial guess at parameters, slope then y-intercept
p0 = [-1.0, 2.0]
"""
Explanation: Before we can fit the data we need to make an initial guess at the slope and y-intercept which we can pass to the optimizer. It will start with those values and then keep trying small variations on those values until it minimizes the linear least squared difference between the data points we are trying to fit and points on the line described by those parameters.
Looking at the graph, the top-left of the solid blue curve will probably hit around $y$ = 2 when $x$ = 0 (the y-intercept). The slope is negative (decreasing y for increasing x) in the region we are fitting and it looks like the "rise" in $y$ (really it's a drop) over the "run" in $x$ appears to be about 1. Here's the parameter array we will pass to the optimizer. The order of the parameters has to match the order that they are called in the function we defined (func) so the slope comes first.
End of explanation
"""
#Call the curve fitter and have it return the optimized parameters (popt) and covariance matrix (pcov)
popt, pcov = curve_fit(func, x[110:180], y[110:180], p0)
"""
Explanation: Now call the optimizer. It will return two arrays. The first is the set of optimized parameters and the second is a matrix that shows the covariance between the parameters. Don't worry about the details of the covariance matrix for now.
End of explanation
"""
#Compute the parameter uncertainties from the covariance matrix
punc = np.zeros(len(popt))
for i in np.arange(0,len(popt)):
punc[i] = np.sqrt(pcov[i,i])
#Print the result
print "optimal parameters: ", popt
print "uncertainties of parameters: ", punc
"""
Explanation: The diagonal elements of the covariance matrix are related to the uncertainties in the optimized fit parameters - they are the square of the uncertainties, actually. Any off-diagonal elements that are non-zero tell you how correlated the parameters are. Values close to zero mean the parameters are totally uncorrelated to one another. Values close to one tell you that the parameters are tightly correlated, meaning that changing the value of one of them makes the value of the other one change by a lot. In the case of a linear fit, changing the slope of the line will change where that line intersects the y-axis, so you would expect a high degree of correlation between the slope and the y-intercept. When you are trying to understand how well a theoretical model matches data and extract parameters with some physical meaning, analyzing the covariance matrix is very important. For now, we just want the best-fit parameters and their uncertainties.
End of explanation
"""
#plot the fit result with the data
fitresult = func(x,popt[0],popt[1])
plt.plot(x,y,'b--',label="data")
plt.plot(x,fitresult,'g',label="fit")
plt.legend(loc="best")
plt.show()
"""
Explanation: Let's look at how the fit compares to the data by plotting them on top of one another. The fitresult array extends over the full range in x. You can see that a linear fit in the range of interest is pretty good, but it deviates quite significantly from the data (the sine curve) outside that range.
End of explanation
"""
|
leoferres/prograUDD
|
clases/02-Sintaxis-de-Python.ipynb
|
mit
|
# set the midpoint
midpoint = 5
# make two empty lists
lower = []; upper = []
# split the numbers into lower and upper
for i in range(10):
if (i < midpoint):
lower.append(i)
else:
upper.append(i)
print("lower:", lower)
print("upper:", upper)
"""
Explanation: Metadata: These notebooks are (more than) inspired by the excellent work of Jake VanderPlas and his Whirlwind Tour Of Python. See "A Whirlwind Tour of Python" by Jake VanderPlas (O'Reilly). Copyright 2016 O'Reilly Media, Inc., 978-1-491-96465-1. These notebooks are released under the same license as the originals, Creative Commons 0. All notes are available at PrograUDD1.
Python syntax
Python's syntax is considered so clean that Python is sometimes called "executable pseudo-code", which has helped drive its very wide adoption.
Consider the following code:
End of explanation
"""
print(2*(3+4))
print(2*3+4)
print((2*3)+4)
"""
Explanation: This small script illustrates several important aspects of Python syntax.
Comments
Comments in Python start with a "pound", "hash" or number sign #, and anything that follows it up to the end of the line is ignored by the interpreter. That is, you can have comments that take up an entire line, or only part of one.
In the example above there are three comments:
```python
# set the midpoint
# make two empty lists
lower = []; upper = []
# or
lower = []; upper = []  # make two empty lists
# split the numbers into lower and upper
```
Python has no syntax for multi-line comments the way C does, for example (/* ... */).
"Enter" ends an executable line (a statement)
The line
Python
midpoint = 5
This operation is called an assignment, and it basically consists of creating a variable and giving it a particular value: 5, in this case. Note that nothing marks the end of the statement, no {...}, no ;, nothing of the sort (just Enter). This is quite different from programming languages like C or Java, which require the ; (historical reasons, perhaps?).
However, if for some reason you really do need to span more than one line:
Python
x = 1 + 2 + 3 + 4 +\
5 + 6 + 7 + 8
it is also possible to continue on the next line when there are parentheses, without using the \ operator, like this:
Python
x = (1 + 2 + 3 + 4 +
5 + 6 + 7 + 8)
The Python gods recommend the second method over the continuation symbol \. Can anyone say why?
Vean el siguiente snippet de código:
Python
for i in range(10):
if i < midpoint:
lower.append(i)
else:
upper.append(i)
Aqui hay varias cosas que notar. Lo primero es que hay un condicional (el scope introducido por el if), y un "loop" (o ciclo), el scope introducido por el for. No es tan importante a esta altura, pero nos presenta lo que ha sido la caracteristica mas controversial de la sintaxis de Python: el espacio en blanco tiene semántica!
En otros lenguajes de programación, un bloque (scope) se define explicitamente con algun símbolo. Cuál es el simbolo que define scope en el siguiente código?
C
// C code
for(int i=0; i<100; i++) {
// curly braces indicate code block
total += i;}
y en este:
Go
package main
import "fmt"
func main() {
sum := 0
for i := 0; i < 10; i++ {
sum += i
}
fmt.Println(sum)
}
En Python los scope (o bloques de código) se determinan por indentación.
Python
for i in range(100):
# indentation indicates code block
total += i
y el scope siempre se precede con : en la línea anterior.
A mí me gusta como queda la indentación... es más limpia que la {}, pero al mismo puede producir confusion en los n00bs. Lo siguiente produce diferentes resultados:
```
if x < 4: >>> if x < 4:
........y = x * 2 ........y = x * 2
........print(x) ....print(x)
```
El código de la izquierda va a ser ejecutado sólo si el valor de x es menor que 4, mientras que el de la derecha se va a ejecutar no importa el valor de x.
A mí me parece más leible el código con espacios que con curlies, a ustedes?
Finally, the exact number of whitespace characters is not important. It only needs to be consistent; that is, you cannot keep switching between, say, 2 and 4 spaces within a script. The convention is to use 4 spaces (and never tabs), and that is the convention we use in these notebooks. (Even though I like 2 spaces in C :( ).
Whitespace inside a line has no effect, only at the beginning of the line. The following are equivalent:
Python
x=1+2
x = 1 + 2
x     =     1     +     2
Obviously, abusing this flexibility of the language hurts the readability of the code. The third line looks rather dreadful, the first one less so, and the middle one is the one that (to me) makes the most sense. Compare, for example,
Python
x=10**-2
with
Python
x = 10 ** -2
In fact, putting spaces around binary operators is recommended.
Parentheses
Parentheses are used to group terms and to call functions with parameters. First, they are used to group the terms of mathematical operators:
End of explanation
"""
x = 3
print('first value:', x)
print('second value:', 2)
"""
Explanation: Parentheses are also used to pass parameters to a function when it is called. In the following code snippet, the print() function is used to display, for example, the contents of a variable. The function is "called" with a pair of parentheses containing the function's arguments.
End of explanation
"""
L = [4,2,3,1]
L.sort()
print(L)
"""
Explanation: Some functions are called without arguments and act on the object they belong to. The parentheses must still be used, even when the function takes no arguments.
End of explanation
"""
|
tleonhardt/CodingPlayground
|
dataquest/DataCleaning/Analyzing_NYC_High_School_Data.ipynb
|
mit
|
import pandas as pd
import numpy as np
import re
data_files = ["ap_2010.csv",
"class_size.csv",
"demographics.csv",
"graduation.csv",
"hs_directory.csv",
"sat_results.csv"]
data = {}
for f in data_files:
d = pd.read_csv("../data/schools/{0}".format(f))
data[f.replace(".csv", "")] = d
"""
Explanation: Read in the data
End of explanation
"""
all_survey = pd.read_csv("../data/schools/survey_all.txt", delimiter="\t", encoding='windows-1252')
d75_survey = pd.read_csv("../data/schools/survey_d75.txt", delimiter="\t", encoding='windows-1252')
survey = pd.concat([all_survey, d75_survey], axis=0)
survey["DBN"] = survey["dbn"]
survey_fields = [
"DBN",
"rr_s",
"rr_t",
"rr_p",
"N_s",
"N_t",
"N_p",
"saf_p_11",
"com_p_11",
"eng_p_11",
"aca_p_11",
"saf_t_11",
"com_t_11",
"eng_t_10",
"aca_t_11",
"saf_s_11",
"com_s_11",
"eng_s_11",
"aca_s_11",
"saf_tot_11",
"com_tot_11",
"eng_tot_11",
"aca_tot_11",
]
survey = survey.loc[:,survey_fields]
data["survey"] = survey
"""
Explanation: Read in the surveys
End of explanation
"""
data["hs_directory"]["DBN"] = data["hs_directory"]["dbn"]
def pad_csd(num):
string_representation = str(num)
if len(string_representation) > 1:
return string_representation
else:
return "0" + string_representation
data["class_size"]["padded_csd"] = data["class_size"]["CSD"].apply(pad_csd)
data["class_size"]["DBN"] = data["class_size"]["padded_csd"] + data["class_size"]["SCHOOL CODE"]
"""
Explanation: Add DBN columns
End of explanation
"""
cols = ['SAT Math Avg. Score', 'SAT Critical Reading Avg. Score', 'SAT Writing Avg. Score']
for c in cols:
data["sat_results"][c] = pd.to_numeric(data["sat_results"][c], errors="coerce")
data['sat_results']['sat_score'] = data['sat_results'][cols[0]] + data['sat_results'][cols[1]] + data['sat_results'][cols[2]]
def find_lat(loc):
coords = re.findall("\(.+, .+\)", loc)
lat = coords[0].split(",")[0].replace("(", "")
return lat
def find_lon(loc):
coords = re.findall("\(.+, .+\)", loc)
lon = coords[0].split(",")[1].replace(")", "").strip()
return lon
data["hs_directory"]["lat"] = data["hs_directory"]["Location 1"].apply(find_lat)
data["hs_directory"]["lon"] = data["hs_directory"]["Location 1"].apply(find_lon)
data["hs_directory"]["lat"] = pd.to_numeric(data["hs_directory"]["lat"], errors="coerce")
data["hs_directory"]["lon"] = pd.to_numeric(data["hs_directory"]["lon"], errors="coerce")
"""
Explanation: Convert columns to numeric
End of explanation
"""
class_size = data["class_size"]
class_size = class_size[class_size["GRADE "] == "09-12"]
class_size = class_size[class_size["PROGRAM TYPE"] == "GEN ED"]
class_size = class_size.groupby("DBN").agg(np.mean)
class_size.reset_index(inplace=True)
data["class_size"] = class_size
data["demographics"] = data["demographics"][data["demographics"]["schoolyear"] == 20112012]
data["graduation"] = data["graduation"][data["graduation"]["Cohort"] == "2006"]
data["graduation"] = data["graduation"][data["graduation"]["Demographic"] == "Total Cohort"]
"""
Explanation: Condense datasets
End of explanation
"""
cols = ['AP Test Takers ', 'Total Exams Taken', 'Number of Exams with scores 3 4 or 5']
for col in cols:
data["ap_2010"][col] = pd.to_numeric(data["ap_2010"][col], errors="coerce")
"""
Explanation: Convert AP scores to numeric
End of explanation
"""
combined = data["sat_results"]
combined = combined.merge(data["ap_2010"], on="DBN", how="left")
combined = combined.merge(data["graduation"], on="DBN", how="left")
to_merge = ["class_size", "demographics", "survey", "hs_directory"]
for m in to_merge:
combined = combined.merge(data[m], on="DBN", how="inner")
combined = combined.fillna(combined.mean())
combined = combined.fillna(0)
"""
Explanation: Combine the datasets
End of explanation
"""
def get_first_two_chars(dbn):
return dbn[0:2]
combined["school_dist"] = combined["DBN"].apply(get_first_two_chars)
"""
Explanation: Add a school district column for mapping
End of explanation
"""
correlations = combined.corr()
correlations = correlations["sat_score"]
correlations = correlations.dropna()
correlations.sort_values(ascending=False, inplace=True)
# Interesting correlations tend to have r value > .25 or < -.25
interesting_correlations = correlations[abs(correlations) > 0.25]
print(interesting_correlations)
# Setup Matplotlib to work in Jupyter notebook
%matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: Find correlations
End of explanation
"""
# Make a bar plot of the correlations between survey fields and sat_score
correlations[survey_fields].plot.bar(figsize=(9,7))
"""
Explanation: Survey Correlations
End of explanation
"""
# Make a scatterplot of the saf_s_11 column vs the sat-score in combined
combined.plot.scatter(x='sat_score', y='saf_s_11', figsize=(9,5))
"""
Explanation: From the survey fields, four stand out due to their significant positive correlations:
* N_s - Number of student respondents
* N_p - Number of parent respondents
* aca_s_11 - Academic expectations score based on student responses
* saf_s_11 - Safety and Respect score based on student responses
What are some possible reasons that N_s and N_p could matter? (A quick numerical check of the second hypothesis follows the list below.)
1. Higher numbers of students and parents responding to the survey may be an indicator that students and parents care more about the school and about academics in general.
1. Maybe larger schools do better on the SAT and higher numbers of respondents is just indicative of a larger overall student population.
1. Maybe there is a hidden underlying correlation, say that rich students/parents or white students/parents are more likely to both respond to surveys and to have the students do well on the SAT.
1. Maybe parents who care more will fill out the surveys and get their kids to fill out the surveys and these same parents will push their kids to study for the SAT.
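A quick way to probe the second hypothesis is to check whether the respondent counts simply track school size; a sketch using the combined DataFrame built above:
```python
print(combined[['total_enrollment', 'N_s', 'N_p']].corr())
```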
Safety and SAT Scores
Both student and teacher perception of safety and respect at school correlate significantly with SAT scores. Let's dig more into this relationship.
End of explanation
"""
# Find the average values for each column for each school_dist in combined
districts = combined.groupby('school_dist').agg(np.mean)
# Reset the index of districts, making school_dist a column again
districts.reset_index(inplace=True)
# Make a map that shows safety scores by district
from mpl_toolkits.basemap import Basemap
plt.figure(figsize=(8,8))
# Setup the Matplotlib Basemap centered on New York City
m = Basemap(projection='merc',
llcrnrlat=40.496044,
urcrnrlat=40.915256,
llcrnrlon=-74.255735,
urcrnrlon=-73.700272,
resolution='i')
m.drawmapboundary(fill_color='white')
m.drawcoastlines(color='blue', linewidth=.4)
m.drawrivers(color='blue', linewidth=.4)
# Convert the lat and lon columns of districts to lists
longitudes = districts['lon'].tolist()
latitudes = districts['lat'].tolist()
# Plot the locations
m.scatter(longitudes, latitudes, s=50, zorder=2, latlon=True,
c=districts['saf_s_11'], cmap='summer')
# Add colorbar
# add colorbar.
cbar = m.colorbar(location='bottom',pad="5%")
cbar.set_label('saf_s_11')
"""
Explanation: So a high saf_s_11 student safety and respect score doesn't really have any predictive value regarding SAT score. However, a low saf_s_11 has a very strong correlation with low SAT scores.
Map out Safety Scores
End of explanation
"""
# Make a plot of the correlations between racial cols and sat_score
race_cols = ['white_per', 'asian_per', 'black_per', 'hispanic_per']
race_corr = correlations[race_cols]
race_corr.plot(kind='bar')
"""
Explanation: So it looks like the safest schools are in Manhattan, while the least safe schools are in Brooklyn.
This jibes with crime statistics by borough.
Race and SAT Scores
There are a few columsn that indicate the percentage of each race at a given school:
* white_per
* asian_per
* black_per
* hispanic_per
By plotting out the correlations between these columns and sat_score, we can see if there are any racial differences in SAT performance.
End of explanation
"""
# Explore schools with low SAT scores and a high hispanic_per
combined.plot.scatter(x='hispanic_per', y='sat_score')
"""
Explanation: A higher percentage of white and asian students correlates positively with SAT scores and a higher percentage of black or hispanic students correlates negatively with SAT scores. I wouldn't say any of this is surprising. My guess would be that there is an underlying economic factor which is the cause - white and asian neighborhoods probably have a higher median household income and more well-funded schools than black or hispanic neighborhoods.
End of explanation
"""
# Research any schools with a greater than 95% hispanic_per
high_hispanic = combined[combined['hispanic_per'] > 95]
# Find the names of schools from the data
high_hispanic['SCHOOL NAME']
"""
Explanation: The above scatterplot shows that a low hispanic percentage isn't particularly predictive of SAT score. However, a high hispanic percentage is highly predictive of a low SAT score.
End of explanation
"""
# Research any schools with less than 10% hispanic_per and greater than
# 1800 average SAT score
high_sat_low_hispanic = combined[(combined['hispanic_per'] < 10) &
(combined['sat_score'] > 1800)]
high_sat_low_hispanic['SCHOOL NAME']
"""
Explanation: The above schools appear to contain a lot of international schools focused on recent immigrants who are learning English as a second language. It makes sense that they would have a harder time on the SAT, which is given solely in English.
End of explanation
"""
# Investigate gender differences in SAT scores
gender_cols = ['male_per', 'female_per']
gender_corr = correlations[gender_cols]
gender_corr
# Make a plot of the gender correlations
gender_corr.plot.bar()
"""
Explanation: Most of the schools above appear to be specialized science and technology schools which receive extra funding and require students to do well on a standardized test before being admitted. So it is reasonable that students at these schools would have a high average SAT score.
Gender and SAT Scores
There are two columns that indicate the percentage of each gender at a school:
* male_per
* female_per
End of explanation
"""
# Investigate schools with high SAT scores and a high female_per
combined.plot.scatter(x='female_per', y='sat_score')
"""
Explanation: In the plot above, we can see that a high percentage of females at a school positively correlates with SAT score, whereas a high percentage of males at a school negatively correlates with SAT score. Neither correlation is extremely strong.
More data would be required before I would be willing to say that this is a significant effect.
End of explanation
"""
# Research any schools with a greater than 60% female_per, and greater
# than 1700 average SAT score.
high_female_high_sat = combined[(combined['female_per'] > 60) &
(combined['sat_score'] > 1700)]
high_female_high_sat['SCHOOL NAME']
"""
Explanation: The above plot appears to show that either very low or very high percentage of females in a school leads to a low average SAT score. However, a percentage in the range 40 to 80 or so can lead to good scores. There doesn't appear to be a strong overall correlation.
End of explanation
"""
# Compute the percentage of students in each school that took the AP exam
combined['ap_per'] = combined['AP Test Takers '] / combined['total_enrollment']
# Investigate the relationship between AP scores and SAT scores
combined.plot.scatter(x='ap_per', y='sat_score')
"""
Explanation: These schools appears to be very selective liberal arts schools that have high academic standards.
AP Scores vs SAT Scores
The Advanced Placement (AP) exams are exams that high schoolers take in order to gain college credit. AP exams can be taken in many different subjects, and passing the AP exam means that colleges may grant you credits.
It makes sense that the number of students who took the AP exam in a school and SAT scores would be highly correlated. Let's dig into this relationship more.
Since total_enrollment is highly correlated with sat_score, we don't want to bias our results, so we'll instead look at the percentage of students in each school who took at least one AP exam.
End of explanation
"""
|
ceos-seo/data_cube_notebooks
|
notebooks/water/detection/water_interoperability_similarity.ipynb
|
apache-2.0
|
import sys
import os
sys.path.append(os.environ.get('NOTEBOOK_ROOT'))
%matplotlib inline
import sys
import datacube
import numpy
import numpy as np
import xarray as xr
from xarray.ufuncs import isnan as xr_nan
import pandas as pd
import matplotlib.pyplot as plt
"""
Explanation: <a id="water_interoperability_similarity_top"></a>
Water Interoperability Similarity
<hr>
Background
There are a few water classifiers for Landsat, Sentinel-1, and Sentinel-2. We will examine WOfS for Landsat, thresholding for Sentinel-1, and WOfS for Sentinel-2.
Although WOfS performs well on clear water bodies, it can misclassify murky water bodies as not water. WASARD or Sentinel-1 thresholding generally perform equally well or better than WOfS – especially on murky water bodies.
Because WOfS uses an optical data source (Landsat), it often does not have data to make water classifications due to cloud occlusion. The same limitation applies to Sentinel-2 water detection.
The main reasons to use multiple data sources in the same water detection analysis are to increase temporal resolution and account for missing data.
<hr>
Notebook Description
This notebook checks how similar water classifications are among a selected set of sources (e.g. WOfS for Landsat, thresholding for Sentinel-1, etc.).
These are the steps followed:
Determine the dates of coincidence of data for the selected sensors using the CEOS COVE tool.
Acquire water classifications for each sensor.
Show the RGB representation of Time Slices and Water Classifications
Show the per-time-slice percent of cloud according to each sensor as a line plot.
Show the per-time-slice percent of water (masked with the intersected clean mask) according to each sensor as a line plot.
Show the per-time-slice similarity (% of matching pixels) of each pair of sensors as a line plot.
<hr>
Index
Import Dependencies and Connect to the Data Cube
Define the Extents of the Analysis
Determine Dates of Coincidence for the Selected Sensors Using the COVE Tool
Get Water Classifications for Each Sensor
Determine the time range of overlapping data for all sensors.
Determine the dates of close scenes among the sensors.
Get Landsat 8 water classifications
Get Sentinel-1 water classifications
Get Sentinel-2 water classifications
Show the RGB Representation of Time Slices and Water Classifications
Show the Per-time-slice Percent of Water According to Each Sensor as a Line Plot
Show the Per-time-slice Similarity (% of Matching Pixels) of Each Pair of Sensors as a Line Plot
Getting started
To run this analysis, run all the cells in the notebook, starting with the "Load packages" cell.
After finishing the analysis, return to the "Analysis parameters" cell, modify some values (e.g. choose a different location or time period to analyse) and re-run the analysis.
<span id="water_interoperability_similarity_import">Import Dependencies and Connect to the Data Cube ▴</span>
Load packages
Load key Python packages and supporting functions for the analysis.
End of explanation
"""
dc = datacube.Datacube(app="water_interoperability_similarity")
"""
Explanation: Connect to the datacube
Activate the datacube database, which provides functionality for loading and displaying stored Earth observation data.
End of explanation
"""
# Define the area of interest
# Obuasi, Ghana
# latitude = (6.10, 6.26)
# longitude = (-1.82, -1.66)
# latitude = (6.1582, 6.2028)
# longitude = (-1.7295, -1.6914)
# DEBUG - small area of Obuasi for quick loading
# latitude = (6.1982, 6.2028)
# longitude = (-1.7295, -1.6914)
# Tono Dam, Ghana
latitude = (10.8600, 10.9150)
longitude = (-1.1850, -1.1425)
# The time range in which we want to determine
# dates of close scenes among sensors.
time_extents = ('2014-01-01', '2018-12-31')
from utils.data_cube_utilities.dc_display_map import display_map
display_map(longitude, latitude)
"""
Explanation: <span id="water_interoperability_similarity_define_extents">Define the Extents of the Analysis ▴</span>
Analysis parameters
The following cell sets the parameters, which define the area of interest and the length of time to conduct the analysis over.
The parameters are
latitude: The latitude range to analyse (e.g. (-11.288, -11.086)).
For reasonable loading times, make sure the range spans less than ~0.1 degrees.
longitude: The longitude range to analyse (e.g. (130.324, 130.453)).
For reasonable loading times, make sure the range spans less than ~0.1 degrees.
If running the notebook for the first time, keep the default settings below.
This will demonstrate how the analysis works and provide meaningful results.
The default example covers an area around Tono Dam, Ghana (coordinates for Obuasi, Ghana are included in the cell above but commented out).
To run the notebook for a different area, make sure Landsat 8, Sentinel-1, and Sentinel-2 data is available for the chosen area.
End of explanation
"""
common_load_params = \
dict(latitude=latitude, longitude=longitude,
group_by='solar_day',
output_crs="epsg:4326",
resolution=(-0.00027,0.00027),
dask_chunks={'latitude': 2000, 'longitude':2000, 'time':1})
# The minimum percent of data that a time slice must have
# to be kept in this analysis
MIN_PCT_DATA = 0
"""
Explanation: <span id="water_interoperability_similarity_determine_coincidence">Determine Dates of Coincidence for the Selected Sensors Using the COVE Tool ▴</span>
We used a tool from the Committee on Earth Observations (CEOS) called the CEOS Visualization Environment (COVE). This tool has several applications, such as Acquisition Forecaster predicts when and where future acquisitions (images) will occur, and Coverage Analyzer which shows when and where acquisitions have occurred in the past.
For this analysis, we used the Coincident Calculator to determine when Landsat 8, Sentinel-1, and Sentinel-2 have close dates so we can compare them on a per-time-slice basis.
The COVE Coincident Calculator allows users to specify the sensors to determine coincidence for. For this analysis, we first determined the dates of coincidence of Landsat 8 and Sentinel-2. We then determined dates which are close to those which have Sentinel-1 data.
We first found dates for which both Landsat 8 and Sentinel-2 data is available for the time range and area of interest, which were the following 8 dates:
[April 22, 2017, July 11, 2017, September 29, 2017, December 18, 2017, March 8, 2018, May 27, 2018, August 15, 2018, November 3, 2018]
Then we found dates for which Landsat 8 and Sentinel-1 data is available for the time range and area of interest, and then found the subset of closely matching dates, which were the following 6 dates: [July 12, 2017 (off 1), September 29, 2017, December 15, 2017 (off 3), March 9, 2018 (off 1), May 27, 2018, August 12, 2018 (off 3)]. These are the dates we use in this analysis.
<span id="water_interoperability_similarity_get_water_classifications">Get Water Classifications for Each Sensor ▴</span>
End of explanation
"""
metadata = {}
metadata['Landsat 8'] = \
dc.load(**common_load_params,
product='ls8_lasrc_ghana',
time=time_extents)
metadata['Sentinel-1'] = \
dc.load(**common_load_params,
product='s1monthly_gamma0_ghana',
time=time_extents)
s2a_meta = dc.load(**common_load_params,
product='s2a_msil2a',
time=time_extents)
s2b_meta = dc.load(**common_load_params,
product='s2b_msil2a',
time=time_extents)
metadata['Sentinel-2'] = xr.concat((s2a_meta, s2b_meta), dim='time').sortby('time')
del s2a_meta, s2b_meta
ls8_time_rng = metadata['Landsat 8'].time.values[[0,-1]]
s2_time_rng = metadata['Sentinel-2'].time.values[[0,-1]]
time_rng = np.stack((ls8_time_rng, s2_time_rng))
overlapping_time = time_rng[:,0].max(), time_rng[:,1].min()
"""
Explanation: Determine the time range of overlapping data for all sensors.
End of explanation
"""
for sensor in metadata:
metadata[sensor] = metadata[sensor].sel(time=slice(*overlapping_time))
"""
Explanation: Limit the metadata to check for close scenes to the overlapping time range.
End of explanation
"""
# Constants #
# The maximum number of days of difference between scenes
# from sensors for those scenes to be considered approximately coincident.
# The Sentinel-1 max date diff is set high enough to allow any set of dates
# from the other sensors to match with one of its dates since we will
# select its matching dates with special logic later.
MAX_NUM_DAYS_DIFF = {'Landsat 8':4, 'Sentinel-1':30}
# End Constants #
# all_times
num_datasets = len(metadata)
ds_names = list(metadata.keys())
first_ds_name = ds_names[0]
# All times for each dataset.
ds_times = {ds_name: metadata[ds_name].time.values for ds_name in ds_names}
# The time indices for each dataset's sorted time dimension
# currently being compared.
time_inds = {ds_name: 0 for ds_name in ds_names}
corresponding_times = {ds_name: [] for ds_name in ds_names}
# The index of the dataset in `metadata` to compare times against the first.
oth_ds_ind = 1
oth_ds_name = ds_names[oth_ds_ind]
oth_ds_time_ind = time_inds[oth_ds_name]
# For each time in the first dataset, find any
# closely matching dates in the other datasets.
for first_ds_time_ind, first_ds_time in enumerate(ds_times[first_ds_name]):
time_inds[first_ds_name] = first_ds_time_ind
# Find a corresponding time in this other dataset.
while True:
oth_ds_name = ds_names[oth_ds_ind]
oth_ds_time_ind = time_inds[oth_ds_name]
# If we've checked all dates for the other dataset,
# check the next first dataset time.
if oth_ds_time_ind == len(ds_times[oth_ds_name]):
break
oth_ds_time = metadata[ds_names[oth_ds_ind]].time.values[oth_ds_time_ind]
time_diff = (oth_ds_time - first_ds_time).astype('timedelta64[D]').astype(int)
# If this other dataset time is too long before this
# first dataset time, check the next other dataset time.
if time_diff <= -MAX_NUM_DAYS_DIFF[oth_ds_name]:
oth_ds_time_ind += 1
time_inds[ds_names[oth_ds_ind]] = oth_ds_time_ind
continue
# If this other dataset time is within the acceptable range
# of the first dataset time...
elif abs(time_diff) <= MAX_NUM_DAYS_DIFF[oth_ds_name]:
# If there are more datasets to find a corresponding date for
# these current corresponding dates, check those datasets.
if oth_ds_ind < len(ds_names)-1:
oth_ds_ind += 1
continue
else: # Otherwise, record this set of corresponding dates.
for ds_name in ds_names:
corresponding_times[ds_name].append(ds_times[ds_name][time_inds[ds_name]])
# Don't use these times again.
time_inds[ds_name] = time_inds[ds_name] + 1
oth_ds_ind = 1
break
# If this other dataset time is too long after this
# first dataset time, go to the next first dataset time.
else:
oth_ds_ind -= 1
break
# convert to pandas datetime
for sensor in corresponding_times:
for ind in range(len(corresponding_times[sensor])):
corresponding_times[sensor][ind] = \
pd.to_datetime(corresponding_times[sensor][ind])
"""
Explanation: Determine the dates of close scenes among the sensors
End of explanation
"""
ls8_pd_datetimes = corresponding_times['Landsat 8']
s1_pd_datetimes = pd.to_datetime(metadata['Sentinel-1'].time.values)
for time_ind, ls8_time in enumerate(ls8_pd_datetimes):
matching_s1_time_ind = [s1_time_ind for (s1_time_ind, s1_time)
in enumerate(s1_pd_datetimes) if
s1_time.month == ls8_time.month][0]
matching_s1_time = metadata['Sentinel-1'].time.values[matching_s1_time_ind]
corresponding_times['Sentinel-1'][time_ind] = pd.to_datetime(matching_s1_time)
"""
Explanation: The Sentinel-1 data is a monthly composite, so we need special logic for choosing data from it.
End of explanation
"""
ls8_times = corresponding_times['Landsat 8']
s1_times = corresponding_times['Sentinel-1']
s2_times = corresponding_times['Sentinel-2']
ls8_data = dc.load(**common_load_params,
product='ls8_usgs_sr_scene',
time=(ls8_times[0], ls8_times[-1]),
dask_chunks = {'time': 1})
ls8_data = ls8_data.sel(time=corresponding_times['Landsat 8'], method='nearest')
print(f"Subset the data to {len(ls8_data.time)} times of near coincidence.")
"""
Explanation: Get Landsat 8 water classifications
Load the data
End of explanation
"""
from water_interoperability_utils.clean_mask import ls8_unpack_qa
ls8_data_mask = (ls8_data != -9999).to_array().any('variable')
ls8_clear_mask = ls8_unpack_qa(ls8_data.pixel_qa, 'clear')
ls8_water_mask = ls8_unpack_qa(ls8_data.pixel_qa, 'water')
ls8_clean_mask = (ls8_clear_mask | ls8_water_mask) & ls8_data_mask
del ls8_clear_mask, ls8_water_mask
"""
Explanation: Acquire the clean mask
End of explanation
"""
from water_interoperability_utils.dc_water_classifier import wofs_classify
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
ls8_water = wofs_classify(ls8_data).wofs
ls8_water = ls8_water.where(ls8_clean_mask)
"""
Explanation: Acquire water classifications
End of explanation
"""
s1_data = dc.load(**common_load_params,
product='sentinel1_ghana_monthly',
time=(s1_times[0], s1_times[-1]),
dask_chunks = {'time': 1})
s1_data = s1_data.sel(time=corresponding_times['Sentinel-1'], method='nearest')
print(f"Subset the data to {len(s1_data.time)} times of near coincidence.")
"""
Explanation: Get Sentinel-1 water classifications
Load the data
End of explanation
"""
s1_not_nan_da = ~xr_nan(s1_data).to_array()
s1_clean_mask = s1_not_nan_da.min('variable')
del s1_not_nan_da
"""
Explanation: Acquire the clean mask
End of explanation
"""
from skimage.filters import threshold_otsu
thresh_vv = threshold_otsu(s1_data.vv.values)
thresh_vh = threshold_otsu(s1_data.vh.values)
binary_vv = s1_data.vv.values < thresh_vv
binary_vh = s1_data.vh.values < thresh_vh
s1_water = xr.DataArray(binary_vv & binary_vh, coords=s1_data.vv.coords,
dims=s1_data.vv.dims, attrs=s1_data.vv.attrs)
s1_water = s1_water.where(s1_clean_mask)
"""
Explanation: Acquire water classifications
End of explanation
"""
s2a_data = dc.load(**common_load_params,
product='s2a_msil2a',
time=(s2_times[0], s2_times[-1]),
dask_chunks = {'time': 1})
s2b_data = dc.load(**common_load_params,
product='s2b_msil2a',
time=(s2_times[0], s2_times[-1]),
dask_chunks = {'time': 1})
s2_data = xr.concat((s2a_data, s2b_data), dim='time').sortby('time')
s2_data = s2_data.sel(time=corresponding_times['Sentinel-2'], method='nearest')
print(f"Subsetting the data to {len(s2_data.time)} times of near coincidence.")
"""
Explanation: Get Sentinel-2 water classifications
Acquire the data
End of explanation
"""
# See figure 3 on this page for more information about the
# values of the scl data for Sentinel-2:
# https://earth.esa.int/web/sentinel/technical-guides/sentinel-2-msi/level-2a/algorithm
s2_clean_mask = s2_data.scl.isin([1, 2, 3, 4, 5, 6, 7, 10, 11])
"""
Explanation: Acquire the clean mask
End of explanation
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=Warning)
s2_water = wofs_classify(s2_data.rename(
{'nir_1': 'nir', 'swir_1': 'swir1', 'swir_2': 'swir2'})).wofs
s2_water = s2_water.where(s2_clean_mask)
ls8_data = ls8_data.compute()
ls8_clean_mask = ls8_clean_mask.compute()
s1_data = s1_data.compute()
s1_clean_mask = s1_clean_mask.compute()
s2_data = s2_data.compute()
s2_clean_mask = s2_clean_mask.compute()
"""
Explanation: Acquire water classifications
End of explanation
"""
intersected_clean_mask = xr.DataArray((ls8_clean_mask.values &
s1_clean_mask.values &
s2_clean_mask.values),
coords=ls8_clean_mask.coords,
dims=ls8_clean_mask.dims)
# Mask the water classes.
ls8_water = ls8_water.where(intersected_clean_mask.values)
s1_water = s1_water.where(intersected_clean_mask.values)
s2_water = s2_water.where(intersected_clean_mask.values)
# Remove any times with no data for any sensor.
times_to_keep_mask = (intersected_clean_mask.sum(['latitude', 'longitude']) / \
intersected_clean_mask.count(['latitude', 'longitude'])) > MIN_PCT_DATA
# The time indices to keep for visualization.
time_inds_subset = np.arange(len(ls8_data.time))[times_to_keep_mask.values]
intersected_clean_mask_subset = \
intersected_clean_mask.isel(time=time_inds_subset)
ls8_data_subset = ls8_data.isel(time=time_inds_subset)
ls8_clean_mask_subset = ls8_clean_mask.isel(time=time_inds_subset)
ls8_water_subset = ls8_water.isel(time=time_inds_subset)
s1_data_subset = s1_data.isel(time=time_inds_subset)
s1_clean_mask_subset = s1_clean_mask.isel(time=time_inds_subset)
s1_water_subset = s1_water.isel(time=time_inds_subset)
s2_data_subset = s2_data.isel(time=time_inds_subset)
s2_clean_mask_subset = s2_clean_mask.isel(time=time_inds_subset)
s2_water_subset = s2_water.isel(time=time_inds_subset)
"""
Explanation: <span id="water_interoperability_similarity_images">Show the RGB Representation of Time Slices and Water Classifications ▴</span>
Obtain the intersected clean mask for the sensors.
End of explanation
"""
water_alpha = 0.9
for time_ind in range(len(ls8_data_subset.time)):
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
# Mask out the water from the RGB so that its background segment is white instead of the RGB.
ls8_data_subset.where(ls8_water_subset != 1)[['red', 'green', 'blue']].isel(time=time_ind).to_array().plot.imshow(ax=ax[0], vmin=0, vmax=1750)
ls8_only_water = ls8_water_subset.where(ls8_water_subset == 1)
ls8_only_water.isel(time=time_ind).plot.imshow(ax=ax[0], cmap='Blues', alpha=water_alpha,
vmin=0, vmax=1, add_colorbar=False)
ax[0].set_xlabel('Longitude')
ax[0].set_ylabel('Latitude')
ax[0].set_title(f"Landsat 8 " \
f"({numpy.datetime_as_string(ls8_data_subset.time.values[time_ind], unit='D')})")
s1_data_subset.where(s1_water_subset != 1).vv.isel(time=time_ind).plot.imshow(ax=ax[1], cmap='gray', vmin=-30, vmax=-0, add_colorbar=False)
s1_only_water = s1_water_subset.where(s1_water_subset == 1)
s1_only_water.isel(time=time_ind).plot.imshow(ax=ax[1], cmap='Blues', alpha=water_alpha,
vmin=0, vmax=1, add_colorbar=False)
ax[1].set_xlabel('Longitude')
ax[1].set_ylabel('Latitude')
ax[1].set_title(f"Sentinel-1 " \
f"({numpy.datetime_as_string(s1_data_subset.time.values[time_ind], unit='D')})")
s2_data_subset.where(s2_water_subset != 1)[['red', 'green', 'blue']].isel(time=time_ind).to_array().plot.imshow(ax=ax[2], vmin=0, vmax=2500)
s2_only_water = s2_water_subset.where(s2_water_subset == 1)
s2_only_water.isel(time=time_ind).plot.imshow(ax=ax[2], cmap='Blues', alpha=water_alpha,
vmin=0, vmax=1, add_colorbar=False)
ax[2].set_xlabel('Longitude')
ax[2].set_ylabel('Latitude')
ax[2].set_title(f"Sentinel-2 " \
f"({numpy.datetime_as_string(s2_data_subset.time.values[time_ind], unit='D')})")
plt.tight_layout()
plt.show()
"""
Explanation: Show the data and water classifications for each sensor as they will be compared among the sensors (i.e. restricted to the intersection of their clean masks).
End of explanation
"""
ls8_water_subset_pct = \
ls8_water_subset.sum(['latitude', 'longitude']) / \
ls8_water_subset.count(['latitude', 'longitude']).compute()
s1_water_subset_pct = \
s1_water_subset.sum(['latitude', 'longitude']) / \
s1_water_subset.count(['latitude', 'longitude']).compute()
s1_water_subset_pct.time.values = ls8_water_subset_pct.time.values
s2_water_subset_pct = \
s2_water_subset.sum(['latitude', 'longitude']) / \
s2_water_subset.count(['latitude', 'longitude']).compute()
s2_water_subset_pct.time.values = ls8_water_subset_pct.time.values
import matplotlib.ticker as mtick
ax = plt.gca()
plot_format = dict(ms=6, marker='o', alpha=0.5)
(ls8_water_subset_pct*100).plot(ax=ax, **plot_format, label='Landsat 8')
(s1_water_subset_pct*100).plot(ax=ax, **plot_format, label='Sentinel-1')
(s2_water_subset_pct*100).plot(ax=ax, **plot_format, label='Sentinel-2')
plt.ylim(0,50)
ax.set_xlabel('Time')
ax.set_ylabel('Percent of Intersecting Data That Is Water')
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
plt.legend()
plt.title('Water %')
plt.show()
"""
Explanation: <span id="water_interoperability_similarity_pct_water_line_plot">Show the Per-time-slice Percent of Water According to Each Sensor as a Line Plot ▴</span>
End of explanation
"""
from itertools import combinations
ax = plt.gca()
water_das = [('Landsat 8', ls8_water_subset),
('Sentinel-1', s1_water_subset),
('Sentinel-2', s2_water_subset)]
for i, ((sensor_1, water_1), (sensor_2, water_2)) in enumerate(combinations(water_das, 2)):
lat_dim_ind = np.argmax(np.array(water_1.dims) == 'latitude')
lon_dim_ind = np.argmax(np.array(water_1.dims) == 'longitude')
similarity = (water_1.values == water_2.values).sum(axis=(lat_dim_ind, lon_dim_ind)) / \
intersected_clean_mask_subset.sum(['latitude', 'longitude'])
(similarity*100).plot.line(ax=ax, **plot_format, label=f'{sensor_1} vs {sensor_2}')
ax.set_xlabel('Time')
ax.set_ylabel('Percent of Same Classifications')
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
plt.legend()
plt.title('Similarity')
plt.show()
"""
Explanation: <span id="water_interoperability_similarity_pct_similarity_line_plot">Show the Per-time-slice Similarity (% of Matching Pixels) of Each Pair of Sensors as a Line Plot ▴</span>
End of explanation
"""
|
joekasp/ionic_liquids
|
ionic_liquids/examples/.ipynb_checkpoints/Example_Workflow-checkpoint.ipynb
|
mit
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
from rdkit.ML.Descriptors.MoleculeDescriptors import MolecularDescriptorCalculator as Calculator
"""
Explanation: Example of the Workflow
This is an example of the workflow in main.py in the ionic_liquids folder. First, we import the libraries necessary to run this program, including train_test_split, which splits a dataset into the training and test sets needed for machine learning.
End of explanation
"""
FILENAME = 'inputdata2.xlsx'
MODEL = 'mlp_regressor'
DIRNAME = 'my_test'
"""
Explanation: For this example, I will utilize the following filename, machine learning model, and directory name to save the model.
End of explanation
"""
def read_data(filename):
"""
Reads data in from given file to Pandas DataFrame
Inputs
-------
filename : string of path to file
Returns
------
df : Pandas DataFrame
"""
cols = filename.split('.')
name = cols[0]
filetype = cols[1]
if (filetype == 'csv'):
df = pd.read_csv(filename)
elif (filetype in ['xls', 'xlsx']):
df = pd.read_excel(filename)
else:
raise ValueError('Filetype not supported')
# clean the data if necessary
df['EC_value'], df['EC_error'] = zip(*df['ELE_COD'].map(lambda x: x.split('±')))
df = df.drop('EC_error', 1)
df = df.drop('ELE_COD', 1)
return df
df = read_data(FILENAME)
"""
Explanation: The following step prepares the data to be read by the machine learning methods. First, we need to get the data into a readable form and parse it if necessary. In our case, we need to parse the values and errors (separated by '±') in the last column of FILENAME.
End of explanation
"""
def molecular_descriptors(data):
"""
Use RDKit to prepare the molecular descriptor
Inputs
------
data: dataframe, cleaned csv data
Returns
------
prenorm_X: normalized input features
Y: experimental electrical conductivity
"""
n = data.shape[0]
# Choose which molecular descriptor we want
list_of_descriptors = ['NumHeteroatoms', 'ExactMolWt',
'NOCount', 'NumHDonors',
'RingCount', 'NumAromaticRings',
'NumSaturatedRings', 'NumAliphaticRings']
# Get the molecular descriptors and their dimension
calc = Calculator(list_of_descriptors)
D = len(list_of_descriptors)
d = len(list_of_descriptors)*2 + 4
Y = data['EC_value']
X = np.zeros((n, d))
X[:, -3] = data['T']
X[:, -2] = data['P']
X[:, -1] = data['MOLFRC_A']
for i in range(n):
A = Chem.MolFromSmiles(data['A'][i])
B = Chem.MolFromSmiles(data['B'][i])
X[i][:D] = calc.CalcDescriptors(A)
X[i][D:2*D] = calc.CalcDescriptors(B)
prenorm_X = pd.DataFrame(X,columns=['NUM', 'NumHeteroatoms_A',
'MolWt_A', 'NOCount_A','NumHDonors_A',
'RingCount_A', 'NumAromaticRings_A',
'NumSaturatedRings_A',
'NumAliphaticRings_A',
'NumHeteroatoms_B', 'MolWt_B',
'NOCount_B', 'NumHDonors_B',
'RingCount_B', 'NumAromaticRings_B',
'NumSaturatedRings_B',
'NumAliphaticRings_B',
'T', 'P', 'MOLFRC_A'])
prenorm_X = prenorm_X.drop('NumAliphaticRings_A', 1)
prenorm_X = prenorm_X.drop('NumAliphaticRings_B', 1)
return prenorm_X, Y
X, y = molecular_descriptors(df)
"""
Explanation: Second, we create an X matrix and a y vector returned by the molecular_descriptors function (defined in utils.py). The X matrix holds all of our inputs for the machine learning, whereas the y vector holds the measured electrical conductivity values.
End of explanation
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
"""
Explanation: We can prepare our training and testing data sets for machine learning using train_test_split, a function from Python's sklearn module.
End of explanation
"""
def normalization(data, means=None, stdevs=None):
"""
Normalizes the data using the means and standard
deviations given, calculating them otherwise.
Returns the means and standard deviations of columns.
Inputs
------
data : Pandas DataFrame
means : optional numpy argument of column means
stdevs : optional numpy argument of column st. devs
Returns
------
normed : the normalized DataFrame
means : the numpy row vector of column means
stdevs : the numpy row vector of column st. devs
"""
cols = data.columns
data = data.values
if (means is None) or (stdevs is None):
means = np.mean(data, axis=0)
stdevs = np.std(data, axis=0, ddof=1)
else:
means = np.array(means)
stdevs = np.array(stdevs)
# handle special case of one row
if (len(data.shape) == 1) or (data.shape[0] == 1):
for i in range(len(data)):
data[i] = (data[i] - means[i]) / stdevs[i]
else:
for i in range(data.shape[1]):
data[:,i] = (data[:,i] - means[i]*np.ones(data.shape[0])) / stdevs[i]
normed = pd.DataFrame(data, columns=cols)
return normed, means, stdevs
X_train, X_mean, X_std = normalization(X_train)
X_test, trash, trash = normalization(X_test, X_mean, X_std)
"""
Explanation: Next, the program normalizes the training data and then normalizes the testing data using the training-set statistics. This also provides us with the mean and standard deviation of each column of X.
End of explanation
"""
# Note: this assumes the `methods` module from the ionic_liquids package has already been
# imported (e.g. `from ionic_liquids import methods`); that import is not shown in this notebook.
if (MODEL.lower() == 'mlp_regressor'):
obj = methods.do_MLP_regressor(X_train, y_train)
elif (MODEL.lower() == 'lasso'):
obj = methods.do_lasso(X_train, y_train)
elif (MODEL.lower() == 'svr'):
obj = methods.do_svr(X_train, y_train)
else:
raise ValueError("Model not supported")
"""
Explanation: We coded three models into our program: MLP_regressor, LASSO, and SVR. Each of these models is well documented in sklearn, a Python library. In the actual program you can use any of the three models, but for the purpose of this example we chose mlp_regressor. The ValueError is only raised if you do not use one of the three models, for example if you were to change MODEL to 'MLP_classifier'.
End of explanation
"""
import os
from datetime import datetime
import joblib  # on older scikit-learn versions this was `from sklearn.externals import joblib`

def save_model(obj, X_mean, X_stdev, X=None, y=None, dirname='default'):
"""
Save the trained regressor model to the file
Input
------
obj: model object
X_mean : mean for each column of training X
X_stdev : stdev for each column of training X
X : Predictor matrix
y : Response vector
dirname : the directory to save contents
Returns
------
None
"""
if (dirname == 'default'):
timestamp = str(datetime.now())[:19]
dirname = 'model_'+timestamp.replace(' ', '_')
else:
pass
if not os.path.exists(dirname):
os.makedirs(dirname)
filename = dirname + '/model.pkl'
joblib.dump(obj, filename)
joblib.dump(X_mean, dirname+'/X_mean.pkl')
joblib.dump(X_stdev, dirname+'/X_stdev.pkl')
if (X is not None):
filename = dirname + '/X_data.pkl'
joblib.dump(X, filename)
else:
pass
if (y is not None):
filename = dirname + '/y_data.pkl'
joblib.dump(y, filename)
else:
pass
return
save_model(obj, X_mean, X_std, X_train, y_train, dirname=DIRNAME)
"""
Explanation: After the method is called, the trained model is returned as an object. This object is saved, along with the mean, the standard deviation, and the training set, in the directory named DIRNAME. This step is not as important for the workflow, but it is vital to the success of the graphical user interface.
End of explanation
"""
FIG_SIZE = (6, 6)  # assumed default; FIG_SIZE is not defined elsewhere in this notebook
def parity_plot(y_pred, y_act):
"""
Creates a parity plot
Input
-----
y_pred : predicted values from the model
y_act : 'true' (actual) values
Output
------
fig : matplotlib figure
"""
fig = plt.figure(figsize=FIG_SIZE)
plt.scatter(y_act, y_pred)
plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],
lw=4, color='r')
plt.xlabel('Actual')
plt.ylabel('Predicted')
return fig
my_plot = parity_plot(obj.predict(X_train), y_train)
plt.show(my_plot)
"""
Explanation: Lastly, the predicted values are scatter-plotted against the experimental values using the parity_plot function. The plt.show() call simply displays the plot.
End of explanation
"""
|
xdnian/pyml
|
assignments/ex01_xdnian.ipynb
|
mit
|
# the function
def sort(values):
# insert your code here
for j in range(len(values)-1,0,-1):
for i in range(0, j):
if values[i] > values[i+1]:
values[i], values[i+1] = values[i+1], values[i]
return values
# main
import numpy as np
# different random seed
np.random.seed()
# generate numbers
N = 10
# the TA will vary the input array size and content during testing
values = np.random.random([N])
sort(values)
correct = True
for index in range(1, len(values)):
if(values[index-1] > values[index]):
correct = False
print('Correct? ' + str(correct))
"""
Explanation: COMP 3314 Assignment 1
<br><br>
Nian Xiaodong (3035087112)
Python + ipynb
The goal of this assignment is to learn/review python and ipynb.
Python is a popular programming language, and also interfaced for several machine learning libraries, such as scikit-learn, Theano, and TensorFlow.
Ipynb is a digital notebook format that allows flexible incorporation of a variety of information, such as code (e.g. python), data, text (e.g. markdown, html, and Latex), images (common raster/vector graphics formats such as jpg and svg), and video (e.g. youtube).
We can also run code and experiments directly inside ipynbs.
Thus, we will use ipynb for all assignments in this class.
Sorting
As a starting exercise, let's try to implement a sorting function via python.
The input to the function is a python array consisting of an arbitrary sequence of numbers.
The output is a sorted sequence with numbers ranging from small to large.
The code stub, along with the test driver, are shown below.
There are various algorithms for sorting with different time complexities with respect to the array size $N$, e.g. $O(N^2)$ for bubble sort and $O(N \log N)$ for quick sort.
You can choose any algorithm to implement, as long as it produces correct results with reasonable run-time.
Please submit a single ipynb file, consisting of python code in a code cell and descriptions (including algorithm and analysis of complexity) in a markdown cell.
You can use this ipynb as a start, or create your own.
Code (20 points)
Please implement your algorithm via the function below.
End of explanation
"""
# line model
import numpy as np
class Line(object):
def __init__(self, w0, w1):
self.w0 = w0
self.w1 = w1
def predict(self, x, noise=0):
return (x*self.w1 + self.w0 + noise*np.random.normal())
# Input: data, a 2D array with each (x, t) pair on a row
# Return: w0 and w1, the intercept and slope of the fitted line
def learn(self, data):
# math equations derived above
N = len(data)
sumX = sum(r[0] for r in data)
sumT = sum(r[1] for r in data)
sumX2 = sum(pow(r[0],2) for r in data)
sumXT = sum((r[0]*r[1]) for r in data)
w1 = (N*sumXT - sumX*sumT) / (N*sumX2 - pow(sumX, 2))
w0 = (sumT - w1*sumX) / N
return w0, w1
# test
np.random.seed()
w0 = np.asscalar(np.random.random(1))*2-1
w1 = np.asscalar(np.random.random(1))*2-1
line = Line(w0, w1)
N = 20
noise = 0.05
X = np.random.random([N])
T = []
for x in X:
T.append(np.sum(line.predict(x, noise)))
T = np.array(T)
#data = np.vstack((X, T)).transpose()
data = np.array([X, T]).transpose()
w0_fit, w1_fit = line.learn(data)
line_fit = Line(w0_fit, w1_fit)
print('truth: ' + str(w0) + ' ' + str(w1))
print('predict: ' + str(w0_fit) + ' ' + str(w1_fit))
# plot
%matplotlib inline
import matplotlib.pyplot as plt
plt.scatter(data[:, 0], data[:, 1], color='black', marker='o')
X_endpoints = [0, 1]
Y_truth, Y_fit = [], []
for x in X_endpoints:
Y_truth.append(line.predict(x))
Y_fit.append(line_fit.predict(x))
plt.plot(X_endpoints, Y_truth, color='blue', label='truth')
plt.plot(X_endpoints, Y_fit, color='red', label='predict')
plt.legend(loc='best')
plt.tight_layout()
plt.show()
"""
Explanation: Line fitting
<img src="./images/01_04.png" width=50%>
Given a set of data points $\left(\mathbf{X}, \mathbf{Y}\right)$, fit a model curve to describe their relationship.
This is actually a regression problem, but we have all seen this in prior math/coding classes to serve as a good example for machine learning.
Recall $\mathbf{Y} = f(\mathbf{X}, \Theta)$ is our model.
For 2D linear curve fitting, the model is a straight line:
$y = w_1 x + w_0$, so the parameters $\Theta = {w_0, w_1}$.
The loss function is $L\left(\mathbf{X}, \mathbf{T}, \mathbf{Y}\right) = \sum_i \left( T^{(i)} - Y^{(i)}\right)^2 = \sum_i \left( T^{(i)} - w_1 X^{(i)} - w_0 \right)^2$.
<br>
($\mathbf{X}$ is a matrix/tensor, and each data sample is a row. We denote the ith sample/row as $\mathbf{X}^{(i)}$.)
For this simple example we don't care about regularization, thus $P(\Theta) = 0$.
The goal is to optimize $\Theta = {w_0, w_1 }$ with given $\left(\mathbf{X}, \mathbf{Y}\right)$ to minimize $L$.
For simple cases like this, we can directly optimize via calculus:
$$
\begin{align}
\frac{\partial L}{\partial w_0} & = 0 \\
\frac{\partial L}{\partial w_1} & = 0
\end{align}
$$
Math (30 points)
Write down explicit formulas for $w_0$ and $w_1$ in terms of $\mathbf{X}$ and $\mathbf{T}$.
To minimize $L$,
$$
\left\{\begin{matrix}
\frac{\partial L}{\partial w_0} = \frac{\partial}{\partial w_0}\sum_i \left ( T^{(i)} - w_1 X^{(i)} - w_0 \right )^2 = 0\\
\frac{\partial L}{\partial w_1} = \frac{\partial}{\partial w_1}\sum_i \left ( T^{(i)} - w_1 X^{(i)} - w_0 \right )^2 = 0
\end{matrix}\right.
$$
Thus, we get
$$
\left\{\begin{matrix}
w_1 = \frac{(\sum_i 1)(\sum_i X^{(i)}T^{(i)}) - (\sum_i X^{(i)})(\sum_i T^{(i)})}{(\sum_i 1)(\sum_i (X^{(i)})^2) - (\sum_i X^{(i)})^2}\\
w_0 = \frac{\sum_i T^{(i)} - w_1(\sum_i X^{(i)})}{\sum_i 1}
\end{matrix}\right.
$$
Code (50 points)
Implement your math above in the code below.
End of explanation
"""
|
Kaggle/learntools
|
notebooks/embeddings/raw/1-embeddings.ipynb
|
apache-2.0
|
#$HIDE_INPUT$
# Setup. Import libraries and load dataframes for Movielens data.
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import keras
import os
import random
tf.set_random_seed(1); np.random.seed(1); random.seed(1) # Set random seeds for reproducibility
#_RM_
input_dir = '../input/movielens_preprocessed'
#_UNCOMMENT_
#input_dir = '../input'
ratings_path = os.path.join(input_dir, 'rating.csv')
ratings_df = pd.read_csv(ratings_path, usecols=['userId', 'movieId', 'rating', 'y'])
movies_df = pd.read_csv(os.path.join(input_dir, 'movie.csv'), usecols=['movieId', 'title', 'year'])
df = ratings_df.merge(movies_df, on='movieId').sort_values(by='userId')
df = df.sample(frac=1, random_state=1) # Shuffle
df.sample(5, random_state=1)
"""
Explanation: Welcome to our first lesson on the topic of embeddings. In this lesson, I'll show how to implement a model with embedding layers using the tf.keras API. Embeddings are a technique that enable deep neural nets to work with sparse categorical variables.
Sparse Categorical Variables
By this I mean a categorical variable with lots of possible values (high cardinality), with a small number of them (often just 1) present in any given observation. One good example is words. There are hundreds of thousands of them in the English language, but a single tweet might only have a dozen. Word embeddings are a crucial technique for applying deep learning to natural language. But other examples abound.
For example, this dataset of LA county restaurant inspections has several sparse categorical variables, including:
- employee_id: which of the health department's employees performed this inspection? (~250 distinct values)
- facility_zip: what zip code is the restaurant located in? (~3,000 distinct values)
- owner_name: who owns the restaurant? (~35,000 distinct values)
An embedding layer would be a good idea for using any of these variables as inputs to a network.
In this lesson, I'll be using the MovieLens dataset as an example.
MovieLens
The MovieLens dataset consists of ratings assigned to movies by users. Here's a sample:
End of explanation
"""
#$HIDE_INPUT$
n_movies = len(df.movieId.unique())
n_users = len(df.userId.unique())
print(
"{1:,} distinct users rated {0:,} different movies (total ratings = {2:,})".format(
n_movies, n_users, len(df),
)
)
"""
Explanation: Ratings range from 0.5 stars to 5. Our goal will be to predict the rating a given user $u_i$ will give a particular movie $m_j$. (The column y is just a copy of the rating column with the mean subtracted - this will be useful later.)
userId and movieId are both sparse categorical variables. They have many possible values:
End of explanation
"""
model = keras.Sequential([
# 2 input values: user id and movie id
keras.layers.Dense(256, input_dim=2, activation='relu'),
keras.layers.Dense(32, activation='relu'),
# A single output node, containing the predicted rating
keras.layers.Dense(1)
])
"""
Explanation: Building a rating prediction model in Keras
We want to build a model that takes a user, $u_i$ and a movie, $m_j$, and outputs a number from 0.5-5, representing how many stars we think this user would give that movie.
Aside: You may have noticed that the MovieLens dataset includes information about each movie such as its title, its year of release, a set of genres and user-assigned tags. But for now, we're not going to try to exploit any of that extra information.
I claim we need an embedding layer to handle these inputs. Why? Let's review some alternatives and see why they don't work.
Bad idea #1: Use user ids and movie ids as numerical inputs
Why not feed in user ids and movie ids as inputs, then add on some dense layers and call it a day? i.e.:
End of explanation
"""
input_size = n_movies + n_users
print("Input size = {:,} ({:,} movies + {:,} users)".format(
input_size, n_movies, n_users,
))
model = keras.Sequential([
# One hidden layer with 128 units
keras.layers.Dense(128, input_dim=input_size, activation='relu'),
# A single output node, containing the predicted rating
keras.layers.Dense(1)
])
model.summary()
"""
Explanation: In the simplest terms, neural nets work by doing math on their inputs. But the actual numerical values of the ids assigned to users and movies are meaningless. Schindler's List has id 527 and The Usual Suspects has id 50, but that doesn't mean Schindler's List is 'ten times bigger' than The Usual Suspects.
Bad idea #2: One-hot encoded user and movie inputs
If you're not familiar with one-hot encoding, you may want to check out our lesson Using Categorical Data with One Hot Encoding.
In that lesson, we claim that one-hot encoding is "The Standard Approach for Categorical Data". So why is it a bad idea here? Let's see what a model would look like that took one-hot encoded users and movies.
End of explanation
"""
hidden_units = (32,4)
movie_embedding_size = 8
user_embedding_size = 8
# Each instance will consist of two inputs: a single user id, and a single movie id
user_id_input = keras.Input(shape=(1,), name='user_id')
movie_id_input = keras.Input(shape=(1,), name='movie_id')
user_embedded = keras.layers.Embedding(df.userId.max()+1, user_embedding_size,
input_length=1, name='user_embedding')(user_id_input)
movie_embedded = keras.layers.Embedding(df.movieId.max()+1, movie_embedding_size,
input_length=1, name='movie_embedding')(movie_id_input)
# Concatenate the embeddings (and remove the useless extra dimension)
concatenated = keras.layers.Concatenate()([user_embedded, movie_embedded])
out = keras.layers.Flatten()(concatenated)
# Add one or more hidden layers
for n_hidden in hidden_units:
out = keras.layers.Dense(n_hidden, activation='relu')(out)
# A single output: our predicted rating
out = keras.layers.Dense(1, activation='linear', name='prediction')(out)
model = keras.Model(
inputs = [user_id_input, movie_id_input],
outputs = out,
)
model.summary(line_length=88)
"""
Explanation: A basic issue here is scaling and efficiency. A single input to our model is a vector of 165,237 numbers (of which we know that 165,235 will be zeros). The feature data for our whole dataset of 20 million rating instances will require a 2-d array of size 20,000,000 x 165,237, or about 3 trillion numbers. Good luck fitting that all into memory at once!
Also, doing training and inference on our model will be inefficient. To calculate the activations of our first hidden layer, we'll need to multiply our 165k inputs through about 21 million weights - but the vast, vast majority of those products will just be zero.
One-hot encoding is fine for categorical variables with a small number of possible values, like {Red, Yellow, Green}, or {Monday, Tuesday, Wednesday, Friday, Saturday, Sunday}. But it's not so great in cases like our movie recommendation problem, where variables have tens or hundreds of thousands of possible values.
Good idea: Embedding layers
In short, an embedding layer maps each element in a set of discrete things (like words, users, or movies) to a dense vector of real numbers (its embedding).
Aside: A key implementation detail is that embedding layers take as input the index of the entity being embedded (i.e. we can give it our userIds and movieIds as input). You can think of it as a sort of 'lookup table'. This is much more efficient than taking a one-hot vector and doing a huge matrix multiplication!
As an example, if we learn embeddings of size 8 for movies, the embedding for Legally Blonde (index=4352) might look like:
$$[ 1.624, -0.612, -0.528, -1.073, 0.865, -2.302, 1.745, -0.761]$$
Where do these come from? We initialize an embedding for each user and movie using random noise, then we train them as part of the process of training the overall rating-prediction model.
What do they mean? An object's embedding, if it's any good, should capture some useful latent properties of that object. But the key word here is latent AKA hidden. It's up to the model to discover whatever properties of the entities are useful for the prediction task, and encode them in the embedding space. Sound mysterious? In later lessons I'll show some techniques for interpreting learned embeddings, such as visualizing them with the t-SNE algorithm.
Implementing it
I want my model to look something like this:
A key thing to note is that this network is not simply a stack of layers from input to output. We're treating the user and the movie as separate inputs, which come together only after each has gone through its own embedding layer.
This means that the keras.Sequential class (which you may be familiar with from our course on deep learning with image data) won't work. We'll need to turn to the more powerful 'functional API', using the keras.Model class. For more detail on the Functional API, check out Keras's guide, here.
Here's the code:
End of explanation
"""
model.compile(
# Technical note: when using embedding layers, I highly recommend using one of the optimizers
# found in tf.train: https://www.tensorflow.org/api_guides/python/train#Optimizers
# Passing in a string like 'adam' or 'SGD' will load one of keras's optimizers (found under
# tf.keras.optimizers). They seem to be much slower on problems like this, because they
# don't efficiently handle sparse gradient updates.
tf.train.AdamOptimizer(0.005),
loss='MSE',
metrics=['MAE'],
)
"""
Explanation: Training it
We'll compile our model to minimize squared error ('MSE'). We'll also include absolute error ('MAE') as a metric to report during training, since it's a bit easier to interpret.
Something to think about: We know that ratings can only take on the values {0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5} - so why not treat this as a multiclass classification problem with 10 classes, one for each possible star rating?
End of explanation
"""
history = model.fit(
[df.userId, df.movieId],
df.y,
batch_size=5000,
epochs=20,
verbose=0,
validation_split=.05,
);
"""
Explanation: Let's train the model.
Aside: I'm passing in df.y as my target variable rather than df.rating. The y column is just a 'centered' version of the rating - i.e. the rating column minus its mean over the training set. For example, if the overall average rating in the training set was 3 stars, then we would translate 3 star ratings to 0, 5 star ratings to 2.0, etc. to get y. This is a common practice in deep learning, and tends to help achieve better results in fewer epochs. For more details, feel free to check out this kernel with all the preprocessing I performed on the MovieLens dataset.
End of explanation
"""
#$HIDE_INPUT$
from sklearn.model_selection import train_test_split
from sklearn import metrics
df_train, df_val = train_test_split(df, test_size=.05, random_state=1)
def get_metrics(y_true, y_pred):
return metrics.mean_absolute_error(y_true, y_pred), metrics.mean_squared_error(y_true, y_pred)
mean_rating = df_train['rating'].mean()
print("Average rating in training set is {:.2f} stars".format(mean_rating))
y_true = df_val['rating'].values
always_mean = np.full(y_true.shape, mean_rating)
mae, mse = get_metrics(y_true, always_mean)
print("Always predicting global average rating results in Mean Absolute Error={:.2f}, Mean Squared Error={:.2f}".format(
mae, mse))
movies = movies_df.copy().set_index('movieId')
mean_per_movie = df_train.groupby('movieId')['rating'].mean()
movies['mean_rating'] = mean_per_movie
ratings_per_movie = df_train.groupby('movieId').size()
movies['n_ratings'] = ratings_per_movie
# There are a few movies in the validation set not present in the training set. We'll just use the global
# mean rating in their case.
y_movie_mean = df_val.join(mean_per_movie, on='movieId', rsuffix='mean')['ratingmean'].fillna(mean_rating).values
mae, mse = get_metrics(y_true, y_movie_mean)
print("Predicting mean per movie results in Mean Absolute Error={:.2f}, Mean Squared Error={:.2f}".format(mae, mse))
"""
Explanation: To judge whether our model is any good, it'd be helpful to have a baseline. In the cell below, we calculate the error of a couple dumb baselines: always predicting the global average rating, and predicting the average rating per movie:
End of explanation
"""
#$HIDE_INPUT$
fig, ax = plt.subplots(figsize=(15, 6))
ax.plot(history.epoch, history.history['val_mean_absolute_error'], label='Validation MAE')
ax.plot(history.epoch, history.history['mean_absolute_error'], label='Training MAE')
ax.set_xlabel('Epoch')
ax.set_ylabel('Mean Absolute Error')
ax.set_xlim(left=0, right=history.epoch[-1])
baseline_mae = 0.73
ax.axhline(baseline_mae, ls='--', label='Baseline', color='#002255', alpha=.5)
ax.grid()
fig.legend();
#$HIDE_INPUT$
# Save training history for later comparison
hdf = pd.DataFrame(dict(
epoch=history.epoch,
val_mae=history.history['val_mean_absolute_error'],
train_mae=history.history['mean_absolute_error'],
))
hdf.to_csv('history-1.csv')
"""
Explanation: Here's a plot of our embedding model's absolute error over time. For comparison, our best baseline (predicting the average rating per movie) is marked with a dotted line:
End of explanation
"""
#$HIDE_INPUT$
ratings_per_user = df.groupby('userId').size()
uid = ratings_per_user[ratings_per_user < 30].sample(1, random_state=1).index[0]
user_ratings = df[df.userId==uid]
print("User #{} has rated {} movies (avg. rating = {:.1f}):".format(
uid, len(user_ratings), user_ratings['rating'].mean(),
))
cols = ['userId', 'movieId', 'rating', 'title', 'year']
user_ratings.sort_values(by='rating', ascending=False)[cols]
"""
Explanation: Compared to the baseline, we were able to get our average error down by more than .1 stars (or about 15%). Not bad!
Example predictions
Let's try some example predictions as a sanity check. We'll start by picking out a specific user from the dataset at random.
End of explanation
"""
candidate_movies = movies[
movies.title.str.contains('Naked Gun')
| (movies.title == 'The Sisterhood of the Traveling Pants')
| (movies.title == 'Lilo & Stitch')
].copy()
preds = model.predict([
[uid] * len(candidate_movies), # User ids
candidate_movies.index, # Movie ids
])
# NB: Remember we trained on 'y', which was a version of the rating column centered on 0. To translate
# our model's output values to the original [0.5, 5] star rating scale, we need to 'uncenter' the
# values, by adding the mean back
row = df.iloc[0] # The difference between rating and y will be the same for all rows, so we can just use the first
y_delta = row.rating - row.y
candidate_movies['predicted_rating'] = preds + y_delta
# Add a column with the difference between our predicted rating (for this user) and the movie's
# overall average rating across all users in the dataset.
candidate_movies['delta'] = candidate_movies['predicted_rating'] - candidate_movies['mean_rating']
candidate_movies.sort_values(by='delta', ascending=False)
"""
Explanation: User 26556 has given out two perfect ratings to the movies Airplane! and Airplane II: The Sequel. Great choices! Perhaps they'd also enjoy the The Naked Gun series - another series of spoof films starring Leslie Nielsen.
We don't have as much evidence about what this user hates. Rather than extrapolating from their few low ratings, a better indication of this user's dislikes might be the kinds of movies they haven't even rated. Let's also throw in a couple examples of movies that this user seems unlikely to ever watch, according to their rating history.
End of explanation
"""
|
miguelfrde/stanford-cs231n
|
assignment2/FullyConnectedNets.ipynb
|
mit
|
# As usual, a bit of setup
from __future__ import print_function
import time
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Load the (preprocessed) CIFAR10 data.
data = get_CIFAR10_data()
for k, v in list(data.items()):
print(('%s: ' % k, v.shape))
"""
Explanation: Fully-Connected Neural Nets
In the previous homework you implemented a fully-connected two-layer neural network on CIFAR-10. The implementation was simple but not very modular since the loss and gradient were computed in a single monolithic function. This is manageable for a simple two-layer network, but would become impractical as we move to bigger models. Ideally we want to build networks using a more modular design so that we can implement different layer types in isolation and then snap them together into models with different architectures.
In this exercise we will implement fully-connected networks using a more modular approach. For each layer we will implement a forward and a backward function. The forward function will receive inputs, weights, and other parameters and will return both an output and a cache object storing data needed for the backward pass, like this:
```python
def layer_forward(x, w):
""" Receive inputs x and weights w """
# Do some computations ...
z = # ... some intermediate value
# Do some more computations ...
out = # the output
cache = (x, w, z, out) # Values we need to compute gradients
return out, cache
```
The backward pass will receive upstream derivatives and the cache object, and will return gradients with respect to the inputs and weights, like this:
```python
def layer_backward(dout, cache):
"""
Receive derivative of loss with respect to outputs and cache,
and compute derivative with respect to inputs.
"""
# Unpack cache values
x, w, z, out = cache
# Use values in cache to compute derivatives
dx = # Derivative of loss with respect to x
dw = # Derivative of loss with respect to w
return dx, dw
```
After implementing a bunch of layers this way, we will be able to easily combine them to build classifiers with different architectures.
In addition to implementing fully-connected networks of arbitrary depth, we will also explore different update rules for optimization, and introduce Dropout as a regularizer and Batch Normalization as a tool to more efficiently optimize deep networks.
End of explanation
"""
# Test the affine_forward function
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297],
[ 3.25553199, 3.5141327, 3.77273342]])
# Compare your output with ours. The error should be around 1e-9.
print('Testing affine_forward function:')
print('difference: ', rel_error(out, correct_out))
"""
Explanation: Affine layer: foward
Open the file cs231n/layers.py and implement the affine_forward function.
Once you are done you can test your implementaion by running the following:
End of explanation
"""
# Test the affine_backward function
np.random.seed(231)
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
# The error should be around 1e-10
print('Testing affine_backward function:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
"""
Explanation: Affine layer: backward
Now implement the affine_backward function and test your implementation using numeric gradient checking.
End of explanation
"""
# Test the relu_forward function
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[ 0., 0., 0., 0., ],
[ 0., 0., 0.04545455, 0.13636364,],
[ 0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be around 5e-8
print('Testing relu_forward function:')
print('difference: ', rel_error(out, correct_out))
"""
Explanation: ReLU layer: forward
Implement the forward pass for the ReLU activation function in the relu_forward function and test your implementation using the following:
End of explanation
"""
np.random.seed(231)
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be around 3e-12
print('Testing relu_backward function:')
print('dx error: ', rel_error(dx_num, dx))
"""
Explanation: ReLU layer: backward
Now implement the backward pass for the ReLU activation function in the relu_backward function and test your implementation using numeric gradient checking:
End of explanation
"""
from cs231n.layer_utils import affine_relu_forward, affine_relu_backward
np.random.seed(231)
x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = affine_relu_forward(x, w, b)
dx, dw, db = affine_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)
print('Testing affine_relu_forward:')
print('dx error: ', rel_error(dx_num, dx))
print('dw error: ', rel_error(dw_num, dw))
print('db error: ', rel_error(db_num, db))
"""
Explanation: "Sandwich" layers
There are some common patterns of layers that are frequently used in neural nets. For example, affine layers are frequently followed by a ReLU nonlinearity. To make these common patterns easy, we define several convenience layers in the file cs231n/layer_utils.py.
For now take a look at the affine_relu_forward and affine_relu_backward functions, and run the following to numerically gradient check the backward pass:
End of explanation
"""
np.random.seed(231)
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)
dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False)
loss, dx = svm_loss(x, y)
# Test svm_loss function. Loss should be around 9 and dx error should be 1e-9
print('Testing svm_loss:')
print('loss: ', loss)
print('dx error: ', rel_error(dx_num, dx))
dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print('\nTesting softmax_loss:')
print('loss: ', loss)
print('dx error: ', rel_error(dx_num, dx))
"""
Explanation: Loss layers: Softmax and SVM
You implemented these loss functions in the last assignment, so we'll give them to you for free here. You should still make sure you understand how they work by looking at the implementations in cs231n/layers.py.
You can make sure that the implementations are correct by running the following:
End of explanation
"""
np.random.seed(231)
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
std = 1e-3
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std)
print('Testing initialization ... ')
W1_std = abs(model.params['W1'].std() - std)
b1 = model.params['b1']
W2_std = abs(model.params['W2'].std() - std)
b2 = model.params['b2']
assert W1_std < std / 10, 'First layer weights do not seem right'
assert np.all(b1 == 0), 'First layer biases do not seem right'
assert W2_std < std / 10, 'Second layer weights do not seem right'
assert np.all(b2 == 0), 'Second layer biases do not seem right'
print('Testing test-time forward pass ... ')
model.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T
scores = model.loss(X)
correct_scores = np.asarray(
[[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],
[12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143],
[12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319 ]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'
print('Testing training loss (no regularization)')
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'
model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'
for reg in [0.0, 0.7]:
print('Running numeric gradient check with reg = ', reg)
model.reg = reg
loss, grads = model.loss(X, y)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
"""
Explanation: Two-layer network
In the previous assignment you implemented a two-layer neural network in a single monolithic class. Now that you have implemented modular versions of the necessary layers, you will reimplement the two layer network using these modular implementations.
Open the file cs231n/classifiers/fc_net.py and complete the implementation of the TwoLayerNet class. This class will serve as a model for the other networks you will implement in this assignment, so read through it to make sure you understand the API. You can run the cell below to test your implementation.
End of explanation
"""
model = TwoLayerNet()
solver = None
##############################################################################
# TODO: Use a Solver instance to train a TwoLayerNet that achieves at least #
# 50% accuracy on the validation set. #
##############################################################################
solver = Solver(model, data,
update_rule='sgd',
optim_config={'learning_rate': 1e-3},
lr_decay=0.95,
num_epochs=10,
batch_size=100,
print_every=100)
solver.train()
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Run this cell to visualize training loss and train / val accuracy
plt.subplot(2, 1, 1)
plt.title('Training loss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')
plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(solver.train_acc_history, '-o', label='train')
plt.plot(solver.val_acc_history, '-o', label='val')
plt.plot([0.5] * len(solver.val_acc_history), 'k--')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(15, 12)
plt.show()
"""
Explanation: Solver
In the previous assignment, the logic for training models was coupled to the models themselves. Following a more modular design, for this assignment we have split the logic for training models into a separate class.
Open the file cs231n/solver.py and read through it to familiarize yourself with the API. After doing so, use a Solver instance to train a TwoLayerNet that achieves at least 50% accuracy on the validation set.
End of explanation
"""
np.random.seed(231)
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))
for reg in [0, 3.14]:
print('Running check with reg = ', reg)
model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
reg=reg, weight_scale=5e-2, dtype=np.float64)
loss, grads = model.loss(X, y)
print('Initial loss: ', loss)
for name in sorted(grads):
f = lambda _: model.loss(X, y)[0]
grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
print('%s relative error: %.2e' % (name, rel_error(grad_num, grads[name])))
"""
Explanation: Multilayer network
Next you will implement a fully-connected network with an arbitrary number of hidden layers.
Read through the FullyConnectedNet class in the file cs231n/classifiers/fc_net.py.
Implement the initialization, the forward pass, and the backward pass. For the moment don't worry about implementing dropout or batch normalization; we will add those features soon.
Initial loss and gradient check
As a sanity check, run the following to check the initial loss and to gradient check the network both with and without regularization. Do the initial losses seem reasonable?
For gradient checking, you should expect to see errors around 1e-6 or less.
End of explanation
"""
# TODO: Use a three-layer Net to overfit 50 training examples.
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
weight_scale = 1e-2
learning_rate = 0.01
model = FullyConnectedNet([100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
"""
Explanation: As another sanity check, make sure you can overfit a small dataset of 50 images. First we will try a three-layer network with 100 units in each hidden layer. You will need to tweak the learning rate and initialization scale, but you should be able to overfit and achieve 100% training accuracy within 20 epochs.
End of explanation
"""
# TODO: Use a five-layer Net to overfit 50 training examples.
num_train = 50
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
learning_rate = 0.01
weight_scale = 0.04
model = FullyConnectedNet([100, 100, 100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
"""
Explanation: Now try to use a five-layer network with 100 units on each layer to overfit 50 training examples. Again you will have to adjust the learning rate and weight initialization, but you should be able to achieve 100% training accuracy within 20 epochs.
End of explanation
"""
from cs231n.optim import sgd_momentum
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
v = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-3, 'velocity': v}
next_w, _ = sgd_momentum(w, dw, config=config)
expected_next_w = np.asarray([
[ 0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789],
[ 0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526],
[ 0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263],
[ 1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096 ]])
expected_velocity = np.asarray([
[ 0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158],
[ 0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105],
[ 0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053],
[ 0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096 ]])
print('next_w error: ', rel_error(next_w, expected_next_w))
print('velocity error: ', rel_error(expected_velocity, config['velocity']))
"""
Explanation: Inline question:
Did you notice anything about the comparative difficulty of training the three-layer net vs training the five layer net?
Answer:
Tuning was noticeably harder with the five-layer net. Large weight scales eventually caused overflow, while very small weights led to noisy training with no clear decrease in loss. I didn't find tuning the learning rate to be more difficult.
Update rules
So far we have used vanilla stochastic gradient descent (SGD) as our update rule. More sophisticated update rules can make it easier to train deep networks. We will implement a few of the most commonly used update rules and compare them to vanilla SGD.
SGD+Momentum
Stochastic gradient descent with momentum is a widely used update rule that tends to make deep networks converge faster than vanilla stochastic gradient descent.
Open the file cs231n/optim.py and read the documentation at the top of the file to make sure you understand the API. Implement the SGD+momentum update rule in the function sgd_momentum and run the following to check your implementation. You should see errors less than 1e-8.
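For orientation, a hedged sketch of the rule is shown below (an assumption; the graded implementation belongs in cs231n/optim.py, and the config keys follow the optim API used in the check above):
Python
def sgd_momentum_sketch(w, dw, config=None):
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('momentum', 0.9)
    v = config.get('velocity', np.zeros_like(w))
    # Accumulate an exponentially decaying velocity, then step along it.
    v = config['momentum'] * v - config['learning_rate'] * dw
    next_w = w + v
    config['velocity'] = v
    return next_w, config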
End of explanation
"""
num_train = 4000
small_data = {
'X_train': data['X_train'][:num_train],
'y_train': data['y_train'][:num_train],
'X_val': data['X_val'],
'y_val': data['y_val'],
}
solvers = {}
for update_rule in ['sgd', 'sgd_momentum']:
print('running with ', update_rule)
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': 1e-2,
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in list(solvers.items()):
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
"""
Explanation: Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster.
End of explanation
"""
# Test RMSProp implementation; you should see errors less than 1e-7
from cs231n.optim import rmsprop
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
cache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'cache': cache}
next_w, _ = rmsprop(w, dw, config=config)
expected_next_w = np.asarray([
[-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],
[-0.132737, -0.08078555, -0.02881884, 0.02316247, 0.07515774],
[ 0.12716641, 0.17918792, 0.23122175, 0.28326742, 0.33532447],
[ 0.38739248, 0.43947102, 0.49155973, 0.54365823, 0.59576619]])
expected_cache = np.asarray([
[ 0.5976, 0.6126277, 0.6277108, 0.64284931, 0.65804321],
[ 0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377],
[ 0.75037008, 0.7659518, 0.78158892, 0.79728144, 0.81302936],
[ 0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926 ]])
print('next_w error: ', rel_error(expected_next_w, next_w))
print('cache error: ', rel_error(expected_cache, config['cache']))
# Test Adam implementation; you should see errors around 1e-7 or less
from cs231n.optim import adam
N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
m = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
v = np.linspace(0.7, 0.5, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}
next_w, _ = adam(w, dw, config=config)
expected_next_w = np.asarray([
[-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],
[-0.1380274, -0.08544591, -0.03286534, 0.01971428, 0.0722929],
[ 0.1248705, 0.17744702, 0.23002243, 0.28259667, 0.33516969],
[ 0.38774145, 0.44031188, 0.49288093, 0.54544852, 0.59801459]])
expected_v = np.asarray([
[ 0.69966, 0.68908382, 0.67851319, 0.66794809, 0.65738853,],
[ 0.64683452, 0.63628604, 0.6257431, 0.61520571, 0.60467385,],
[ 0.59414753, 0.58362676, 0.57311152, 0.56260183, 0.55209767,],
[ 0.54159906, 0.53110598, 0.52061845, 0.51013645, 0.49966, ]])
expected_m = np.asarray([
[ 0.48, 0.49947368, 0.51894737, 0.53842105, 0.55789474],
[ 0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316],
[ 0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158],
[ 0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85 ]])
print('next_w error: ', rel_error(expected_next_w, next_w))
print('v error: ', rel_error(expected_v, config['v']))
print('m error: ', rel_error(expected_m, config['m']))
"""
Explanation: RMSProp and Adam
RMSProp [1] and Adam [2] are update rules that set per-parameter learning rates by using a running average of the second moments of gradients.
In the file cs231n/optim.py, implement the RMSProp update rule in the rmsprop function and implement the Adam update rule in the adam function, and check your implementations using the tests below.
[1] Tijmen Tieleman and Geoffrey Hinton. "Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude." COURSERA: Neural Networks for Machine Learning 4 (2012).
[2] Diederik Kingma and Jimmy Ba, "Adam: A Method for Stochastic Optimization", ICLR 2015.
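For orientation, hedged sketches of both rules are shown below (assumptions; the graded versions belong in cs231n/optim.py, and the config keys and defaults mirror the checks in this notebook):
Python
def rmsprop_sketch(w, dw, config=None):
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-2)
    config.setdefault('decay_rate', 0.99)
    config.setdefault('epsilon', 1e-8)
    config.setdefault('cache', np.zeros_like(w))
    # A running average of squared gradients scales each parameter's step size.
    config['cache'] = config['decay_rate'] * config['cache'] + (1 - config['decay_rate']) * dw**2
    next_w = w - config['learning_rate'] * dw / (np.sqrt(config['cache']) + config['epsilon'])
    return next_w, config

def adam_sketch(w, dw, config=None):
    if config is None:
        config = {}
    config.setdefault('learning_rate', 1e-3)
    config.setdefault('beta1', 0.9)
    config.setdefault('beta2', 0.999)
    config.setdefault('epsilon', 1e-8)
    config.setdefault('m', np.zeros_like(w))
    config.setdefault('v', np.zeros_like(w))
    config.setdefault('t', 0)
    # First and second moment estimates with bias correction.
    config['t'] += 1
    config['m'] = config['beta1'] * config['m'] + (1 - config['beta1']) * dw
    config['v'] = config['beta2'] * config['v'] + (1 - config['beta2']) * dw**2
    m_hat = config['m'] / (1 - config['beta1'] ** config['t'])
    v_hat = config['v'] / (1 - config['beta2'] ** config['t'])
    next_w = w - config['learning_rate'] * m_hat / (np.sqrt(v_hat) + config['epsilon'])
    return next_w, config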
End of explanation
"""
learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}
for update_rule in ['adam', 'rmsprop']:
print('running with ', update_rule)
model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
solver = Solver(model, small_data,
num_epochs=5, batch_size=100,
update_rule=update_rule,
optim_config={
'learning_rate': learning_rates[update_rule]
},
verbose=True)
solvers[update_rule] = solver
solver.train()
print()
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')
for update_rule, solver in list(solvers.items()):
plt.subplot(3, 1, 1)
plt.plot(solver.loss_history, 'o', label=update_rule)
plt.subplot(3, 1, 2)
plt.plot(solver.train_acc_history, '-o', label=update_rule)
plt.subplot(3, 1, 3)
plt.plot(solver.val_acc_history, '-o', label=update_rule)
for i in [1, 2, 3]:
plt.subplot(3, 1, i)
plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
"""
Explanation: Once you have debugged your RMSProp and Adam implementations, run the following to train a pair of deep networks using these new update rules:
End of explanation
"""
best_model = None
################################################################################
# TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might  #
# find batch normalization and dropout useful. Store your best model in the   #
# best_model variable. #
################################################################################
model = FullyConnectedNet([200, 200, 200, 200, 200], weight_scale=5e-2, use_batchnorm=True, dropout=0.5)
solver = Solver(model, data,
num_epochs=40, batch_size=100,
update_rule='adam',
optim_config={
'learning_rate': 1e-3,
},
verbose=True,
print_every=1000)
solvers[update_rule] = solver
solver.train()
best_model = model
################################################################################
# END OF YOUR CODE #
################################################################################
"""
Explanation: Train a good model!
Train the best fully-connected model that you can on CIFAR-10, storing your best model in the best_model variable. We require you to get at least 50% accuracy on the validation set using a fully-connected net.
If you are careful it should be possible to get accuracies above 55%, but we don't require it for this part and won't assign extra credit for doing so. Later in the assignment we will ask you to train the best convolutional network that you can on CIFAR-10, and we would prefer that you spend your effort working on convolutional nets rather than fully-connected nets.
You might find it useful to complete the BatchNormalization.ipynb and Dropout.ipynb notebooks before completing this part, since those techniques can help you train powerful models.
End of explanation
"""
y_test_pred = np.argmax(best_model.loss(data['X_test']), axis=1)
y_val_pred = np.argmax(best_model.loss(data['X_val']), axis=1)
print('Validation set accuracy: ', (y_val_pred == data['y_val']).mean())
print('Test set accuracy: ', (y_test_pred == data['y_test']).mean())
"""
Explanation: Test your model
Run your best model on the validation and test sets. You should achieve above 50% accuracy on the validation set.
End of explanation
"""
|
palrogg/foundations-homework
|
Data_and_databases/Homework_2_Paul_Ronga_Graded.ipynb
|
mit
|
import pg8000
conn = pg8000.connect(database="homework2")
"""
Explanation: Homework 2: Working with SQL (Data and Databases 2016)
This homework assignment takes the form of an IPython Notebook. There are a number of exercises below, with notebook cells that need to be completed in order to meet particular criteria. Your job is to fill in the cells as appropriate.
You'll need to download this notebook file to your computer before you can complete the assignment. To do so, follow these steps:
Make sure you're viewing this notebook in Github.
Ctrl+click (or right click) on the "Raw" button in the Github interface, and select "Save Link As..." or your browser's equivalent. Save the file in a convenient location on your own computer.
Rename the notebook file to include your own name somewhere in the filename (e.g., Homework_2_Allison_Parrish.ipynb).
Open the notebook on your computer using your locally installed version of IPython Notebook.
When you've completed the notebook to your satisfaction, e-mail the completed file to the address of the teaching assistant (as discussed in class).
Setting the scene
These problem sets address SQL, with a focus on joins and aggregates.
I've prepared a SQL version of the MovieLens data for you to use in this homework. Download this .psql file here. You'll be importing this data into your own local copy of PostgreSQL.
To import the data, follow these steps:
Launch psql.
At the prompt, type CREATE DATABASE homework2;
Connect to the database you just created by typing \c homework2
Import the .psql file you downloaded earlier by typing \i followed by the path to the .psql file.
After you run the \i command, you should see the following output:
CREATE TABLE
CREATE TABLE
CREATE TABLE
COPY 100000
COPY 1682
COPY 943
The table schemas for the data look like this:
Table "public.udata"
Column | Type | Modifiers
-----------+---------+-----------
user_id | integer |
item_id | integer |
rating | integer |
timestamp | integer |
Table "public.uuser"
Column | Type | Modifiers
------------+-----------------------+-----------
user_id | integer |
age | integer |
gender | character varying(1) |
occupation | character varying(80) |
zip_code | character varying(10) |
Table "public.uitem"
Column | Type | Modifiers
--------------------+------------------------+-----------
movie_id | integer | not null
movie_title | character varying(81) | not null
release_date | date |
video_release_date | character varying(32) |
imdb_url | character varying(134) |
unknown | integer | not null
action | integer | not null
adventure | integer | not null
animation | integer | not null
childrens | integer | not null
comedy | integer | not null
crime | integer | not null
documentary | integer | not null
drama | integer | not null
fantasy | integer | not null
film_noir | integer | not null
horror | integer | not null
musical | integer | not null
mystery | integer | not null
romance | integer | not null
scifi | integer | not null
thriller | integer | not null
war | integer | not null
western | integer | not null
Run the cell below to create a connection object. This should work whether you have pg8000 installed or psycopg2.
End of explanation
"""
conn.rollback()
"""
Explanation: If you get an error stating that database "homework2" does not exist, make sure that you followed the instructions above exactly. If necessary, drop the database you created (with, e.g., DROP DATABASE your_database_name) and start again.
In all of the cells below, I've provided the necessary Python scaffolding to perform the query and display the results. All you need to do is write the SQL statements.
As noted in the tutorial, if your SQL statement has a syntax error, you'll need to rollback your connection before you can fix the error and try the query again. As a convenience, I've included the following cell, which performs the rollback process. Run it whenever you hit trouble.
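If you prefer, you can also wrap queries in a small helper that rolls the connection back automatically when a statement fails (a hedged convenience on top of the scaffolding, not required for the exercises):
Python
def run_query(statement):
    cursor = conn.cursor()
    try:
        cursor.execute(statement)
        return cursor.fetchall()
    except Exception:
        conn.rollback()
        raise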
End of explanation
"""
cursor = conn.cursor()
statement = "SELECT movie_title FROM uitem WHERE scifi = 1 AND horror = 1 ORDER BY release_date DESC"
cursor.execute(statement)
for row in cursor:
print(row[0])
"""
Explanation: Problem set 1: WHERE and ORDER BY
In the cell below, fill in the string assigned to the variable statement with a SQL query that finds all movies that belong to both the science fiction (scifi) and horror genres. Return these movies in reverse order by their release date. (Hint: movies are located in the uitem table. A movie's membership in a genre is indicated by a value of 1 in the uitem table column corresponding to that genre.) Run the cell to execute the query.
Expected output:
Deep Rising (1998)
Alien: Resurrection (1997)
Hellraiser: Bloodline (1996)
Robert A. Heinlein's The Puppet Masters (1994)
Body Snatchers (1993)
Army of Darkness (1993)
Body Snatchers (1993)
Alien 3 (1992)
Heavy Metal (1981)
Alien (1979)
Night of the Living Dead (1968)
Blob, The (1958)
End of explanation
"""
cursor = conn.cursor()
statement = "SELECT COUNT(*) FROM uitem WHERE musical = 1 OR childrens = 1"
cursor.execute(statement)
for row in cursor:
print(row[0])
"""
Explanation: Problem set 2: Aggregation, GROUP BY and HAVING
In the cell below, fill in the string assigned to the statement variable with a SQL query that returns the number of movies that are either musicals or children's movies (columns musical and childrens respectively). Hint: use the count(*) aggregate.
Expected output: 157
End of explanation
"""
cursor = conn.cursor()
statement = "SELECT DISTINCT(occupation), COUNT(*) FROM uuser GROUP BY occupation HAVING COUNT(*) > 50"
cursor.execute(statement)
for row in cursor:
print(row[0], row[1])
"""
Explanation: Nicely done. Now, in the cell below, fill in the indicated string with a SQL statement that returns all occupations, along with their count, from the uuser table that have more than fifty users listed for that occupation. (I.e., the occupation librarian is listed for 51 users, so it should be included in these results. There are only 12 lawyers, so lawyer should not be included in the result.)
Expected output:
administrator 79
programmer 66
librarian 51
student 196
other 105
engineer 67
educator 95
Hint: use GROUP BY and HAVING. (If you're stuck, try writing the query without the HAVING first.)
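As a hedged illustration of that intermediate step, the same aggregation without the HAVING clause lets you eyeball every per-occupation count before filtering:
Python
cursor = conn.cursor()
cursor.execute("SELECT occupation, COUNT(*) FROM uuser GROUP BY occupation ORDER BY COUNT(*) DESC")
for row in cursor:
    print(row[0], row[1])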
End of explanation
"""
cursor = conn.cursor()
statement = "SELECT DISTINCT(movie_title) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE documentary = 1 AND EXTRACT(YEAR FROM release_date) < 1992 AND rating = 5 GROUP BY movie_title"
#TA-STEPHAN: Try using this statement
#statement = "SELECT DISTINCT uitem.movie_title, udata.rating FROM uitem JOIN udata ON uitem.movie_id = udata.item_id WHERE documentary = 1 AND udata.rating = 5 AND uitem.release_date < '1992-01-01';"
# if "any" has to be taken in the sense of "every":
# statement = "SELECT movie_title FROM uitem JOIN udata ON uitem.movie_id = udata.item_id WHERE EXTRACT(YEAR FROM release_date) < 1992 GROUP BY movie_title HAVING MIN(rating) = 5"
cursor.execute(statement)
for row in cursor:
print(row[0])
"""
Explanation: Problem set 3: Joining tables
In the cell below, fill in the indicated string with a query that finds the titles of movies in the Documentary genre released before 1992 that received a rating of 5 from any user. Expected output:
Madonna: Truth or Dare (1991)
Koyaanisqatsi (1983)
Paris Is Burning (1990)
Thin Blue Line, The (1988)
Hints:
JOIN the udata and uitem tables.
Use DISTINCT() to get a list of unique movie titles (no title should be listed more than once).
The SQL expression to include in order to find movies released before 1992 is uitem.release_date < '1992-01-01'.
End of explanation
"""
conn.rollback()
cursor = conn.cursor()
statement = "SELECT movie_title, AVG(rating) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE horror = 1 GROUP BY movie_title ORDER BY AVG(rating) LIMIT 10"
cursor.execute(statement)
for row in cursor:
print(row[0], "%0.2f" % row[1])
"""
Explanation: Problem set 4: Joins and aggregations... together at last
This one's tough, so prepare yourself. Go get a cup of coffee. Stretch a little bit. Deep breath. There you go.
In the cell below, fill in the indicated string with a query that produces a list of the ten lowest rated movies in the Horror genre. For the purposes of this problem, take "lowest rated" to mean "has the lowest average rating." The query should display the titles of the movies, not their ID number. (So you'll have to use a JOIN.)
Expected output:
Amityville 1992: It's About Time (1992) 1.00
Beyond Bedlam (1993) 1.00
Amityville: Dollhouse (1996) 1.00
Amityville: A New Generation (1993) 1.00
Amityville 3-D (1983) 1.17
Castle Freak (1995) 1.25
Amityville Curse, The (1990) 1.25
Children of the Corn: The Gathering (1996) 1.32
Machine, The (1994) 1.50
Body Parts (1991) 1.62
End of explanation
"""
cursor = conn.cursor()
statement = "SELECT movie_title, AVG(rating) FROM udata JOIN uitem ON uitem.movie_id = udata.item_id WHERE horror = 1 GROUP BY movie_title HAVING COUNT(rating) > 10 ORDER BY AVG(rating) LIMIT 10;"
cursor.execute(statement)
for row in cursor:
print(row[0], "%0.2f" % row[1])
"""
Explanation: BONUS: Extend the query above so that it only includes horror movies that have ten or more ratings. Fill in the query as indicated below.
Expected output:
Children of the Corn: The Gathering (1996) 1.32
Body Parts (1991) 1.62
Amityville II: The Possession (1982) 1.64
Jaws 3-D (1983) 1.94
Hellraiser: Bloodline (1996) 2.00
Tales from the Hood (1995) 2.04
Audrey Rose (1977) 2.17
Addiction, The (1995) 2.18
Halloween: The Curse of Michael Myers (1995) 2.20
Phantoms (1998) 2.23
End of explanation
"""
|
RaspberryJamBe/ipython-notebooks
|
notebooks/en-gb/102 - LEDs - Drive LEDS with the Raspberry Pi GPIO pins.ipynb
|
cc0-1.0
|
#load GPIO library
import RPi.GPIO as GPIO
#Set BCM (Broadcom) mode for the pin numbering
GPIO.setmode(GPIO.BCM)
"""
Explanation: Drive LEDs with the Raspberry Pi GPIO pins
This notebook will walk you through using the Raspberry Pi General Purpose Input/Output (GPIO) pins to make an LED light up.
The GPIO pins are the 40 (numbered) pins that electronic components can be connected to.
Before actually using them, we have to agree with our Raspberry Pi how to address the pins and we do that with the setmode function.
IPython Instructions:
Place your cursor in the cell below and press Shift+Enter or click the Play button in the toolbar above to execute the code in the cell.
Shift + Enter: Execute the cell and jump to the next one
Ctrl + Enter: Execute the cell, but stay in the current one
Alt + Enter: Execute the cell and create a new one below the current one
As long as a [*] appears to the left of the cell, it is still running. As soon as the code ends, a number appears and any output generated by the code is printed under the cell.
End of explanation
"""
# If we assign the name 'PIN' to the pin number we intend to use, we can reuse it later
# yet still change easily in one place
PIN = 18
# set pin as output
GPIO.setup(PIN, GPIO.OUT)
"""
Explanation: BCM is the numbering that is engraved on the Raspberry Pi case we use and that you can also find on the printed pinout schema (BCM stands for Broadcom, the company that produces the Raspberry Pi chip).
Watch out 1: an LED is a diode, which means it is important to send current through it in the correct direction. So the difference between the long and short legs of the LED matters.
Watch out 2: if left to their own devices, LEDs will draw more current than is good for them and will eventually burn out. On a bad day, the Raspberry Pi might get damaged as well... To prevent this, we need to add a resistor (in series!)
Connect the long end of the LED with GPIO18 on the Pi.
Place a low value resistor (220-360 Ohm) in series with the LED.
Illustration:
<img src="LED01.png" height="300"/>
Then all that is left in our setup is to tell the Raspberry Pi that we intend to use GPIO18 as output, so we can change the voltage on that pin. This way we can send current on the pin to turn on the LED.
End of explanation
"""
import time
# Repeat forever
while True:
# turn off pin 18
GPIO.output(PIN, 0)
# wait for half a second
time.sleep(.5)
# turn on pin 18
GPIO.output(PIN, 1)
# wait for half a second
time.sleep(.5)
#... and again ...
"""
Explanation: With all GPIO settings done, we can put pin GPIO18 to work.
To do this, we import the time library, so we can use its time related functionality: time.sleep(x) is a function that tells the computer to wait for a number of seconds before continuing with the next instruction.
End of explanation
"""
#reset the GPIO
PIN = 18
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(PIN, GPIO.OUT)
# Create PWM object and set its frequency in Hz (cycles per second)
led = GPIO.PWM(PIN, 60)
# Start PWM signal
led.start(0)
try:
while True:
# increase duty cycle by 1%
for dc in range(0, 101, 1):
led.ChangeDutyCycle(dc)
time.sleep(0.05)
# and down again ...
for dc in range(100, -1, -1):
led.ChangeDutyCycle(dc)
time.sleep(0.05)
except KeyboardInterrupt:
pass
led.stop()
GPIO.cleanup()
"""
Explanation: Note: To stop the running code, you can:
click the stop button in the toolbar above
choose Kernel > Interrupt from the dropdown menu
use a keyboard shortcut: type i twice (only while the cell is in "command mode", indicated by a grey border).
Pulse Width Modulation
So we can now send binary commands ("on" and "off"), but the GPIO library also allows us to simulate an analog signal, also called PWM (Pulse Width Modulation). A PWM signal consists of a quick succession of on and off pulses, where the fraction of time spent "on" (the duty cycle) sets the pseudo-analog level between 0 and 1.
E.g. 75% of the time "on" == a duty cycle of 75% == an analog signal of 0.75 (or 75% of the max amplitude)
In the case of an LED, this manifests itself as a stronger or weaker light being emitted from the LED. For motors this can be the rotation speed or the angle to which the axle moves (or is held).
End of explanation
"""
|
epeios-q37/epeios
|
tools/xdhq/examples/PYH/Hello.ipynb
|
agpl-3.0
|
try:
import atlastk
except:
!pip install atlastk
import atlastk
atlastk.setJupyterHeight("150px") # Adjusting the height of the iframe in which the application will be displayed…
"""
Explanation: If you haven't already done so, please take a look at this FAQ, especially if you run this notebook on Google Colab.
If the atlastk module is not installed, it will be installed from PyPI.
Don't worry, it's a very lightweight package (~20 KB), and it has no dependencies…
End of explanation
"""
BODY = """
<fieldset>
<input id="Input" maxlength="20" placeholder="Enter a name here" type="text"
xdh:onevent="Submit" value="world"/>
<button xdh:onevent="Submit">Submit</button>
<hr/>
<fieldset>
<output id="Output"/>
</fieldset>
</fieldset>
"""
"""
Explanation: Let's now define the body of the base HTML page.
End of explanation
"""
from IPython.display import display, HTML
display(HTML(BODY))
def ac_connect(dom):
dom.inner("", BODY) # The empty string (`""`) refers to the root element of the HTML page.
dom.focus("Input")
"""
Explanation: And the function which will be called on each new session.
End of explanation
"""
callbacks = {
"": ac_connect # The empty string ("") refers to the action called on each new session.
}
"""
Explanation: Let's now associate the Connect action to this function.
End of explanation
"""
atlastk.launch(callbacks)
"""
Explanation: Let's display the app's main page.
Click on the URL which will be displayed below to open the app in its own page or tab.
Clicking on the Submit button or pressing Enter in the text field will display an error message, as there is no function assigned to the associated Submit action.
End of explanation
"""
def ac_submit(dom):
dom.set_value("Output", f"Hello, {dom.get_value('Input')}!")
dom.set_value("Input", "")
dom.focus("Input")
"""
Explanation: Let's now create the function which will be associated with the Submit action.
End of explanation
"""
callbacks["Submit"] = ac_submit;
"""
Explanation: Let's associate this function with the Submit action defined in the xdh:onevent attribute on both the input and button elements of the HTML page.
End of explanation
"""
(callbacks)
"""
Explanation: Let's look at what the callbacks variable looks like.
End of explanation
"""
atlastk.launch(callbacks)
"""
Explanation: And let's relaunch the app.
Clicking on the Submit button or pressing Enter in the text field will now display the expected greeting message.
End of explanation
"""
|
pauliacomi/pyGAPS
|
docs/examples/iast.ipynb
|
mit
|
# import isotherms
%run import.ipynb
# import the iast module
import pygaps
import pygaps.iast as pgi
import matplotlib.pyplot as plt
import numpy
"""
Explanation: IAST examples
The IAST method is used to predict the composition of the adsorbed phase in a
multicomponent adsorption system, starting from pure component isotherms. First,
make sure the data is imported by running the import notebook.
End of explanation
"""
isotherms_iast_models = []
isotherm = next(i for i in isotherms_iast if i.material=='MOF-5(Zn)')
print('Isotherm sample:', isotherm.material)
for isotherm in isotherms_iast:
model = pygaps.ModelIsotherm.from_pointisotherm(isotherm, model='Langmuir', verbose=True)
isotherms_iast_models.append(model)
"""
Explanation: Using models
The IAST calculation is often performed by fitting a model to the isotherm
rather than on the isotherms themselves, as spreading pressure can be computed
efficiently when using most common models. Let's first fit the Langmuir model to
both isotherms.
End of explanation
"""
gas_fraction = [0.5, 0.5]
total_pressure = 10
pgi.iast_point_fraction(isotherms_iast_models, gas_fraction, total_pressure, verbose=True)
"""
Explanation: Now we can perform the IAST calculation with the resulting models. We specify
the partial pressures of each component in the gaseous phase to obtain the
composition of the adsorbed phase.
End of explanation
"""
mole_fractions = [0.5, 0.5]
pressure_range = numpy.linspace(0.01, 20, 30)
result_dict = pgi.iast_binary_svp(
isotherms_iast_models,
mole_fractions,
pressure_range,
verbose=True,
)
"""
Explanation: Alternatively, if we are interested in a binary system, we can use the extension
functions iast_binary_svp and iast_binary_vle to obtain how the selectivity
changes with pressure at constant composition or, respectively, how the
gas phase - adsorbed phase equilibrium changes with gas composition, at constant pressure.
These functions perform the IAST calculation at every point in the range passed
and can plot the results. If interested in the selectivity for one component in
an equimolar mixture over a pressure range:
End of explanation
"""
total_pressure = 2
result_dict = pgi.iast_binary_vle(
isotherms_iast_models,
total_pressure,
verbose=True,
)
"""
Explanation: Or, if interested in an adsorbed phase - gas phase equilibrium line:
End of explanation
"""
gas_fraction = [0.5, 0.5]
total_pressure = 10
pgi.iast_point_fraction(
isotherms_iast_models,
gas_fraction,
total_pressure,
verbose=True,
)
"""
Explanation: Using isotherms directly - interpolation
The isotherms themselves can be used directly. However, instead of spreading
pressure being calculated from the model, it will be approximated through
interpolation and numerical quadrature integration.
End of explanation
"""
mole_fraction = [0.5, 0.5]
pressure_range = numpy.linspace(0.01, 20, 30)
result_dict = pgi.iast_binary_svp(
isotherms_iast,
mole_fraction,
pressure_range,
verbose=True,
)
result_dict = pgi.iast_binary_vle(
isotherms_iast,
total_pressure=2,
verbose=True,
)
"""
Explanation: The binary mixture functions can also accept PointIsotherm objects.
End of explanation
"""
|
probml/pyprobml
|
notebooks/book2/17/gp_poisson_1d.ipynb
|
mit
|
try:
import tinygp
except ImportError:
%pip install -q tinygp
try:
import numpyro
except ImportError:
# It is much faster to use CPU than GPU.
# This is because Colab has multiple CPU cores, so can run the 2 MCMC chains in parallel
%pip uninstall -y jax jaxlib
%pip install -q numpyro jax jaxlib
#%pip install numpyro[cuda] -f https://storage.googleapis.com/jax-releases/jax_releases.html
try:
import arviz
except ImportError:
%pip install arviz
"""
Explanation: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/gp_poisson_1d.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
GP with a Poisson Likelihood
https://tinygp.readthedocs.io/en/latest/tutorials/likelihoods.html
We use the tinygp library to define the model, and the numpyro library to do inference, using either MCMC or SVI.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
random = np.random.default_rng(203618)
x = np.linspace(-3, 3, 20)
true_log_rate = 2 * np.cos(2 * x)
y = random.poisson(np.exp(true_log_rate))
plt.plot(x, y, ".k", label="data")
plt.plot(x, np.exp(true_log_rate), "C1", label="true rate")
plt.legend(loc=2)
plt.xlabel("x")
_ = plt.ylabel("counts")
plt.savefig("gp-poisson-data.pdf")
"""
Explanation: Data
End of explanation
"""
import jax
import jax.numpy as jnp
try:
import numpyro
except ModuleNotFoundError:
%pip install -qq numpyro
import numpyro
import numpyro.distributions as dist
try:
from tinygp import kernels, GaussianProcess
except ModuleNotFoundError:
%pip install -qq tinygp
from tinygp import kernels, GaussianProcess
# We'll enable float64 support here for better numerical performance
from jax.config import config
config.update("jax_enable_x64", True)
def model(x, y=None):
# The parameters of the GP model
mean = numpyro.sample("mean", dist.Normal(0.0, 2.0))
sigma = numpyro.sample("sigma", dist.HalfNormal(3.0))
rho = numpyro.sample("rho", dist.HalfNormal(10.0))
# Set up the kernel and GP objects
kernel = sigma**2 * kernels.Matern52(rho)
gp = GaussianProcess(kernel, x, diag=1e-5, mean=mean)
# This parameter has shape (num_data,) and it encodes our beliefs about
# the process rate in each bin
log_rate = numpyro.sample("log_rate", gp.numpyro_dist())
# Finally, our observation model is Poisson
numpyro.sample("obs", dist.Poisson(jnp.exp(log_rate)), obs=y)
# Run the MCMC
nuts_kernel = numpyro.infer.NUTS(model, target_accept_prob=0.9)
mcmc = numpyro.infer.MCMC(
nuts_kernel,
num_warmup=500,
num_samples=500,
num_chains=2,
progress_bar=False,
)
rng_key = jax.random.PRNGKey(55873)
mcmc.run(rng_key, x, y=y)
samples = mcmc.get_samples()
"""
Explanation: Markov chain Monte Carlo (MCMC)
We set up the model in numpyro and run MCMC.
Note that the log_rate parameter doesn't have the obs=... argument set, since it is latent.
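Since arviz was installed above but is otherwise unused here, a quick hedged convergence check of the kernel hyperparameters can be run before plotting (az.from_numpyro wraps the numpyro MCMC object into an InferenceData):
Python
import arviz as az

idata = az.from_numpyro(mcmc)
print(az.summary(idata, var_names=["mean", "sigma", "rho"]))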
End of explanation
"""
q = np.percentile(samples["log_rate"], [5, 25, 50, 75, 95], axis=0)
plt.plot(x, np.exp(q[2]), color="C0", label="MCMC inferred rate")
plt.fill_between(x, np.exp(q[0]), np.exp(q[-1]), alpha=0.3, lw=0, color="C0")
plt.fill_between(x, np.exp(q[1]), np.exp(q[-2]), alpha=0.3, lw=0, color="C0")
plt.plot(x, np.exp(true_log_rate), "--", color="C1", label="true rate")
plt.plot(x, y, ".k", label="data")
plt.legend(loc=2)
plt.xlabel("x")
_ = plt.ylabel("counts")
plt.savefig("gp-poisson-mcmc.pdf")
"""
Explanation: We can summarize the MCMC results by plotting our inferred model (here we're showing the 1- and 2-sigma credible regions), and compare it to the known ground truth:
End of explanation
"""
def guide(x, y=None):
numpyro.param("mean", jnp.zeros(()))
numpyro.param("sigma", jnp.ones(()), constraint=dist.constraints.positive)
numpyro.param("rho", 2 * jnp.ones(()), constraint=dist.constraints.positive)
mu = numpyro.param("log_rate_mu", jnp.zeros_like(x) if y is None else jnp.log(y + 1))
sigma = numpyro.param(
"log_rate_sigma",
jnp.ones_like(x),
constraint=dist.constraints.positive,
)
numpyro.sample("log_rate", dist.Normal(mu, sigma))
optim = numpyro.optim.Adam(0.01)
svi = numpyro.infer.SVI(model, guide, optim, numpyro.infer.Trace_ELBO(10))
results = svi.run(jax.random.PRNGKey(55873), 3000, x, y=y, progress_bar=False)
"""
Explanation: Stochastic variational inference (SVI)
For larger datasets, it is faster to use stochastic variational inference (SVI) instead of MCMC.
End of explanation
"""
mu = results.params["log_rate_mu"]
sigma = results.params["log_rate_sigma"]
plt.plot(x, np.exp(mu), color="C0", label="VI inferred rate")
plt.fill_between(
x,
np.exp(mu - 2 * sigma),
np.exp(mu + 2 * sigma),
alpha=0.3,
lw=0,
color="C0",
)
plt.fill_between(x, np.exp(mu - sigma), np.exp(mu + sigma), alpha=0.3, lw=0, color="C0")
plt.plot(x, np.exp(true_log_rate), "--", color="C1", label="true rate")
plt.plot(x, y, ".k", label="data")
plt.legend(loc=2)
plt.xlabel("x")
_ = plt.ylabel("counts")
plt.savefig("gp-poisson-svi.pdf")
"""
Explanation: As above, we can plot our inferred conditional model and compare it to the ground truth:
End of explanation
"""
|
Juanlu001/poliastro
|
docs/source/examples/Using NEOS package.ipynb
|
mit
|
from astropy import time
from poliastro.twobody.orbit import Orbit
from poliastro.bodies import Earth
from poliastro.frames import Planes
from poliastro.plotting import StaticOrbitPlotter
"""
Explanation: Analyzing NEOs
NEO stands for near-Earth object. The Center for NEO Studies (CNEOS) defines NEOs as comets and asteroids that have been nudged by the gravitational attraction of nearby planets into orbits that allow them to enter the Earth’s neighborhood.
And what does "near" exactly mean? In terms of orbital elements, asteroids and comets can be considered NEOs if their perihelion (orbit point which is nearest to the Sun) is less than 1.3 au = 1.945 * 10<sup>8</sup> km from the Sun.
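As a quick hedged sanity check of this criterion, using approximate published values for Eros (a ≈ 1.458 au, e ≈ 0.223) and q = a (1 - e):
Python
a_eros, ecc_eros = 1.458, 0.223          # approximate values, for illustration only
q_eros = a_eros * (1 - ecc_eros)
print(f"Eros perihelion ~ {q_eros:.2f} au -> NEO: {q_eros < 1.3}")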
End of explanation
"""
eros = Orbit.from_sbdb("Eros")
eros.plot(label="Eros");
"""
Explanation: Small Body Database (SBDB)
End of explanation
"""
ganymed = Orbit.from_sbdb("1036") # Ganymed IAU number
amor = Orbit.from_sbdb("2001221") # Amor SPK-ID
eros = Orbit.from_sbdb("2000433") # Eros SPK-ID
frame = StaticOrbitPlotter(plane=Planes.EARTH_ECLIPTIC)
frame.plot(ganymed, label="Ganymed")
frame.plot(amor, label="Amor")
frame.plot(eros, label="Eros");
"""
Explanation: You can also search by IAU number or SPK-ID (in that case there is a faster neows.orbit_from_spk_id() function, though):
End of explanation
"""
try:
Orbit.from_sbdb("*alley")
except ValueError as err:
print(err)
"""
Explanation: You can use the wildcards from that browser: * and ?.
<div class="alert alert-info">Keep in mind that `from_sbdb()` can only return one Orbit, so if several objects are found with that name, it will raise an error listing the different bodies.</div>
End of explanation
"""
eros.epoch.iso
epoch = time.Time(2458000.0, scale="tdb", format="jd")
eros_november = eros.propagate(epoch)
eros_november.epoch.iso
"""
Explanation: <div class="alert alert-info">Note that the epoch is provided by the service itself, so if you need the orbit at another epoch, you have to propagate it:</div>
End of explanation
"""
from poliastro.neos import dastcom5
atira = dastcom5.orbit_from_name("atira")[0] # NEO
wikipedia = dastcom5.orbit_from_name("wikipedia")[0] # Asteroid, but not NEO.
frame = StaticOrbitPlotter()
frame.plot(atira, label="Atira (NEO)")
frame.plot(wikipedia, label="Wikipedia (asteroid)");
"""
Explanation: DASTCOM5 module
This module can also be used to get NEO orbits, in the same way as neows, but it has some advantages (and some disadvantages).
It relies on the DASTCOM5 database, a NASA/JPL maintained asteroid and comet database. This database has to be downloaded at least once in order to use this module. According to its README, it is updated typically a couple of times per day, but
potentially as frequently as once per hour, so you can re-download it whenever you want the most recently discovered bodies. This also means that, after downloading the file, you can use the database offline.
The file is a ~230 MB zip that you can manually download and unzip in ~/.poliastro or, more easily, you can use
Python
dastcom5.download_dastcom5()
The main DASTCOM5 advantage over NeoWs is that you can use it to search not only NEOs, but any asteroid or comet. The easiest function is orbit_from_name():
End of explanation
"""
halleys = dastcom5.orbit_from_name("1P")
frame = StaticOrbitPlotter()
frame.plot(halleys[0], label="Halley")
frame.plot(halleys[5], label="Halley")
frame.plot(halleys[10], label="Halley")
frame.plot(halleys[20], label="Halley")
frame.plot(halleys[-1], label="Halley");
"""
Explanation: Keep in mind that this function returns a list of orbits matching your string. This is made on purpose given that there are comets which have several records in the database (one for each orbit determination in history) what allow plots like this one:
End of explanation
"""
ast_db = dastcom5.asteroid_db()
comet_db = dastcom5.comet_db()
ast_db.dtype.names[
:20
]  # There are more than 100, but that would be too many lines in this notebook :P
"""
Explanation: While neows can only be used to get Orbit objects, dastcom5 can also provide the complete asteroid and comet databases.
Once you have these, you can get specific data about one or more bodies. The complete databases are ndarrays, so if you want to know the entire list of available parameters, you can look at the dtype; they are also explained in
the documentation API Reference:
End of explanation
"""
aphelion_condition = 2 * ast_db["A"] - ast_db["QR"] < 0.983
axis_condition = ast_db["A"] < 1.3
atiras = ast_db[aphelion_condition & axis_condition]
"""
Explanation: <div class="alert alert-info">Asteroid and comet parameters are not exactly the same (although they are very close)</div>
With these ndarrays you can classify asteroids and comets, sort them, get all their parameters, and whatever comes to your mind.
For example, NEOs can be grouped in several ways. One of the NEO groups is called Atiras, and is formed by NEOs whose orbits are contained entirely within the orbit of the Earth. They are a really small group, and we can try to plot all of these NEOs using asteroid_db():
Talking in orbital terms, Atiras have an aphelion distance Q < 0.983 au and a semi-major axis a < 1.0 au.
Visiting the documentation API Reference, you can see that DASTCOM5 provides the semi-major axis but doesn't provide the aphelion distance. You can easily get the aphelion distance knowing the perihelion distance (q, QR in DASTCOM5) and the semi-major axis a, since Q = 2*a - q, but there are probably many other ways.
End of explanation
"""
len(atiras)
"""
Explanation: The number of Atira NEOs we find using this method is:
End of explanation
"""
from poliastro.bodies import Earth
frame = StaticOrbitPlotter()
frame.plot_body_orbit(Earth, time.Time.now().tdb)
for record in atiras["NO"]:
ss = dastcom5.orbit_from_record(record)
if ss.ecc < 1:
frame.plot(ss, color="#666666")
else:
print(f"Skipping hyperbolic orbit: {record}")
"""
Explanation: Which is consistent with the stats published by CNEOS
Now we're gonna plot all of their orbits, with corresponding labels, just because we love plots :)
We only need to get the 16 orbits from these 16 ndarrays.
There are two ways:
Gather all their orbital elements manually and use the Orbit.from_classical() function.
Use the NO property (logical record number in DASTCOM5 database) and the dastcom5.orbit_from_record() function.
The second one seems easier and it is related to the current notebook, so we are going to use that one, using the ASTNAM property of DASTCOM5 database:
End of explanation
"""
frame = StaticOrbitPlotter()
frame.plot_body_orbit(Earth, time.Time.now().tdb)
for i in range(len(atiras)):
record = atiras["NO"][i]
label = atiras["ASTNAM"][i].decode().strip() # DASTCOM5 strings are binary
ss = dastcom5.orbit_from_record(record)
if ss.ecc < 1:
frame.plot(ss, label=label)
else:
print(f"Skipping hyperbolic orbit: {label}")
"""
Explanation: If we also needed the names of each asteroid, we could do:
End of explanation
"""
db = dastcom5.entire_db()
db.columns
"""
Explanation: <div class="alert alert-info">We knew beforehand that there are no `Atira` comets, only asteroids (comet orbits are usually more eccentric), but we could use the same method with `comet_db` if we wanted.</div>
Finally, another interesting function in dastcom5 is entire_db(), which is really similar to ast_db and comet_db, but it returns a Pandas dataframe instead of a numpy ndarray. The dataframe has asteroids and comets in it, but in order to achieve that (and a more manageable dataframe), a lot of parameters were removed, and others were renamed:
End of explanation
"""
db[
db.NAME == "Halley"
] # As you can see, Halley is the name of an asteroid too, did you know that?
"""
Explanation: Also, in this function, DASTCOM5 data (especially strings) is ready to use (decoded and improved strings, etc):
End of explanation
"""
aphelion_condition = (2 * db["A"] - db["QR"]) < 0.983
axis_condition = db["A"] < 1.3
atiras = db[aphelion_condition & axis_condition]
len(atiras)
"""
Explanation: Pandas offers many functionalities, and can also be used in the same way as the ast_db and comet_db functions:
End of explanation
"""
len(atiras[atiras.A < 0])
"""
Explanation: What? I said they can be used in the same way!
Don't worry :) If you want to know what's happening here, the only difference is that we are now working with comets too, and some comets have a negative semi-major axis!
End of explanation
"""
axis_condition = (db["A"] < 1.3) & (db["A"] > 0)
atiras = db[aphelion_condition & axis_condition]
len(atiras)
"""
Explanation: So, rewriting our condition:
End of explanation
"""
|
d-meiser/cold-atoms
|
examples/Optimization of Coulomb force evaluation.ipynb
|
gpl-3.0
|
import coldatoms
import numpy as np
%matplotlib notebook
import matplotlib.pyplot as plt
import time
"""
Explanation: Evaluation of performance of Coulomb force evaluation
In this notebook we have a quick look at the performance of the Coulomb force evaluation and our optimizations. The timing results in this notebook were obtained on a low-end AMD A6 laptop CPU.
End of explanation
"""
def measure_time(num_ptcls, per_ptcl_charges=False, num_iter=1, use_reference_impl=False):
ensemble = coldatoms.Ensemble(num_ptcls=num_ptcls)
ensemble.x = np.random.random([num_ptcls, 3])
if per_ptcl_charges:
ensemble.set_particle_properties('charge', np.random.random(num_ptcls))
else:
ensemble.ensemble_properties['charge'] = 1.0
f = coldatoms.CoulombForce()
if use_reference_impl:
f.use_reference_implementations()
accumulated_force = np.zeros_like(ensemble.v)
t0 = time.time()
for i in range(num_iter):
f.force(1.0e-1, ensemble, accumulated_force)
t1 = time.time()
return t1 - t0
"""
Explanation: We evaluate the performance by doing a few force evaluations and measuring the elapsed time:
End of explanation
"""
def num_ptcls(n_min, n_max, n):
return [n_min * (n_max / n_min)**(i/(n - 1)) for i in range(n)]
"""
Explanation: We consider particle numbers on a logarithmic scale:
End of explanation
"""
nptcls = np.array(num_ptcls(1, 1000, 30))
times = np.array([measure_time(int(round(n)), num_iter=5) for n in nptcls])
pairs_per_second = nptcls**2 / times
c4_nptcls = np.array([ 1.00000000e+00, 1.37382380e+00, 1.88739182e+00, 2.59294380e+00,
3.56224789e+00, 4.89390092e+00, 6.72335754e+00, 9.23670857e+00,
1.26896100e+01, 1.74332882e+01, 2.39502662e+01, 3.29034456e+01,
4.52035366e+01, 6.21016942e+01, 8.53167852e+01, 1.17210230e+02,
1.61026203e+02, 2.21221629e+02, 3.03919538e+02, 4.17531894e+02,
5.73615251e+02, 7.88046282e+02, 1.08263673e+03, 1.48735211e+03,
2.04335972e+03, 2.80721620e+03, 3.85662042e+03, 5.29831691e+03,
7.27895384e+03, 1.00000000e+04])
c4_pairs_per_second = np.array([ 8.21896849e+01, 8.99578985e+04, 1.69785802e+05, 3.20452334e+05,
6.04819115e+05, 1.04640310e+06, 1.97497265e+06, 3.40804318e+06,
5.36026069e+06, 8.44192672e+06, 1.04605076e+07, 1.35549480e+07,
1.54981408e+07, 1.70811388e+07, 1.48204588e+07, 1.83218908e+07,
1.87899007e+07, 1.81859734e+07, 1.84545152e+07, 1.85655733e+07,
1.86642306e+07, 1.86659059e+07, 1.87018426e+07, 1.87843680e+07,
1.87249206e+07, 1.87188841e+07, 1.86915702e+07, 1.86896431e+07,
1.86820557e+07, 1.87087450e+07])
c4_pairs_per_second_opt = np.array([ 7.77428407e+01, 8.60466855e+04, 1.77870840e+05, 3.35711969e+05,
5.78522632e+05, 1.14153065e+06, 2.06084102e+06, 3.68911890e+06,
5.77258843e+06, 9.17072615e+06, 1.24658899e+07, 1.54452639e+07,
1.81193908e+07, 1.97748636e+07, 1.66558348e+07, 2.03540609e+07,
2.14466467e+07, 2.16752990e+07, 2.15554241e+07, 2.16672624e+07,
2.17012696e+07, 2.17274807e+07, 2.16995063e+07, 2.17516952e+07,
2.17600542e+07, 2.17549355e+07, 2.17497805e+07, 2.17769467e+07,
2.17739088e+07, 2.17851869e+07])
plt.figure()
plt.loglog(nptcls, pairs_per_second);
plt.loglog(c4_nptcls, c4_pairs_per_second);
plt.loglog(c4_nptcls, c4_pairs_per_second_opt);
"""
Explanation: Here is the performance of the naive C implementation (for comparison we also show the performance obtained on a AWS EC2 C4 instance).
End of explanation
"""
times[0]
"""
Explanation: The latency can be inferred from the time it takes to deal with just one pair:
End of explanation
"""
pairs_per_second[-1]
"""
Explanation: In the limit of large numbers of particles we process on the order of $6\times 10^6$ particle pairs per second:
End of explanation
"""
def const_rate_model(latency, rate, num_ptcls):
num_pairs = num_ptcls**2
total_time = latency + num_pairs / rate
return num_pairs / total_time
plt.figure()
plt.loglog(nptcls, pairs_per_second)
plt.loglog(nptcls, [const_rate_model(5.0e-5, 8.0e6, n) for n in nptcls]);
"""
Explanation: For the numbers of particles considered here we do not observe any cache effects yet. This could be due to inefficiencies in the force evaluation function or due to the relatively small number of particles (Easily fits in L2, almost fits into L1 cache). We can model the processing speed using latency and the processing speed at large numbers of particles.
End of explanation
"""
nptcls = np.array([ 1. , 1.268961 , 1.61026203, 2.04335972,
2.5929438 , 3.29034456, 4.17531894, 5.29831691,
6.72335754, 8.53167852, 10.82636734, 13.73823796,
17.43328822, 22.12216291, 28.07216204, 35.6224789 ,
45.20353656, 57.3615251 , 72.78953844, 92.36708572,
117.21022975, 148.73521073, 188.73918221, 239.502662 ,
303.91953823, 385.66204212, 489.39009185, 621.01694189,
788.04628157, 1000. ])
"""
Explanation: Comparison of different implementations
In the following we compare a few different implementations.
Timing data
Some of the timing data required recompilation of the coldatoms c-extension and thus restarting of the python kernel. We therefore inline the timing data here. For all timing data we use the following numbers of particles:
End of explanation
"""
pairs_per_second_reference = np.array([ 3754.97224709, 10422.72910995, 6161.80993837, 10058.90690229,
7977.31411846, 12252.85370598, 7772.16310828, 10301.24072883,
8244.43945831, 9103.42166074, 9766.48063046, 9611.93684861,
10848.58925705, 10003.22038508, 10536.11487913, 10502.85977021,
13790.40041135, 14756.04096312, 13686.40446465, 14516.38360699,
13543.29197737, 13759.00597281, 13842.27083136, 13488.36978563,
12883.47362135, 12343.43336072, 11535.69300621, 11728.47328488,
11188.22477577, 8771.32862753])
"""
Explanation: The reference implementation has this performance:
End of explanation
"""
pairs_per_second_naive_c = np.array([ 10699.75510204, 30839.85584258, 55206.0638283 ,
90738.63706541, 136230.94427643, 221507.68668544,
363783.63504059, 580015.67337796, 857906.67158507,
1327397.61712042, 1862179.30155215, 2692617.37091628,
3417509.20801424, 3759433.7356532 , 5912890.28819334,
6210511.33097665, 6165807.07674836, 6578029.24543723,
6245854.91663751, 7587882.39220302, 7396963.5969694 ,
7803134.84028501, 8355880.86492011, 8627377.42296997,
8725380.89446372, 8792556.68439878, 8841519.959524 ,
8266728.56837714, 6405629.27527453, 7742010.74647583])
"""
Explanation: Here is the performance of a direct translation of the reference implementation to C:
End of explanation
"""
pairs_per_second_chunked = np.array([ 4.40592035e+01, 3.26276736e+04, 5.57722799e+04,
8.88962282e+04, 1.41707565e+05, 2.25915800e+05,
3.63783635e+05, 5.63364506e+05, 8.46416850e+05,
1.31595453e+06, 1.99843632e+06, 2.58702453e+06,
3.70561318e+06, 4.41430284e+06, 5.56448766e+06,
4.47261194e+06, 6.02281928e+06, 6.84558797e+06,
6.13549194e+06, 6.95519016e+06, 6.78948349e+06,
6.53108161e+06, 6.84557435e+06, 6.45104368e+06,
6.50288098e+06, 6.45530515e+06, 5.69232280e+06,
4.99511738e+06, 7.79344758e+06, 7.58281281e+06])
"""
Explanation: Here is a C implementation where the outer and inner particle loops in the pairwise force are chunked to enable vectorization and caching. This version was compiled with -msse4 and thus uses SSE4 vectorization. This is currently the default implementation used in the coldatoms library.
End of explanation
"""
pairs_per_second_chunked_avx = np.array([ 12409.18343195, 37943.4181434 , 62146.25470958,
100071.75402071, 168861.11057018, 267112.2104148 ,
395246.00347654, 684553.38195189, 1030420.5131538 ,
1503948.03910195, 2204553.07448326, 3166518.02819754,
4652302.68098289, 5654685.45362713, 7002766.25233067,
6867623.49760902, 8232922.03331433, 9133491.30173879,
8718223.53605189, 8283438.27815497, 7369528.89377051,
7376934.04244149, 8322369.84045209, 7516375.83786946,
7549459.96638704, 7623711.51199181, 7380784.94405883,
6349442.00772738, 6432029.20628165, 8554706.17509566])
"""
Explanation: The following version was compiled with -mavx. On the laptop used for these timing studies this is the most advanced vector instruction set.
End of explanation
"""
pairs_per_second_chunked_avx_openmp = np.array([ 1.43566798e+02, 1.78481764e+02, 3.11719871e+02,
4.78615932e+02, 7.87616061e+02, 1.12574251e+03,
1.99908441e+03, 3.10250538e+03, 5.52021704e+03,
5.58577039e+03, 1.43348983e+04, 2.15350791e+04,
3.36393871e+04, 2.45797009e+04, 5.68420037e+04,
5.04015929e+06, 7.26927213e+06, 8.20006260e+06,
9.90760223e+06, 9.57827981e+06, 8.98103903e+06,
1.04068053e+07, 1.03363200e+07, 3.15302633e+06,
2.49586490e+06, 7.32430894e+06, 6.46459903e+06,
7.77060651e+06, 1.17015150e+07, 1.19050503e+07])
"""
Explanation: The following implementation uses OpenMP to parallelize the outer loop over particles. The laptop used for these timing studies has 2 cores. Therefore only a modest acceleration was achieved. For large numbers of particles we expect a nearly ideal speedup of 2x.
End of explanation
"""
plt.figure()
plt.loglog(nptcls, pairs_per_second_reference, '.-')
plt.loglog(nptcls, pairs_per_second_naive_c, 's-', ms=5)
plt.loglog(nptcls, pairs_per_second_chunked, 'd-', ms=5)
plt.loglog(nptcls, pairs_per_second_chunked_avx, '^-', ms=5)
plt.loglog(nptcls, pairs_per_second_chunked_avx_openmp, 'v-', ms=5)
plt.xlabel('nptcls')
plt.ylabel('pairs / s');
"""
Explanation: Visualization of the timing data
The following plot shows the performance of the different implementations
End of explanation
"""
|
kcyu1993/ML_course_kyu
|
labs/ex01/npprimer.ipynb
|
mit
|
# Useful starting lines
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
%load_ext autoreload
%autoreload 2
"""
Explanation: Welcome to the Jupyter notebook! To run any cell, press Shift+Enter or Ctrl+Enter.
IMPORTANT : Please have a look at Help->User Interface Tour and Help->Keyboard Shortcuts in the toolbar above; they will help you get started.
End of explanation
"""
1
x = [2,3,4]
def my_function(l):
l.append(12)
my_function(x)
x
# Matplotlib is used for plotting, plots are directly embedded in the
# notebook thanks to the '%matplotlib inline' command at the beginning
plt.hist(np.random.randn(10000), bins=40)
plt.xlabel('X label')
plt.ylabel('Y label')
"""
Explanation: Notebook Basics
A cell contains any type of Python input (expressions, function definitions, etc.). Running a cell is equivalent to entering this block in the Python interpreter. The notebook will print the output of the last executed line.
End of explanation
"""
np.multiply
"""
Explanation: Numpy Basics
IMPORTANT : the numpy documentation is quite good. The Notebook system is really good at helping you. Use the auto-completion with Tab, and use Shift+Tab to get the complete documentation about the current function (when the cursor is between the parentheses of the function, for instance).
For example, say you want to multiply two arrays. np.mul + Tab completes to the only valid function, np.multiply. Then, using Shift+Tab, you learn that np.multiply is actually the element-wise multiplication and is equivalent to the * operator.
End of explanation
"""
np.zeros(4)
np.eye(3)
np.array([[1,3,4],[2,5,6]])
np.arange(10) # NB : np.array(range(10)) is a slightly more complicated equivalent
np.random.randn(3, 4) # normal distributed values
# 3-D tensor
tensor_3 = np.ones((2, 4, 2))
tensor_3
"""
Explanation: Creation of arrays
Creating ndarrays (np.zeros, np.ones) is done by giving the shape as an iterable (list or tuple). An integer is also accepted for a one-dimensional array.
np.eye creates an identity matrix.
You can also create an array by giving iterables to it.
(NB : The random functions np.random.rand and np.random.randn are exceptions though)
End of explanation
"""
tensor_3.shape, tensor_3.dtype
a = np.array([[1.0, 2.0], [5.0, 4.0]])
b = np.array([[4, 3], [2, 1]])
(b.dtype, a.dtype) # each array has a data type (casting rules apply for int -> float)
np.array(["Mickey", "Mouse"]) # can hold more than just numbers
a = np.array([[1.0, 2.0], [5.0, 4.0]])
b = a # Copying the reference only
b[0,0] = 3
a
a = np.array([[1.0, 2.0], [5.0, 4.0]])
b = a.copy() # Deep-copy of the data
b[0,0] = 3
a
"""
Explanation: ndarray basics
An ndarray Python object is just a reference to the data location and its characteristics.
All numpy operations applying to an array can be called as np.function(a) or a.function() (e.g. np.sum(a) or a.sum()).
It has an attribute shape that returns a tuple of the different dimensions of the ndarray. It also has an attribute dtype that describes the type of data of the object (the default type is float64).
WARNING: because of this object structure, copying the reference does not copy the data unless you call copy().
End of explanation
"""
np.ones((2, 4)) * np.random.randn(2, 4)
np.eye(3) - np.ones((3,3))
print(a)
print(a.shape) # Get shape
print(a.shape[0]) # Get size of first dimension
"""
Explanation: Basic operators are working element-wise (+, -, *, /)
When applying operators to arrays of different sizes, there are very specific broadcasting rules that you might want to understand in the future : http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
End of explanation
"""
print(a[0]) # Get first line (slice for the first dimension)
print(a[:, 1]) # Get second column (slice for the second dimension)
print(a[0, 1]) # Get first line second column element
"""
Explanation: Accessing elements and slicing
For people uncomfortable with the slicing of arrays, please have a look at the 'Indexing and Slicing' section of http://www.python-course.eu/numpy.php
End of explanation
"""
a = np.array([[1.0, 2.0], [5.0, 4.0]])
b = np.array([[4, 3], [2, 1]])
v = np.array([0.5, 2.0])
print(a)
print(a.T) # Equivalent : a.tranpose(), np.transpose(a)
print(a.ravel())
c = np.random.randn(4,5)
print(c.shape)
print(c[np.newaxis].shape) # Adding a dimension
print(c.T.shape)
print(c.reshape([10,2]).shape)
print(c)
print(c.reshape([10,2]))
a.reshape((-1, 1)) # -1 means 'whatever needs to go there'
"""
Explanation: Changing the shape of arrays
ravel creates a flattened view of an array (a 1-D representation) whereas flatten creates a flattened copy of the array.
reshape returns the same data with a new shape (without copying when possible). transpose shuffles the dimensions.
np.newaxis allows the creation of empty dimensions.
End of explanation
"""
np.sum(a), np.sum(a, axis=0), np.sum(a, axis=1) # reduce-operations reduce the whole array if no axis is specified
"""
Explanation: Reduction operations
Reduction operations (np.sum, np.max, np.min, np.std) work on the flattened ndarray by default. You can specify the reduction axis as an argument
End of explanation
"""
np.dot(a, b) # matrix multiplication
# Other ways of writing matrix multiplication, the '@' operator for matrix multiplication
# was introduced in Python 3.5
np.allclose(a.dot(b), a @ b)
# For other linear algebra operations, use the np.linalg module
np.linalg.eig(a) # Eigen-decomposition
print(np.linalg.inv(a)) # Inverse
np.allclose(np.linalg.inv(a) @ a, np.identity(a.shape[1])) # a^-1 * a = Id
np.linalg.solve(a, v) # solves ax = v
"""
Explanation: Linear-algebra operations
End of explanation
"""
np.hstack([a, b])
np.vstack([a, b])
np.vstack([a, b]) + v # broadcasting
np.hstack([a, b]) + v # does not work
np.hstack([a, b]) + v.T # transposing a 1-D array achieves nothing
np.hstack([a, b]) + v.reshape((-1, 1)) # reshaping to convert v from a (2,) vector to a (2,1) matrix
np.hstack([a, b]) + v[:, np.newaxis] # equivalently, we can add an axis
"""
Explanation: Grouping operations
Grouping operations (np.stack, np.hstack, np.vstack, np.concatenate) take an iterable of ndarrays and not ndarrays as separate arguments : np.concatenate([a,b]) and not np.concatenate(a,b).
End of explanation
"""
r = np.random.random_integers(0, 9, size=(3, 4))
r
r[0], r[1]
r[0:2]
r[1][2] # regular python
r[1, 2] # numpy
r[:, 1:3]
"""
Explanation: Working on subset of the elements
There are two ways to apply operations to subparts of arrays (besides slicing).
Slicing reminders
End of explanation
"""
r > 5 # Binary element-wise result
r[r > 5] # Use the binary mask as filter
r[r > 5] = 999 # Modify the corresponding values with a constant
r
"""
Explanation: Binary masks
Using logical operations on arrays gives a binary mask. Using a binary mask as an index acts as a filter and outputs just the elements where the mask is True. Assigning through the mask modifies the corresponding elements of the original array.
End of explanation
"""
# Get the indices where the condition is true, gives a tuple whose length
# is the number of dimensions of the input array
np.where(r == 999)
print(np.where(np.arange(10) < 5)) # Is a 1-tuple
np.where(np.arange(10) < 5)[0] # Accessing the first element gives the indices array
np.where(r == 999, -10, r+1000) # Ternary condition, if True take element from first array, otherwise from second
r[(np.array([1,2]), np.array([2,2]))] # Gets the view corresponding to the indices. NB : iterable of arrays as indexing
"""
Explanation: Working with indices
The second way to work on subparts of arrays is through indices. Usually you'd use one array per dimension, with matching indices.
WARNING : indices are usually slower than binary masks because they are harder for the underlying library to parallelize.
End of explanation
"""
numbers = np.random.randn(1000, 1000)
%%timeit # Naive version
count = 0
for n in numbers.ravel():
    if n > 0:
        count += 1
%timeit np.sum(numbers > 0)
"""
Explanation: Working with arrays, examples
Thanks to all these tools, you should be able to avoid writing almost any for-loops which are extremely costly in Python (even more than in Matlab, because good JIT engines are yet to come). In case you really need for-loops for array computation (usually not needed but it happens) have a look at http://numba.pydata.org/ (For advanced users)
Counting the number of positive elements that satisfy a condition
End of explanation
"""
X = np.random.randn(10000)
%%timeit # Naive version
my_result = np.zeros(len(X))
for i, x in enumerate(X.ravel()):
my_result[i] = 1 + x + x**2 + x**3 + x**4
%timeit 1 + X + X**2 + X**3 + X**4
"""
Explanation: Compute polynomial for a lot of values
End of explanation
"""
X = np.random.randn(1000)
from scipy.fftpack import fft
plt.plot(fft(X).real)
"""
Explanation: Scipy
Scipy is a collection of libraries more specialized than Numpy. It is the equivalent of toolboxes in Matlab.
Have a look at their collection : http://docs.scipy.org/doc/scipy-0.18.0/reference/
Many traditional functions are implemented there.
End of explanation
"""
|
flothesof/LiveFFTPitchTracker
|
20150723_inversion.ipynb
|
bsd-2-clause
|
data = """79,05 102,40 115,40 126,10 217,50 240,70
82,4 101,5 114,1 123,1 215,8 239
81,90 104,80 113,20 121,50 214,20 237,50"""
data = data.replace(',', '.')
lines = data.split('\n')
values = [line.split('\t') for line in lines]
values
import numpy as np
import pandas as pd
s = pd.DataFrame(values)
s
s.values.astype(np.float)
df = pd.DataFrame(s.values.astype(np.float), columns=range(6))
df
"""
Explanation: Part 1: old strings, poorly chosen $\mu_i$
Importing the data after two detunings
We first import the measured data in order to deduce the $\rho_i$ from it.
End of explanation
"""
df ** 2
(df ** 2).ix[0][0]
"""
Explanation: We need to build the vector of the $\Delta T_i$ from the frequency measurements. We first build the vector of the $F_i$.
End of explanation
"""
freqs = np.array([82.4, 110., 146.8, 196., 246.9, 329.6]) # frequencies of the guitar strings, from low E to high E, in Hz
calibration_tensions = np.array([9.59, 11.61, 11.22, 8.43, 8.09, 8.9]) * 9.81 # calibration tensions found on package (in kg) converted to N
mu = calibration_tensions / (4 * 0.648**2 * freqs**2)
mu
psi = 4 * 0.648**2 * mu
psi
T = (df ** 2).values * psi
T
"""
Explanation: Important note: here I am using $\mu_i$ values read off a string package. I have no guarantee that these are the true $\mu_i$ of the strings that are actually on my guitar!
End of explanation
"""
T_end = T[1, :]
T_start = T[0, :]
mat = np.zeros((5, 6))
dT = T_end - T_start
dT
mat
"""
Explanation: note that the tensions here are in newtons, not in kg as in the Numbers spreadsheet.
Building the matrix to invert in order to obtain the stiffnesses of the model
We can now build a first matrix of 5 rows by 6 columns.
End of explanation
"""
np.nonzero(dT > 0)
tuned_string = np.nonzero(dT > 0)[0]
assert tuned_string.size == 1
tuned_string = tuned_string[0]
tuned_string
cnt = 0
for string in range(6):
if string == tuned_string:
continue
else:
for other_string in range(6):
if other_string == tuned_string:
mat[cnt, other_string] = 0
elif other_string == string:
mat[cnt, other_string] = dT[tuned_string] + dT[string]
else:
mat[cnt, other_string] = dT[string]
cnt += 1
mat[0]
mat[1]
mat
"""
Explanation: We expect only one value to have increased; here it is the first one.
End of explanation
"""
def make_matrix(T_end, T_start):
""" builds the matrix that describes the effect of the individual rho_i on the
overall tuning change
M * [rho_i] = [dT]
"""
mat = np.zeros((5, 6))
dT = T_end - T_start
upstrings = np.nonzero(dT > 0)[0]
downstrings = np.nonzero(dT < 0)[0]
if (upstrings.size == 5) and (downstrings.size == 1):
tuned_string = downstrings[0]
elif (upstrings.size == 1) and (downstrings.size == 5):
tuned_string = upstrings[0]
else:
raise Exception('problem: no changed string was detected!')
cnt = 0
for string in range(6):
if string == tuned_string:
continue
else:
for other_string in range(6):
if other_string == tuned_string:
mat[cnt, other_string] = 0
elif other_string == string:
mat[cnt, other_string] = dT[tuned_string] + dT[string]
else:
mat[cnt, other_string] = dT[string]
cnt += 1
rhs = -dT[[_ for _ in range(6) if _ != tuned_string]]
return mat, rhs
make_matrix(T_end, T_start)
"""
Explanation: We can turn the previous code into a function:
End of explanation
"""
dT
"""
Explanation: We check that the RHS is correct:
End of explanation
"""
mat1, rhs1 = make_matrix(T[1, :], T[0, :])
mat2, rhs2 = make_matrix(T[2, :], T[1, :])
mat2
rhs2
"""
Explanation: We can now build the two matrices:
End of explanation
"""
total_mat = np.vstack((mat1, mat2))
total_rhs = np.vstack((rhs1[:, np.newaxis],
rhs2[:, np.newaxis]))
total_mat
total_rhs
"""
Explanation: We can concatenate them:
End of explanation
"""
total_mat.shape
total_rhs.shape
rho, err, rank, eigs = np.linalg.lstsq(total_mat, total_rhs)
err
rho
err
"""
Explanation: Inverting the system built this way
We can now invert the system:
End of explanation
"""
np.dot(mat1, rho)
rhs1
"""
Explanation: We have the solution!
However, we note that the stiffnesses are not exactly in increasing order!! Is that a problem or not?
Checking the model's predictions
We can now see whether these coefficients correctly predict the observed tension variations. We can simply start again from the matrices mat1 and mat2 and check that we recover what we expect.
End of explanation
"""
np.dot(mat2, rho)
rhs2
"""
Explanation: Hmm, not convincing...
End of explanation
"""
tuning_mat = np.zeros((6, 6))
for other_string in range(6):
for tuning_string in range(6):
if tuning_string == other_string:
tuning_mat[other_string, tuning_string] = 1.
else:
tuning_mat[other_string, tuning_string] = \
psi[tuning_string] / psi[other_string] * \
(- rho[other_string] / (1 + np.sum([rho[i] for i in range(6) if i != tuning_string])))
tuning_mat
np.dot(tuning_mat, np.array([1, 0, 0, 0, 0, 0]))
"""
Explanation: This may be due to the residual error of the linear regression... we will still try to implement the inversion method for tuning.
Or to the wrong $\mu_i$, which were chosen... at random?
Building the matrix needed for the tuning method
Here again we have to build the matrix responsible for the tuning.
End of explanation
"""
psi[0] / psi[1] * (- rho[1] / (1 + np.sum([rho[k] for k in range(6) if k != 0])))
"""
Explanation: We check that these terms are the right ones.
End of explanation
"""
def compute_tuning_matrix(psi, rho):
tuning_mat = np.zeros((6, 6))
for other_string in range(6):
for tuning_string in range(6):
if tuning_string == other_string:
tuning_mat[other_string, tuning_string] = 1.
else:
tuning_mat[other_string, tuning_string] = \
psi[tuning_string] / psi[other_string] * \
(- rho[other_string] / (1 + np.sum([rho[i] for i in range(6) if i != tuning_string])))
return tuning_mat
compute_tuning_matrix(psi, rho)
"""
Explanation: We write the function that computes the matrix:
End of explanation
"""
freqs
"""
Explanation: We can now invert the matrix by computing the target tuning.
End of explanation
"""
target_freqs = freqs.copy()
current_freqs = df.values[2, :]
"""
Explanation: The $\Delta f$ to apply can be computed from the difference we want to obtain:
End of explanation
"""
target_freqs - current_freqs
"""
Explanation: The offset we are trying to obtain is therefore:
End of explanation
"""
target_dF = (target_freqs - current_freqs) ** 2
Delta_F = np.linalg.solve(tuning_mat, target_dF)
Delta_F
"""
Explanation: We have to square it:
End of explanation
"""
np.sqrt(Delta_F)
np.sqrt(np.dot(tuning_mat, Delta_F))
current_freqs
current_freqs + np.sqrt(np.dot(tuning_mat, Delta_F))
"""
Explanation: We have to take the square root of this big F to find the frequencies to apply:
End of explanation
"""
np.sqrt(Delta_F)
"""
Explanation: This should work. Let's see which frequency deltas we need to impose.
End of explanation
"""
current_freqs + np.sqrt(Delta_F)
"""
Explanation: Practical test
Let's try it!
End of explanation
"""
freqs = np.array([82.4, 110., 146.8, 196., 246.9, 329.6]) # frequencies of the guitar strings, from low E to high E, in Hz
calibration_tensions = np.array([7.94, 8.84, 8.34, 6.67, 4.99, 5.94]) * 9.81 # calibration tensions found on package (in kg) converted to N
mu = calibration_tensions / (4 * 0.648**2 * freqs**2)
mu
"""
Explanation: Conclusion: it did not work, and the bridge kept rising. I think the cause lies elsewhere: the tension of the string set was not appropriate!!!
So I change the string set and start over.
Part 2: new strings, with known properties
I installed new strings with the following properties:
high E .009 5.94 kg
B .011 4.99 kg
G .016 6.67 kg
D .026 8.34 kg
A .036 8.84 kg
low E .046 7.94 kg
From these we deduce the $\mu_i$:
End of explanation
"""
psi = 4 * 0.648**2 * mu
psi
"""
Explanation: I re-measured the string length and found 65 cm. I decide to keep the standard length of 0.648 m.
From this we deduce the Psi_i:
End of explanation
"""
lines = """83,55 94,70 193,7 138,8 203 190
89,2 93,3 192,55 135,2 200,55 186,9
87,8 99,2 191,25 130,9 197,85 183,7""".replace(',', '.').split('\n')
history = np.array([line.split('\t') for line in lines], dtype=np.float)
history
T = (history ** 2) * psi
mat1, rhs1 = make_matrix(T[1, :], T[0, :])
mat2, rhs2 = make_matrix(T[2, :], T[1, :])
total_mat = np.vstack((mat1, mat2))
total_rhs = np.vstack((rhs1[:, np.newaxis],
rhs2[:, np.newaxis]))
rho, err, rank, eigs = np.linalg.lstsq(total_mat, total_rhs)
rho
err
"""
Explanation: We can now re-measure the various frequencies on the guitar:
End of explanation
"""
np.dot(mat1, rho)
rhs1
np.dot(mat2, rho)
rhs2
np.dot(total_mat, rho)
total_rhs
"""
Explanation: This time we see that the rho_i are in order!
End of explanation
"""
tuning_mat = compute_tuning_matrix(psi, rho)
tuning_mat
target_freqs = freqs.copy()
current_freqs = history[2, :]
target_freqs - current_freqs
target_dF = (target_freqs - current_freqs) ** 2
Delta_F = np.linalg.solve(tuning_mat, target_dF)
Delta_F
np.sqrt(Delta_F)
for _ in np.sqrt(Delta_F):
print("{:.2f}".format(_))
"""
Explanation: The check looks odd...
End of explanation
"""
freqs = np.array([82.4, 110., 146.8, 196., 246.9, 329.6]) # frequencies of the guitar strings, from low E to high E, in Hz
calibration_tensions = np.array([7.94, 8.84, 8.34, 6.67, 4.99, 5.94]) * 9.81 # calibration tensions found on package (in kg) converted to N
mu = calibration_tensions / (4 * 0.648**2 * freqs**2)
psi = 4 * 0.648**2 * mu
"""
Explanation: Part 3: work on the tuning method
One of the problems in the previous parts comes from the fact that tuning progressively changes the string tensions, so when we want to tune the second string the question of its target tension arises, since it has already changed with respect to the initial conditions.
There are two possibilities:
compute the tension reached after tuning the first string, as predicted by the model for all strings, and use it to compute the target tension of the second string
re-measure all the string tensions and add to the second string the tension increment computed by the model
Ideally, that is if the model is correct and the parameters have been well estimated, the two methods coincide.
In this part, we will first compare the variations measured after a detuning with the predicted variations. We will then decide which of the two methods above to use.
Precise validation of the model
Plan for this part:
re-invert the model with new measurements independent of the first series above; compare the resulting rho_i (we expect to find roughly the same values!)
carry out the tuning steps one by one and, at each step, compare the frequency measured on the guitar for each string with the one predicted by the method
Model inversion
Guitar properties known in advance:
End of explanation
"""
history = np.array([[84.6,111.4,148.8,193.8,244.3,328.7],
[82.1,111.6,149.0,194.1,244.5,329.0],
[81.8,114.6,148.8,193.7,244.2,328.7]])
history
T = (history ** 2) * psi
T
"""
Explanation: In order to make the next measurements, we write the remaining part of the application with PyQt.
We successively detune the low E and the A to obtain the following readings (to 0.1 Hz):
End of explanation
"""
T.sum(axis=1) / 9.81
"""
Explanation: We can look at the sum of the tensions (and convert it back to kg to get a rough idea):
End of explanation
"""
mat1, rhs1 = make_matrix(T[1, :], T[0, :])
mat2, rhs2 = make_matrix(T[2, :], T[1, :])
total_mat = np.vstack((mat1, mat2))
total_rhs = np.vstack((rhs1[:, np.newaxis],
rhs2[:, np.newaxis]))
rho, err, rank, eigs = np.linalg.lstsq(total_mat, total_rhs)
rho
err
np.dot(total_mat, rho)
total_rhs
"""
Explanation: We see that we stay close, but that the tensions are not exactly conserved. I wonder what the model predicts about this.
End of explanation
"""
tuning_mat = compute_tuning_matrix(psi, rho)
tuning_mat
"""
Explanation: This time, the prediction is not so bad...
Successive model predictions
We can now compute the tuning matrix (within the linear model).
End of explanation
"""
def predict_changes(initial_T, final_T, tuning_mat):
    """predicts changes in tuning (frequency) from a vector of tensions"""
    # frequencies follow from tensions through f = sqrt(T / psi), using the global psi defined above
    print("initial tunings: {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}".format(*[x for x in np.sqrt(initial_T / psi)]))
    print("final tunings: {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}".format(*[x for x in np.sqrt(final_T / psi)]))
    print("predicted tunings")
"""
Explanation: And now we can predict the changes from one state to another:
End of explanation
"""
target_freqs = freqs.copy()
current_freqs = history[2, :]
target_dF = target_freqs**2 - current_freqs**2
Delta_F = np.linalg.solve(tuning_mat, target_dF)
Delta_F
np.sqrt(current_freqs**2 + np.dot(tuning_mat, Delta_F))
target_freqs
"""
Explanation: be careful with the computation of target_dF; it is:
$$
\delta F = f_{target}^2 - f_{current}^2
$$
End of explanation
"""
print("initial: {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}".format(*[x for x in current_freqs]))
for step in range(6):
new_F = np.sqrt(current_freqs**2 + np.dot(tuning_mat, Delta_F * (np.arange(6) <= step)))
print(" step {}: {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}".format(step, *[x for x in new_F]))
"""
Explanation: We can now write out the expected sequence:
End of explanation
"""
def tuning_step(tuning_mat, initial_freqs, Delta_F, step_number):
"""predicts observed tuning as a function of tuning step
convention: step 0 means nothing has changed"""
step = step_number - 1
if step == -1:
return initial_freqs
return np.sqrt(initial_freqs**2 + np.dot(tuning_mat, Delta_F * (np.arange(6) <= step)))
print_strings = lambda v: print("{:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}, {:.1f}".format(*v))
"""
Explanation: We can write a function that makes the computation easier:
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
initial_freqs = current_freqs.copy()
"""
Explanation: Step 0: the program
End of explanation
"""
for target, string in zip([tuning_step(tuning_mat, initial_freqs, Delta_F, i)[i-1] for i in range(1, 7)],
["low E", "A", "D", "G", "B", "high E"]):
print("string: {}, target frequency: {:.1f}".format(string, target))
"""
Explanation: The target frequencies of the strings are:
End of explanation
"""
measured_freqs = [82.0,114.5,148.5,193.5,244.0,328.7]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 1)
print_strings(measured_freqs)
print_strings(expected_freqs)
plt.plot(measured_freqs, 'o')
plt.plot(expected_freqs, 'o')
"""
Explanation: Step 1: low E string
End of explanation
"""
measured_freqs = [82.3,110.0,148.8,194.1,244.5,329.2]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 2)
print_strings(measured_freqs)
print_strings(expected_freqs)
plt.plot(measured_freqs, 'o')
plt.plot(expected_freqs, 'o')
"""
Explanation: A string
End of explanation
"""
measured_freqs = [82.4,110.2,146.9,194.4,244.7,329.5]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 3)
print_strings(measured_freqs)
print_strings(expected_freqs)
plt.plot(measured_freqs, 'o')
plt.plot(expected_freqs, 'o')
"""
Explanation: D string
End of explanation
"""
measured_freqs = [82.4,110.2,147.0,196.1,244.6,329.2]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 4)
print_strings(measured_freqs)
print_strings(expected_freqs)
plt.plot(measured_freqs, 'o')
plt.plot(expected_freqs, 'o')
"""
Explanation: G string
End of explanation
"""
measured_freqs = [82.4,110.2,146.9,196.3,246.8,329.2]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 5)
print_strings(measured_freqs)
print_strings(expected_freqs)
plt.plot(measured_freqs, 'o')
plt.plot(expected_freqs, 'o')
"""
Explanation: B string
End of explanation
"""
measured_freqs = [82.4,110.1,146.9,196.3,246.7,329.6]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 6)
print_strings(measured_freqs)
print_strings(expected_freqs)
plt.plot(measured_freqs, 'o')
plt.plot(expected_freqs, 'o')
"""
Explanation: High E string
End of explanation
"""
freqs = np.array([82.4, 110., 146.8, 196., 246.9, 329.6]) # frequencies of the guitar strings, from low E to high E, in Hz
calibration_tensions = np.array([7.94, 8.84, 8.34, 6.67, 4.99, 5.94]) * 9.81 # calibration tensions found on package (in kg) converted to N
mu = calibration_tensions / (4 * 0.648**2 * freqs**2)
psi = 4 * 0.648**2 * mu
history = np.array([[83.7,112.7,151.3,204.2,251.2,333.4],
[84.9,112.3,150.9,203.3,250.5,332.8],
[85.0,109.4,151.1,203.9,250.9,333.0]])
history
T = (history ** 2) * psi
T.sum(axis=1) / 9.81
mat1, rhs1 = make_matrix(T[1, :], T[0, :])
mat2, rhs2 = make_matrix(T[2, :], T[1, :])
total_mat = np.vstack((mat1, mat2))
total_rhs = np.vstack((rhs1[:, np.newaxis],
rhs2[:, np.newaxis]))
rho, err, rank, eigs = np.linalg.lstsq(total_mat, total_rhs)
rho
err
np.dot(total_mat, rho)
total_rhs
"""
Explanation: I am inclined to think that this is good enough.
To do tomorrow:
redo an inversion
redo a tuning, quantifying the error in semitones with respect to the target note
20150811
Effect of tightening the bolts at the guitar headstock on the tuning:
before: 83.3,112.8,150.0,201.3,250.2,334.3
after: 83.1,112.9,149.7,201.3,250.4,334.5
Conclusion: small effect, we will neglect it.
I use the following tuning protocol (note that it differs from the previous step, where I had lowered the E and raised the A):
I raise the low E
I lower the A
Beforehand, I put all the fine-tuning screws on the vibrato in their neutral positions.
Inversion
End of explanation
"""
history = np.vstack((history,
np.array([82.3,109.5,151.3,204.0,251.1,333.3])))
history
T = (history ** 2) * psi
mat3, rhs3 = make_matrix(T[3, :], T[2, :])
total_mat = np.vstack((mat1, mat2, mat3))
total_rhs = np.vstack((rhs1[:, np.newaxis],
rhs2[:, np.newaxis],
rhs3[:, np.newaxis]))
rho, err, rank, eigs = np.linalg.lstsq(total_mat, total_rhs)
rho
err
np.dot(total_mat, rho)
total_rhs
"""
Explanation: We can see that the inversion does not give satisfactory values, because the signs of the matrix-vector product do not match the observed ones (which are all negative or all positive). We could imagine making more measurements to see what happens: I will add one more detuning.
End of explanation
"""
history = np.array([[81.08,110.53,151.17,203.89,251.09,333.24],
[82.9,110.47,151.04,203.57,250.96,333.19],
[82.87,112.37,150.96,203.42,250.69,333.02]])
history
T = (history ** 2) * psi
T.sum(axis=1) / 9.81
mat1, rhs1 = make_matrix(T[1, :], T[0, :])
mat2, rhs2 = make_matrix(T[2, :], T[1, :])
total_mat = np.vstack((mat1, mat2))
total_rhs = np.vstack((rhs1[:, np.newaxis],
rhs2[:, np.newaxis]))
rho, err, rank, eigs = np.linalg.lstsq(total_mat, total_rhs)
rho
err
np.dot(total_mat, rho)
total_rhs
"""
Explanation: We drop this attempt and restart the calibration procedure, with higher precision:
End of explanation
"""
tuning_mat = compute_tuning_matrix(psi, rho)
target_freqs = freqs.copy()
current_freqs = history[2, :]
target_dF = target_freqs**2 - current_freqs**2
Delta_F = np.linalg.solve(tuning_mat, target_dF)
Delta_F
initial_freqs = current_freqs.copy()
initial_freqs
for target, string in zip([tuning_step(tuning_mat, initial_freqs, Delta_F, i)[i-1] for i in range(1, 7)],
["low E", "A", "D", "G", "B", "high E"]):
print("string: {}, target frequency: {:.1f}".format(string, target))
"""
Explanation: Everything has the same sign, which is not so bad. And the error is small.
Tuning
End of explanation
"""
measured_freqs = [82.18,112.37,150.94,203.43,250.67,333.08]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 1)
print_strings(measured_freqs)
print_strings(expected_freqs)
"""
Explanation: Low E string:
End of explanation
"""
measured_freqs = [82.44,109.75,150.91,203.92,251.13,333.26]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 2)
print_strings(measured_freqs)
print_strings(expected_freqs)
"""
Explanation: A string:
End of explanation
"""
measured_freqs = [82.66,110.12,146.65,204.46,251.57,333.51]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 3)
print_strings(measured_freqs)
print_strings(expected_freqs)
"""
Explanation: D string:
End of explanation
"""
measured_freqs = [82.86,110.39,146.96,195.81,251.94,333.99]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 4)
print_strings(measured_freqs)
print_strings(expected_freqs)
"""
Explanation: G string:
End of explanation
"""
measured_freqs = [82.97,110.52,147.1,196.29,246.53,334.24]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 5)
print_strings(measured_freqs)
print_strings(expected_freqs)
"""
Explanation: B string:
End of explanation
"""
measured_freqs = [83.2,110.75,147.17,196.55,246.67,329.84]
expected_freqs = tuning_step(tuning_mat, initial_freqs, Delta_F, 6)
print_strings(measured_freqs)
print_strings(expected_freqs)
"""
Explanation: High E string:
End of explanation
"""
|
yingchi/fastai-notes
|
deeplearning1/nbs/lesson6.ipynb
|
apache-2.0
|
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars:', vocab_size)
"""
Explanation: Setup
We're going to download the collected works of Nietzsche to use as our data for this class.
End of explanation
"""
chars.insert(0, "\0")
''.join(chars[1:-6])
"""
Explanation: Sometimes it's useful to have a zero value in the dataset, e.g. for padding
End of explanation
"""
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
"""
Explanation: Map from chars to indices and back again
End of explanation
"""
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:70])
"""
Explanation: idx will be the data we use from now on - it simply converts all the characters to their index (based on the mapping above)
End of explanation
"""
cs=3
c1_dat = [idx[i] for i in xrange(0, len(idx)-1-cs, cs)]
c2_dat = [idx[i+1] for i in xrange(0, len(idx)-1-cs, cs)]
c3_dat = [idx[i+2] for i in xrange(0, len(idx)-1-cs, cs)]
c4_dat = [idx[i+3] for i in xrange(0, len(idx)-1-cs, cs)]
"""
Explanation: 3 char model
Create inputs
Create a list of every 4th character, starting at the 0th, 1st, 2nd, then 3rd characters
End of explanation
"""
x1 = np.stack(c1_dat[:-2])
x2 = np.stack(c2_dat[:-2])
x3 = np.stack(c3_dat[:-2])
"""
Explanation: Our inputs
End of explanation
"""
y = np.stack(c4_dat[:-2])
"""
Explanation: Our output
End of explanation
"""
x1[:4], x2[:4], x3[:4]
y[:4]
x1.shape, y.shape
"""
Explanation: The first 4 inputs and outputs
End of explanation
"""
n_fac = 42
"""
Explanation: The number of latent factors to create (i.e. the size of the embedding matrix)
End of explanation
"""
def embedding_input(name, n_in, n_out):
inp = Input(shape=(1,), dtype='int64', name=name)
emb = Embedding(n_in, n_out, input_length=1)(inp)
return inp, Flatten()(emb)
c1_in, c1 = embedding_input('c1', vocab_size, n_fac)
c2_in, c2 = embedding_input('c2', vocab_size, n_fac)
c3_in, c3 = embedding_input('c3', vocab_size, n_fac)
"""
Explanation: Create inputs and embedding outputs for each of our 3 character inputs
End of explanation
"""
n_hidden = 256
"""
Explanation: Create and train model
Pick a size for our hidden state
End of explanation
"""
dense_in = Dense(n_hidden, activation='relu')
"""
Explanation: This is the 'green arrow' from our diagram - the layer operation from input to hidden.
End of explanation
"""
c1_hidden = dense_in(c1)
"""
Explanation: Our first hidden activation is simply this function applied to the result of the embedding of the first character.
End of explanation
"""
dense_hidden = Dense(n_hidden, activation='tanh')
"""
Explanation: This is the 'orange arrow' from our diagram - the layer operation from hidden to hidden.
End of explanation
"""
c2_dense = dense_in(c2)
hidden_2 = dense_hidden(c1_hidden)
c2_hidden = merge([c2_dense, hidden_2])
c3_dense = dense_in(c3)
hidden_3 = dense_hidden(c2_hidden)
c3_hidden = merge([c3_dense, hidden_3])
"""
Explanation: Our second and third hidden activations sum up the previous hidden state (after applying dense_hidden) to the new input state.
End of explanation
"""
dense_out = Dense(vocab_size, activation='softmax')
"""
Explanation: This is the 'blue arrow' from our diagram - the layer operation from hidden to output.
End of explanation
"""
c4_out = dense_out(c3_hidden)
model = Model([c1_in, c2_in, c3_in], c4_out)
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
model.optimizer.lr=0.000001
model.fit([x1, x2, x3], y, batch_size=64, nb_epoch=4)
model.optimizer.lr=0.01
model.fit([x1, x2, x3], y, batch_size=64, nb_epoch=4)
model.optimizer.lr.set_value(0.000001)
model.fit([x1, x2, x3], y, batch_size=64, nb_epoch=4)
model.optimizer.lr.set_value(0.01)
model.fit([x1, x2, x3], y, batch_size=64, nb_epoch=4)
"""
Explanation: The third hidden state is the input to our output layer.
End of explanation
"""
def get_next(inp):
idxs = [char_indices[c] for c in inp]
arrs = [np.array(i)[np.newaxis] for i in idxs]
p = model.predict(arrs)
i = np.argmax(p)
return chars[i]
get_next('phi')
get_next(' th')
get_next(' an')
"""
Explanation: Test model
End of explanation
"""
cs=8
"""
Explanation: Our first RNN!
Create inputs
This is the size of our unrolled RNN.
End of explanation
"""
c_in_dat = [[idx[i+n] for i in xrange(0, len(idx)-1-cs, cs)]
for n in range(cs)]
"""
Explanation: For each of 0 through 7, create a list of every 8th character with that starting point. These will be the 8 inputs to out model.
End of explanation
"""
c_out_dat = [idx[i+cs] for i in xrange(0, len(idx)-1-cs, cs)]
xs = [np.stack(c[:-2]) for c in c_in_dat]
len(xs), xs[0].shape
y = np.stack(c_out_dat[:-2])
"""
Explanation: Then create a list of the next character in each of these series. This will be the labels for our model.
End of explanation
"""
[xs[n][:cs] for n in range(cs)]
"""
Explanation: So each column below is one series of 8 characters from the text.
End of explanation
"""
y[:cs]
n_fac = 42
"""
Explanation: ...and this is the next character after each sequence.
End of explanation
"""
def embedding_input(name, n_in, n_out):
inp = Input(shape=(1,), dtype='int64', name=name+'_in')
emb = Embedding(n_in, n_out, input_length=1, name=name+'_emb')(inp)
return inp, Flatten()(emb)
c_ins = [embedding_input('c'+str(n), vocab_size, n_fac) for n in range(cs)]
n_hidden = 256
dense_in = Dense(n_hidden, activation='relu')
dense_hidden = Dense(n_hidden, activation='relu', init='identity')
dense_out = Dense(vocab_size, activation='softmax')
"""
Explanation: Create and train model
End of explanation
"""
hidden = dense_in(c_ins[0][1])
"""
Explanation: The first character of each sequence goes through dense_in(), to create our first hidden activations.
End of explanation
"""
for i in range(1,cs):
c_dense = dense_in(c_ins[i][1])
hidden = dense_hidden(hidden)
hidden = merge([c_dense, hidden])
"""
Explanation: Then for each successive layer we combine the output of dense_in() on the next character with the output of dense_hidden() on the current hidden state, to create the new hidden state.
End of explanation
"""
c_out = dense_out(hidden)
"""
Explanation: Putting the final hidden state through dense_out() gives us our output.
End of explanation
"""
model = Model([c[0] for c in c_ins], c_out)
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
model.fit(xs, y, batch_size=64, nb_epoch=12)
"""
Explanation: So now we can create our model.
End of explanation
"""
def get_next(inp):
idxs = [np.array(char_indices[c])[np.newaxis] for c in inp]
p = model.predict(idxs)
return chars[np.argmax(p)]
get_next('for thos')
get_next('part of ')
get_next('queens a')
"""
Explanation: Test model
End of explanation
"""
n_hidden, n_fac, cs, vocab_size = (256, 42, 8, 86)
"""
Explanation: Our first RNN with keras!
End of explanation
"""
model=Sequential([
Embedding(vocab_size, n_fac, input_length=cs),
SimpleRNN(n_hidden, activation='relu', inner_init='identity'),
Dense(vocab_size, activation='softmax')
])
model.summary()
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
model.fit(np.concatenate(xs,axis=1), y, batch_size=64, nb_epoch=8)
def get_next_keras(inp):
idxs = [char_indices[c] for c in inp]
arrs = np.array(idxs)[np.newaxis,:]
p = model.predict(arrs)[0]
return chars[np.argmax(p)]
get_next_keras('this is ')
get_next_keras('part of ')
get_next_keras('queens a')
"""
Explanation: This is nearly exactly equivalent to the RNN we built ourselves in the previous section.
End of explanation
"""
#c_in_dat = [[idx[i+n] for i in xrange(0, len(idx)-1-cs, cs)]
# for n in range(cs)]
c_out_dat = [[idx[i+n] for i in xrange(1, len(idx)-cs, cs)]
for n in range(cs)]
ys = [np.stack(c[:-2]) for c in c_out_dat]
"""
Explanation: Returning sequences
Create inputs
To use a sequence model, we can leave our input unchanged - but we have to change our output to a sequence (of course!)
Here, c_out_dat is identical to c_in_dat, but moved across 1 character.
End of explanation
"""
[xs[n][:cs] for n in range(cs)]
[ys[n][:cs] for n in range(cs)]
"""
Explanation: Reading down each column shows one set of inputs and outputs.
End of explanation
"""
dense_in = Dense(n_hidden, activation='relu')
dense_hidden = Dense(n_hidden, activation='relu', init='identity')
dense_out = Dense(vocab_size, activation='softmax', name='output')
"""
Explanation: Create and train model
End of explanation
"""
inp1 = Input(shape=(n_fac,), name='zeros')
hidden = dense_in(inp1)
outs = []
for i in range(cs):
c_dense = dense_in(c_ins[i][1])
hidden = dense_hidden(hidden)
hidden = merge([c_dense, hidden], mode='sum')
# every layer now has an output
outs.append(dense_out(hidden))
model = Model([inp1] + [c[0] for c in c_ins], outs)
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
zeros = np.tile(np.zeros(n_fac), (len(xs[0]),1))
zeros.shape
model.fit([zeros]+xs, ys, batch_size=64, nb_epoch=12)
"""
Explanation: We're going to pass a vector of all zeros as our starting point - here's the input layer for that:
End of explanation
"""
def get_nexts(inp):
idxs = [char_indices[c] for c in inp]
arrs = [np.array(i)[np.newaxis] for i in idxs]
p = model.predict([np.zeros(n_fac)[np.newaxis,:]] + arrs)
print(list(inp))
return [chars[np.argmax(o)] for o in p]
get_nexts(' this is')
get_nexts(' part of')
"""
Explanation: Test model
End of explanation
"""
n_hidden, n_fac, cs, vocab_size
"""
Explanation: Sequence model with keras
End of explanation
"""
model=Sequential([
Embedding(vocab_size, n_fac, input_length=cs),
SimpleRNN(n_hidden, return_sequences=True, activation='relu', inner_init='identity'),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.summary()
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
xs[0].shape
x_rnn=np.stack(xs, axis=1)
y_rnn=np.expand_dims(np.stack(ys, axis=1), -1)
x_rnn.shape, y_rnn.shape
model.fit(x_rnn, y_rnn, batch_size=64, nb_epoch=8)
def get_nexts_keras(inp):
idxs = [char_indices[c] for c in inp]
arr = np.array(idxs)[np.newaxis,:]
p = model.predict(arr)[0]
print(list(inp))
return [chars[np.argmax(o)] for o in p]
get_nexts_keras(' this is')
"""
Explanation: To convert our previous keras model into a sequence model, simply add the 'return_sequences=True' parameter, and add TimeDistributed() around our dense layer.
End of explanation
"""
model=Sequential([
SimpleRNN(n_hidden, return_sequences=True, input_shape=(cs, vocab_size),
activation='relu', inner_init='identity'),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.compile(loss='categorical_crossentropy', optimizer=Adam())
oh_ys = [to_categorical(o, vocab_size) for o in ys]
oh_y_rnn=np.stack(oh_ys, axis=1)
oh_xs = [to_categorical(o, vocab_size) for o in xs]
oh_x_rnn=np.stack(oh_xs, axis=1)
oh_x_rnn.shape, oh_y_rnn.shape
model.fit(oh_x_rnn, oh_y_rnn, batch_size=64, nb_epoch=8)
def get_nexts_oh(inp):
idxs = np.array([char_indices[c] for c in inp])
arr = to_categorical(idxs, vocab_size)
p = model.predict(arr[np.newaxis,:])[0]
print(list(inp))
return [chars[np.argmax(o)] for o in p]
get_nexts_oh(' this is')
"""
Explanation: One-hot sequence model with keras
This is the keras version of the theano model that we're about to create.
End of explanation
"""
bs=64
"""
Explanation: Stateful model with keras
End of explanation
"""
model=Sequential([
Embedding(vocab_size, n_fac, input_length=cs, batch_input_shape=(bs,8)),
BatchNormalization(),
LSTM(n_hidden, return_sequences=True, stateful=True),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
"""
Explanation: A stateful model is easy to create (just add "stateful=True") but harder to train. We had to add batchnorm and use LSTM to get reasonable results.
When using stateful in keras, you have to also add 'batch_input_shape' to the first layer, and fix the batch size there.
End of explanation
"""
mx = len(x_rnn)//bs*bs
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
model.optimizer.lr=1e-4
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
"""
Explanation: Since we're using a fixed batch shape, we have to ensure our inputs and outputs are an exact multiple of the batch size.
End of explanation
"""
n_input = vocab_size
n_output = vocab_size
"""
Explanation: Theano RNN
End of explanation
"""
def init_wgts(rows, cols):
scale = math.sqrt(2/rows)
return shared(normal(scale=scale, size=(rows, cols)).astype(np.float32))
def init_bias(rows):
return shared(np.zeros(rows, dtype=np.float32))
"""
Explanation: Using raw theano, we have to create our weight matrices and bias vectors ourselves - here are the functions we'll use to do so (using a He-style scaled normal initialization).
The return values are wrapped in shared(), which is how we tell theano that it can manage this data (copying it to and from the GPU as necessary).
End of explanation
"""
def wgts_and_bias(n_in, n_out):
return init_wgts(n_in, n_out), init_bias(n_out)
def id_and_bias(n):
return shared(np.eye(n, dtype=np.float32)), init_bias(n)
"""
Explanation: We return the weights and biases together as a tuple. For the hidden weights, we'll use an identity initialization (as recommended by Hinton.)
End of explanation
"""
t_inp = T.matrix('inp')
t_outp = T.matrix('outp')
t_h0 = T.vector('h0')
lr = T.scalar('lr')
all_args = [t_h0, t_inp, t_outp, lr]
"""
Explanation: Theano doesn't actually do any computations until we explicitly compile and evaluate the function (at which point it'll be turned into CUDA code and sent off to the GPU). So our job is to describe the computations that we'll want theano to do - the first step is to tell theano what inputs we'll be providing to our computation:
End of explanation
"""
W_h = id_and_bias(n_hidden)
W_x = wgts_and_bias(n_input, n_hidden)
W_y = wgts_and_bias(n_hidden, n_output)
w_all = list(chain.from_iterable([W_h, W_x, W_y]))
"""
Explanation: Now we're ready to create our intial weight matrices.
End of explanation
"""
def step(x, h, W_h, b_h, W_x, b_x, W_y, b_y):
# Calculate the hidden activations
h = nnet.relu(T.dot(x, W_x) + b_x + T.dot(h, W_h) + b_h)
# Calculate the output activations
y = nnet.softmax(T.dot(h, W_y) + b_y)
# Return both (the 'Flatten()' is to work around a theano bug)
return h, T.flatten(y, 1)
"""
Explanation: Theano handles looping by using the GPU scan operation. We have to tell theano what to do at each step through the scan - this is the function we'll use, which does a single forward pass for one character:
End of explanation
"""
[v_h, v_y], _ = theano.scan(step, sequences=t_inp,
outputs_info=[t_h0, None], non_sequences=w_all)
"""
Explanation: Now we can provide everything necessary for the scan operation, so we can set that up - we have to pass in the function to call at each step, the sequence to step through, the initial values of the outputs, and any other arguments to pass to the step function.
End of explanation
"""
error = nnet.categorical_crossentropy(v_y, t_outp).sum()
g_all = T.grad(error, w_all)
"""
Explanation: We can now calculate our loss function, and all of our gradients, with just a couple of lines of code!
End of explanation
"""
def upd_dict(wgts, grads, lr):
return OrderedDict({w: w-g*lr for (w,g) in zip(wgts,grads)})
upd = upd_dict(w_all, g_all, lr)
"""
Explanation: We even have to show theano how to do SGD - so we set up this dictionary of updates to complete after every forward pass, which apply to standard SGD update rule to every weight.
End of explanation
"""
fn = theano.function(all_args, error, updates=upd, allow_input_downcast=True)
X = oh_x_rnn
Y = oh_y_rnn
X.shape, Y.shape
"""
Explanation: We're finally ready to compile the function!
End of explanation
"""
err=0.0; l_rate=0.01
for i in range(len(X)):
err+=fn(np.zeros(n_hidden), X[i], Y[i], l_rate)
if i % 1000 == 999:
print ("Error:{:.3f}".format(err/1000))
err=0.0
f_y = theano.function([t_h0, t_inp], v_y, allow_input_downcast=True)
pred = np.argmax(f_y(np.zeros(n_hidden), X[6]), axis=1)
act = np.argmax(X[6], axis=1)
[indices_char[o] for o in act]
[indices_char[o] for o in pred]
"""
Explanation: To use it, we simply loop through our input data, calling the function compiled above, and printing our progress from time to time.
End of explanation
"""
def sigmoid(x): return 1/(1+np.exp(-x))
def sigmoid_d(x):
output = sigmoid(x)
return output*(1-output)
def relu(x): return np.maximum(0., x)
def relu_d(x): return (x > 0.)*1.
relu(np.array([3.,-3.])), relu_d(np.array([3.,-3.]))
def dist(a,b): return pow(a-b,2)
def dist_d(a,b): return 2*(a-b)
import pdb
eps = 1e-7
def x_entropy(pred, actual):
return -np.sum(actual * np.log(np.clip(pred, eps, 1-eps)))
def x_entropy_d(pred, actual): return -actual/pred
def softmax(x): return np.exp(x)/np.exp(x).sum()
def softmax_d(x):
sm = softmax(x)
res = np.expand_dims(-sm,-1)*sm
res[np.diag_indices_from(res)] = sm*(1-sm)
return res
test_preds = np.array([0.2,0.7,0.1])
test_actuals = np.array([0.,1.,0.])
nnet.categorical_crossentropy(test_preds, test_actuals).eval()
x_entropy(test_preds, test_actuals)
test_inp = T.dvector()
test_out = nnet.categorical_crossentropy(test_inp, test_actuals)
test_grad = theano.function([test_inp], T.grad(test_out, test_inp))
test_grad(test_preds)
x_entropy_d(test_preds, test_actuals)
pre_pred = random(oh_x_rnn[0][0].shape)
preds = softmax(pre_pred)
actual = oh_x_rnn[0][0]
np.allclose(softmax_d(pre_pred).dot(x_entropy_d(preds, actual)), preds-actual)  # x_entropy_d is used directly here; loss_d is only bound to it a few cells below
softmax(test_preds)
nnet.softmax(test_preds).eval()
test_out = T.flatten(nnet.softmax(test_inp))
test_grad = theano.function([test_inp], theano.gradient.jacobian(test_out, test_inp))
test_grad(test_preds)
softmax_d(test_preds)
act=relu
act_d = relu_d
loss=x_entropy
loss_d=x_entropy_d
"""
Explanation: Pure python RNN!
Set up basic functions
Now we're going to try to repeat the above theano RNN, using just pure python (and numpy). Which means, we have to do everything ourselves, including defining the basic functions of a neural net! Below are all of the definitions, along with tests to check that they give the same answers as theano. The functions ending in _d are the derivatives of each function.
End of explanation
"""
def scan(fn, start, seq):
res = []
prev = start
for s in seq:
app = fn(prev, s)
res.append(app)
prev = app
return res
"""
Explanation: We also have to define our own scan function. Since we're not worrying about running things in parallel, it's very simple to implement:
End of explanation
"""
scan(lambda prev,curr: prev+curr, 0, range(5))
"""
Explanation: ...for instance, scan on + is the cumulative sum.
End of explanation
"""
inp = oh_x_rnn
outp = oh_y_rnn
n_input = vocab_size
n_output = vocab_size
inp.shape, outp.shape
"""
Explanation: Set up training
Let's now build the functions to do the forward and backward passes of our RNN. First, define our data and shape.
End of explanation
"""
def one_char(prev, item):
# Previous state
tot_loss, pre_hidden, pre_pred, hidden, ypred = prev
# Current inputs and output
x, y = item
pre_hidden = np.dot(x,w_x) + np.dot(hidden,w_h)
hidden = act(pre_hidden)
pre_pred = np.dot(hidden,w_y)
ypred = softmax(pre_pred)
return (
# Keep track of loss so we can report it
tot_loss+loss(ypred, y),
# Used in backprop
pre_hidden, pre_pred,
# Used in next iteration
hidden,
# To provide predictions
ypred)
"""
Explanation: Here's the function to do a single forward pass of an RNN, for a single character.
End of explanation
"""
def get_chars(n): return zip(inp[n], outp[n])
def one_fwd(n): return scan(one_char, (0,0,0,np.zeros(n_hidden),0), get_chars(n))
"""
Explanation: We use scan to apply the above to a whole sequence of characters.
End of explanation
"""
# "Columnify" a vector
def col(x): return x[:,newaxis]
def one_bkwd(args, n):
global w_x,w_y,w_h
i=inp[n] # 8x86
o=outp[n] # 8x86
d_pre_hidden = np.zeros(n_hidden) # 256
for p in reversed(range(len(i))):
totloss, pre_hidden, pre_pred, hidden, ypred = args[p]
x=i[p] # 86
y=o[p] # 86
d_pre_pred = softmax_d(pre_pred).dot(loss_d(ypred,y)) # 86
d_pre_hidden = (np.dot(d_pre_hidden, w_h.T)
+ np.dot(d_pre_pred,w_y.T)) * act_d(pre_hidden) # 256
# d(loss)/d(w_y) = d(loss)/d(pre_pred) * d(pre_pred)/d(w_y)
w_y -= col(hidden) * d_pre_pred * alpha
# d(loss)/d(w_h) = d(loss)/d(pre_hidden[p-1]) * d(pre_hidden[p-1])/d(w_h)
if (p>0): w_h -= args[p-1][3].dot(d_pre_hidden) * alpha
w_x -= col(x)*d_pre_hidden * alpha
return d_pre_hidden
"""
Explanation: Now we can define the backward step. We use a loop to go through every element of the sequence. The derivatives apply the chain rule at each step and accumulate the gradients across the sequence.
End of explanation
"""
scale=math.sqrt(2./n_input)
w_x = normal(scale=scale, size=(n_input,n_hidden))
w_y = normal(scale=scale, size=(n_hidden, n_output))
w_h = np.eye(n_hidden, dtype=np.float32)
"""
Explanation: Now we can set up our initial weight matrices. Note that we're not using bias at all in this example, in order to keep things simpler.
End of explanation
"""
overallError=0
alpha=0.0001
for n in range(10000):
res = one_fwd(n)
overallError+=res[-1][0]
deriv = one_bkwd(res, n)
if(n % 1000 == 999):
print ("Error:{:.4f}; Gradient:{:.5f}".format(
overallError/1000, np.linalg.norm(deriv)))
overallError=0
"""
Explanation: Our loop looks much like the theano loop in the previous section, except that we have to call the backwards step ourselves.
End of explanation
"""
model=Sequential([
GRU(n_hidden, return_sequences=True, input_shape=(cs, vocab_size),
activation='relu', inner_init='identity'),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
model.compile(loss='categorical_crossentropy', optimizer=Adam())
model.fit(oh_x_rnn, oh_y_rnn, batch_size=64, nb_epoch=8)
get_nexts_oh(' this is')
"""
Explanation: Keras GRU
Identical to the last keras rnn, but a GRU!
End of explanation
"""
W_h = id_and_bias(n_hidden)
W_x = init_wgts(n_input, n_hidden)
W_y = wgts_and_bias(n_hidden, n_output)
rW_h = init_wgts(n_hidden, n_hidden)
rW_x = wgts_and_bias(n_input, n_hidden)
uW_h = init_wgts(n_hidden, n_hidden)
uW_x = wgts_and_bias(n_input, n_hidden)
w_all = list(chain.from_iterable([W_h, W_y, uW_x, rW_x]))
w_all.extend([W_x, uW_h, rW_h])
"""
Explanation: Theano GRU
Separate weights
The theano GRU looks just like the simple theano RNN, except for the use of the reset and update gates. Each of these gates requires its own hidden and input weights, so we add those to our weight matrices.
End of explanation
"""
def gate(x, h, W_h, W_x, b_x):
return nnet.sigmoid(T.dot(x, W_x) + b_x + T.dot(h, W_h))
"""
Explanation: Here's the definition of a gate - it's just a sigmoid applied to the addition of the dot products of the input vectors.
End of explanation
"""
def step(x, h, W_h, b_h, W_y, b_y, uW_x, ub_x, rW_x, rb_x, W_x, uW_h, rW_h):
reset = gate(x, h, rW_h, rW_x, rb_x)
update = gate(x, h, uW_h, uW_x, ub_x)
h_new = gate(x, h * reset, W_h, W_x, b_h)
h = update*h + (1-update)*h_new
y = nnet.softmax(T.dot(h, W_y) + b_y)
return h, T.flatten(y, 1)
"""
Explanation: Our step is nearly identical to before, except that we multiply our hidden state by our reset gate, and we update our hidden state based on the update gate.
End of explanation
"""
[v_h, v_y], _ = theano.scan(step, sequences=t_inp,
outputs_info=[t_h0, None], non_sequences=w_all)
error = nnet.categorical_crossentropy(v_y, t_outp).sum()
g_all = T.grad(error, w_all)
upd = upd_dict(w_all, g_all, lr)
fn = theano.function(all_args, error, updates=upd, allow_input_downcast=True)
err=0.0; l_rate=0.1
for i in range(len(X)):
err+=fn(np.zeros(n_hidden), X[i], Y[i], l_rate)
if i % 1000 == 999:
l_rate *= 0.95
print ("Error:{:.2f}".format(err/1000))
err=0.0
"""
Explanation: Everything from here on is identical to our simple RNN in theano.
End of explanation
"""
W = (shared(np.concatenate([np.eye(n_hidden), normal(size=(n_input, n_hidden))])
.astype(np.float32)), init_bias(n_hidden))
rW = wgts_and_bias(n_input+n_hidden, n_hidden)
uW = wgts_and_bias(n_input+n_hidden, n_hidden)
W_y = wgts_and_bias(n_hidden, n_output)
w_all = list(chain.from_iterable([W, W_y, uW, rW]))
def gate(m, W, b): return nnet.sigmoid(T.dot(m, W) + b)
def step(x, h, W, b, W_y, b_y, uW, ub, rW, rb):
m = T.concatenate([h, x])
reset = gate(m, rW, rb)
update = gate(m, uW, ub)
m = T.concatenate([h*reset, x])
h_new = gate(m, W, b)
h = update*h + (1-update)*h_new
y = nnet.softmax(T.dot(h, W_y) + b_y)
return h, T.flatten(y, 1)
[v_h, v_y], _ = theano.scan(step, sequences=t_inp,
outputs_info=[t_h0, None], non_sequences=w_all)
def upd_dict(wgts, grads, lr):
return OrderedDict({w: w-g*lr for (w,g) in zip(wgts,grads)})
error = nnet.categorical_crossentropy(v_y, t_outp).sum()
g_all = T.grad(error, w_all)
upd = upd_dict(w_all, g_all, lr)
fn = theano.function(all_args, error, updates=upd, allow_input_downcast=True)
err=0.0; l_rate=0.01
for i in range(len(X)):
err+=fn(np.zeros(n_hidden), X[i], Y[i], l_rate)
if i % 1000 == 999:
print ("Error:{:.2f}".format(err/1000))
err=0.0
"""
Explanation: Combined weights
We can make the previous section simpler and faster by concatenating the hidden and input matrices and inputs together. We're not going to step through this cell by cell - you'll see it's identical to the previous section except for this concatenation.
End of explanation
"""
|
aaronmckinstry706/twitter-crime-prediction
|
notebooks/tweets_exploration.ipynb
|
gpl-3.0
|
import os
import sys
# From https://stackoverflow.com/a/36218558 .
def sparkImport(module_name, module_directory):
"""
Convenience function.
Tells the SparkContext sc (must already exist) to load
module module_name on every computational node before
executing an RDD.
Args:
module_name: the name of the module, without ".py".
module_directory: the path, absolute or relative, to
the directory containing module
module_Name.
Returns: none.
"""
module_path = os.path.abspath(
module_directory + "/" + module_name + ".py")
sc.addPyFile(module_path)
# Add all scripts from repository to local path.
# From https://stackoverflow.com/a/35273613 .
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import preprocessing
sparkImport("preprocessing", "..")
tweets = sc.textFile("tweets.csv") \
.filter(preprocessing.format_is_correct) \
.map(preprocessing.split_record)
"""
Explanation: Tweet Count Analysis
This script explores the quantity of tweets we have over time: how many do we have in total? Are there significant periods containing few or even zero tweets? Are there duplicate tweets in the dataset?
Setup
Import the preprocessing functions (but do it in a funky way, because Jupyter notebook won't let me import from this notebook's parent directory), and get the tweets. Also, we have to tell the SparkContext that any computing nodes will need to load the preprocessing module before running.
End of explanation
"""
initial_count = tweets.count()
print("Total number of tweets: " + str(initial_count))
tweet_ids = tweets \
.map(lambda record: record[preprocessing.field_index['id']]) \
.distinct()
final_count = tweet_ids.count()
print("Number of duplicates: " + str(initial_count - final_count))
print("Number of distinct tweets: " + str(final_count))
"""
Explanation: Count 'Em
How many tweets total? How many duplicate tweets?
End of explanation
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as pyplot
def get_week(unix_timestamp):
# Add 3 to the day, because Unix timestamp 0 is on a Thursday.
return ((int(unix_timestamp) / secondsPerDay + 3) / 7)
secondsPerDay = 24*60*60
weekly_tweet_counts = tweets \
.map(
lambda record:
(get_week(record[preprocessing.field_index['timestamp']]), 1)) \
.countByKey()
"""
Explanation: Less than $1\%$ of our tweets are duplicates, so we have approximately the quantity of tweets that we thought we did.
Plot Counts Over Time
Look at distribution over time: per day, per week, and per month. Look for any large gaps, or frequent small gaps, that might limit how we use the data.
Tweets per Week
First, let's count how many tweets are in each week, where a week is defined as Monday - Sunday. Note that this is independent of month and year, which makes it easier to both count and plot. (In the code below, the .use('Agg') command tells matplotlib to use Agg to display graphs 'n such; by default it is something else, which NYU HPC does not have installed/loaded.)
End of explanation
"""
for week_index in range(min(weekly_tweet_counts.keys()), max(weekly_tweet_counts.keys())):
if week_index not in weekly_tweet_counts.keys():
weekly_tweet_counts[week_index] = 0
"""
Explanation: Now we have the tweet counts as a dictionary of (week_index, count) pairs. Before we go further, we should fill in any missing weeks with a 0 value.
End of explanation
"""
weekly_tweet_counts_list = sorted(weekly_tweet_counts.items())
weekly_tweet_counts_xy = zip(*weekly_tweet_counts_list)
week_indexes = weekly_tweet_counts_xy[0]
week_counts = weekly_tweet_counts_xy[1]
currentFigure = pyplot.figure()
pyplot.figure(currentFigure.number)
pyplot.bar(week_indexes, week_counts, width=1.0)
pyplot.title('Tweet Count per Week')
pyplot.xlabel('Week Index')
pyplot.ylabel('Tweet Count')
pyplot.xlim([min(week_indexes), max(week_indexes)])
pyplot.ylim([0, max(week_counts)])
pyplot.savefig("tweet_count_per_week.png")
"""
Explanation: After we've filled in missing weeks with a 0 value, we sort the pairs, then repackage them as a tuple of week indexes and a tuple of counts. Then we can pass these week indexes and counts to a bar plot function as x- and y-values, respectively.
End of explanation
"""
sorted_week_counts = sorted(week_counts)
currentFigure = pyplot.figure()
pyplot.figure(currentFigure.number)
pyplot.hist(sorted_week_counts, 40)
pyplot.title("Distribution of Weekly Tweet Counts")
pyplot.xlabel("Weekly Tweet Count")
pyplot.ylabel("Frequency")
pyplot.savefig("distribution_of_weekly_counts.png")
"""
Explanation: Unfortunately, we can't automatically display the figure in a Jupyter notebook on NYU's HPC server. So, we saved it to a file, and now we can display it below:
What do the Missing Tweets Mean?
We're using LDA to generate topic models over the previous 31 days. This means that, if we have no tweets, we have no topic model. However, if we don't have tweets for a particular time period, we can't fix that. It's more important to note that, if we have too few tweets, we get bad topic models. A bad topic model means a bad prediction, so we shouldn't predict on dates for which there are too few tweets. So how few tweets is "too few"? It isn't quite clear yet. Eventually, we will have to choose some method for determining this (there are several existing techniques) and run with it.
For now, however, we note that the quality of the LDA model should not vary significantly between any two predictions. One way to enforce some minimum quality is to choose a simple cutoff value $c_{\text{min}}$ for weekly tweet counts. If a week's tweet count falls below $c_{\text{min}}$, we shouldn't try to train an LDA model on any 31 days overlapping that week. How do we choose the value of $c_{\text{min}}$, though?
We will choose the cutoff value based on a histogram of the tweet counts. We should expect a top-heavy distribution of tweet counts (lots of high-range values), with a large gap in the middle (almost no middle values), followed by several smaller frequency bumps of low tweet counts (a few low values). Our cutoff value can be in the upper end of the large gap in the middle.
End of explanation
"""
c_min = 150000
def get_day(unix_timestamp):
    return int(unix_timestamp) // (24*60*60)
tweets_per_day = tweets \
.map(lambda record:
(get_day(record[preprocessing.field_index['timestamp']]), 1)) \
.countByKey()
for day in range(min(tweets_per_day.keys()), max(tweets_per_day.keys())):
if day not in tweets_per_day.keys():
tweets_per_day[day] = 0
num_valid_days = 0
for day in range(min(tweets_per_day.keys()), max(tweets_per_day.keys())):
# check if day has enough tweets
valid_days = range(day - 31, day)
valid_day_counts = [tweets_per_day[past_day]
for past_day in valid_days]
if sum(valid_day_counts) > 4*c_min:
num_valid_days = num_valid_days + 1
print("Number of days satisfying our rule: " + str(num_valid_days))
"""
Explanation: Ignoring the frequency of weeks containing 0 tweets, it seems roughly that there are two overlapping normal curves: one centered at approximately $325000$, and another centered at approximately $220000$. As a rough, temporary guess, we can set the cutoff at somewhere between $100000$ and $150000$, the tail end of the normal curve with the lowest mean; we will choose $c_{\text{min}} = 150000$ as our cutoff, just to err on the side of having more tweets for our LDA model. So, the rule is that, given a day to make a prediction, we only make a prediction if the past 31 days does not overlap with any week containing less than $c_{\text{min}} = 150000$ tweets.
However, this is a cumbersome rule to program, and I am a bit lazy. Given that one month is approximately four weeks, we can come up with a simpler rule: only make a prediction if the last 31 days contains at least $4c_{\text{min}} = 600000$ tweets.
Now we have to make sure that we actually have days on which our algorithm is allowed to make predictions. Let's count the number of days which satisfy the rule we've come up with.
End of explanation
"""
|
susantabiswas/Natural-Language-Processing
|
Notebooks/Word_Prediction_using_Quadgrams_Memory_Efficient_Encoded_keys.ipynb
|
mit
|
#import the modules necessary
from nltk.util import ngrams
from collections import defaultdict
from collections import OrderedDict
import nltk
import string
import time
start_time = time.time()
"""
Explanation: Word prediction based on Quadgram
This program reads the corpus line by line, so it is slower than the version that reads the whole corpus
in one go, but it only needs to hold one line in memory at a time. It also uses encoded keys, making it even more memory efficient.
Import modules
End of explanation
"""
#return: string
#arg: list, list, dict
#Encodes a token sequence into a dictionary key: each unique word is assigned an
#integer index (stored in vocab_dict), and the key is the '#'-joined indices.
def encodeKey(s,index,vocab_dict):
key = ''
#print (s)
for t in s:
#print (t)
if t not in vocab_dict:
vocab_dict[t] = index[0]
index[0] = index[0] + 1
key = key + str(vocab_dict[t]) + '#'
#print(key)
return key
"""
Explanation: Do preprocessing:
Encode keys for dictionary storage
End of explanation
"""
#returns: string
#arg: string
#remove punctuations and make the string lowercase
def removePunctuations(sen):
#split the string into word tokens
temp_l = sen.split()
i = 0
#changes the word to lowercase and removes punctuations from it
for word in temp_l :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
temp_l[i] = word.lower()
i=i+1
    #splitting is done here because for text like "here---so", after punctuation
    #removal it should become "here so"
content = " ".join(temp_l)
return content
"""
Explanation: Remove the punctuations and lowercase the tokens
End of explanation
"""
#returns : void
#arg: string,dict,dict,dict,list
#loads the corpus for the dataset and makes the frequency count of quadgram and trigram strings
def loadCorpus(filename,tri_dict,quad_dict,vocab_dict,index):
w1 = '' #for storing the 3rd last word to be used for next token set
w2 = '' #for storing the 2nd last word to be used for next token set
w3 = '' #for storing the last word to be used for next token set
i = 0
sen = ''
token = []
with open(filename,'r') as file:
#read the data line by line
for line in file:
token = line.split()
i = 0
for word in token :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
token[i] = word.lower()
i=i+1
content = " ".join(token)
token = content.split()
if not token:
continue
#first add the previous words
if w2!= '':
token.insert(0,w2)
if w3!= '':
token.insert(1,w3)
#tokens for trigrams
temp1 = list(ngrams(token,3))
if w1!= '':
token.insert(0,w1)
#tokens for quadgrams
temp2 = list(ngrams(token,4))
#count the frequency of the trigram sentences
for t in temp1:
sen = encodeKey(t,index,vocab_dict)
tri_dict[sen] += 1
#count the frequency of the quadgram sentences
for t in temp2:
sen = encodeKey(t,index,vocab_dict)
quad_dict[sen] += 1
#then take out the last 3 words
n = len(token)
w1 = token[n -3]
w2 = token[n -2]
w3 = token[n -1]
"""
Explanation: Tokenize the corpus data
End of explanation
"""
#returns : float
#arg : string sentence,string word,dict,dict
def findprobability(s,w,tri_dict,quad_dict):
c1 = 0 # for count of sentence 's' with word 'w'
c2 = 0 # for count of sentence 's'
s1 = s + w
if s1 in quad_dict:
c1 = quad_dict[s1]
if s in tri_dict:
c2 = tri_dict[s]
if c2 == 0:
return 0
return c1/c2
"""
Explanation: Find the probability
End of explanation
"""
#arg: list
#return: string,dict
#for decoding keys
def decodeKey(s,vocab_dict):
key = ''
l = []
item = list(vocab_dict.items())
temp_l = s.split('#')
del temp_l[len(temp_l)-1]
index = 0
for c in temp_l:
if c != ' ':
index = int(c)
l.append(item[index][0])
key = ' '.join(l)
return key
"""
Explanation: Decode key
End of explanation
"""
#returns : void
#arg: string,dict,dict,dict,list
def doPrediction(sen,tri_dict,quad_dict,vocab_dict,index):
#remove punctuations and make it lowercase
temp_l = sen.split()
i = 0
for word in temp_l :
for l in word :
if l in string.punctuation:
word = word.replace(l," ")
temp_l[i] = word.lower()
i=i+1
content = " ".join(temp_l)
temp_l = content.split()
#encode the sentence before checking
sen = encodeKey(temp_l,index,vocab_dict)
max_prob = 0
#when there is no probable word available
#now for guessing the word which should exist we use quadgram
right_word = 'apple'
for word in vocab_dict:
#print(word)
#encode the word before checking
dict_l = []
dict_l.append(word)
word = encodeKey(dict_l,index,vocab_dict)
prob = findprobability(sen,word,tri_dict,quad_dict)
if prob > max_prob:
max_prob = prob
right_word = word
#decode the right word
right_word = decodeKey(right_word,vocab_dict)
print('Word Prediction is :',right_word)
def main():
tri_dict = defaultdict(int)
quad_dict = defaultdict(int)
    vocab_dict = OrderedDict() #for mapping of words with their index ==> key:word value:index of key in dict
    index = [0] #list for assigning index value to keys
    loadCorpus('mycorpus.txt',tri_dict,quad_dict,vocab_dict,index)
cond = False
#take input
while(cond == False):
sen = input('Enter the string\n')
sen = removePunctuations(sen)
temp = sen.split()
if len(temp) < 3:
            print("Please enter at least 3 words!")
else:
cond = True
temp = temp[-3:]
sen = " ".join(temp)
doPrediction(sen,tri_dict,quad_dict,vocab_dict,index)
if __name__ == '__main__':
main()
"""
Explanation: Driver function for doing the prediction
End of explanation
"""
|
Vizzuality/gfw
|
docs/Update_GFW_Layers_Vault.ipynb
|
mit
|
!pip install LMIPy
from IPython.display import clear_output
clear_output()
print('LMI ready!')
"""
Explanation: Create Layer Config Backup
This notebook outlines how to run a process to create a remote backup of gfw layers.
Rough process:
Run this notebook from the gfw/data folder
Wait...
Check _metadata.json files in the production and staging folders for changes
If everything looks good, make a PR
First, install the latest version of LMIPy
End of explanation
"""
import LMIPy as lmi
import os
import json
import shutil
from pprint import pprint
from datetime import datetime
from tqdm import tqdm
"""
Explanation: Next, import relevent modules
End of explanation
"""
envs = ['staging', 'production']
path = './backup/configs'
# Create directory and archive previous datasets
with open(path + '/metadata.json') as f:
date = json.load(f)[0]['updatedAt']
shutil.make_archive(f'./backup/archived/archive_{date}', 'zip', path)
# Check correct folders are found
if not all([folder in os.listdir(path) for folder in envs]):
print(f'Boo! Incorrect path: {path}')
else:
print('Good to go!')
"""
Explanation: First, pull the gfw repo and check that the following path correctly finds the data/layers folder, inside which, you should find a production and staging folder.
End of explanation
"""
%%time
for env in envs:
# Get all old ids
old_ids = [file.split('.json')[0] for file in os.listdir(path + f'/{env}') if '_metadata' not in file]
old_datasets = []
files = os.listdir(path + f'/{env}')
# Extract all old datasets
for file in files:
if '_metadata' not in file:
with open(path + f'/{env}/{file}') as f:
old_datasets.append(json.load(f))
# Now pull all current gfw datasets and save
col = lmi.Collection(app=['gfw'], env=env)
col.save(path + f'/{env}')
# Get all new ids
new_ids = [file.split('.json')[0] for file in os.listdir(path + f'/{env}') if '_metadata' not in file]
# See which are new, and which have been removed
added = list(set(new_ids) - set(old_ids))
removed = list(set(old_ids) - set(new_ids))
changed = []
# COmpare old and new, logging those that have changed
for old_dataset in old_datasets:
ds_id = old_dataset['id']
old_ids.append(ds_id)
with open(path + f'/{env}/{ds_id}.json') as f:
new_dataset = json.load(f)
if old_dataset != new_dataset:
changed.append(ds_id)
# Create metadata json
with open(path + f'/{env}/_metadata.json', 'w') as f:
meta = {
'updatedAt': datetime.today().strftime('%Y-%m-%d@%Hh-%Mm-%Ss'),
'env': env,
'differences': {
'changed': changed,
'added': added,
'removed': removed
}
}
# And save it too!
json.dump(meta,f)
print('Done!')
# Generate rich metadata
metadata = []
for env in tqdm(envs):
with open(path + f'/{env}/_metadata.json') as f:
metadata.append(json.load(f))
for env in tqdm(metadata):
for change_type, ds_list in env['differences'].items():
tmp = []
for dataset in ds_list:
# generate Dataset entity to get name etc...
print(dataset)
tmp.append(str(lmi.Dataset(dataset)))
env['differences'][change_type] = tmp
with open(path + f'/metadata.json', 'w') as f:
# And save it too!
json.dump(metadata,f)
pprint(metadata)
"""
Explanation: Run the following to save, build .json files and log changes.
Update record
End of explanation
"""
|
marcotcr/lime
|
doc/notebooks/Tutorial - MNIST and RF.ipynb
|
bsd-2-clause
|
import numpy as np
import matplotlib.pyplot as plt
from skimage.color import gray2rgb, rgb2gray, label2rgb # since the code wants color images
from sklearn.datasets import fetch_openml
mnist = fetch_openml('mnist_784')
# make each image color so lime_image works correctly
X_vec = np.stack([gray2rgb(iimg) for iimg in mnist.data.reshape((-1, 28, 28))],0).astype(np.uint8)
y_vec = mnist.target.astype(np.uint8)
%matplotlib inline
fig, ax1 = plt.subplots(1,1)
ax1.imshow(X_vec[0], interpolation = 'none')
ax1.set_title('Digit: {}'.format(y_vec[0]))
"""
Explanation: Overview
The notebook shows how the lime_image tools can be applied to a smaller dataset like MNIST. The dataset is very low resolution and allows quite a bit of rapid iteration.
End of explanation
"""
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import Normalizer
class PipeStep(object):
"""
Wrapper for turning functions into pipeline transforms (no-fitting)
"""
def __init__(self, step_func):
self._step_func=step_func
def fit(self,*args):
return self
def transform(self,X):
return self._step_func(X)
makegray_step = PipeStep(lambda img_list: [rgb2gray(img) for img in img_list])
flatten_step = PipeStep(lambda img_list: [img.ravel() for img in img_list])
simple_rf_pipeline = Pipeline([
('Make Gray', makegray_step),
('Flatten Image', flatten_step),
#('Normalize', Normalizer()),
#('PCA', PCA(16)),
('RF', RandomForestClassifier())
])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_vec, y_vec,
train_size=0.55)
simple_rf_pipeline.fit(X_train, y_train)
%load_ext autoreload
%autoreload 2
import os,sys
try:
import lime
except:
sys.path.append(os.path.join('..', '..')) # add the current directory
import lime
from lime import lime_image
from lime.wrappers.scikit_image import SegmentationAlgorithm
explainer = lime_image.LimeImageExplainer(verbose = False)
segmenter = SegmentationAlgorithm('quickshift', kernel_size=1, max_dist=200, ratio=0.2)
%%time
explanation = explainer.explain_instance(X_test[0],
classifier_fn = simple_rf_pipeline.predict_proba,
top_labels=10, hide_color=0, num_samples=10000, segmentation_fn=segmenter)
temp, mask = explanation.get_image_and_mask(y_test[0], positive_only=True, num_features=10, hide_rest=False, min_weight = 0.01)
fig, (ax1, ax2) = plt.subplots(1,2, figsize = (8, 4))
ax1.imshow(label2rgb(mask,temp, bg_label = 0), interpolation = 'nearest')
ax1.set_title('Positive Regions for {}'.format(y_test[0]))
temp, mask = explanation.get_image_and_mask(y_test[0], positive_only=False, num_features=10, hide_rest=False, min_weight = 0.01)
ax2.imshow(label2rgb(3-mask,temp, bg_label = 0), interpolation = 'nearest')
ax2.set_title('Positive/Negative Regions for {}'.format(y_test[0]))
# now show them for each class
fig, m_axs = plt.subplots(2,5, figsize = (12,6))
for i, c_ax in enumerate(m_axs.flatten()):
temp, mask = explanation.get_image_and_mask(i, positive_only=True, num_features=1000, hide_rest=False, min_weight = 0.01 )
c_ax.imshow(label2rgb(mask,X_test[0], bg_label = 0), interpolation = 'nearest')
c_ax.set_title('Positive for {}\nActual {}'.format(i, y_test[0]))
c_ax.axis('off')
"""
Explanation: Setup a Pipeline
Here we make a pipeline for processing the images: we basically flatten each image back to a 1D vector and then use a RandomForestClassifier.
End of explanation
"""
pipe_pred_test = simple_rf_pipeline.predict(X_test)
wrong_idx = np.random.choice(np.where(pipe_pred_test!=y_test)[0])
print('Using #{} where the label was {} and the pipeline predicted {}'.format(wrong_idx, y_test[wrong_idx], pipe_pred_test[wrong_idx]))
%%time
explanation = explainer.explain_instance(X_test[wrong_idx],
classifier_fn = simple_rf_pipeline.predict_proba,
top_labels=10, hide_color=0, num_samples=10000, segmentation_fn=segmenter)
# now show them for each class
fig, m_axs = plt.subplots(2,5, figsize = (12,6))
for i, c_ax in enumerate(m_axs.flatten()):
temp, mask = explanation.get_image_and_mask(i, positive_only=True, num_features=10, hide_rest=False, min_weight = 0.01 )
c_ax.imshow(label2rgb(mask,temp, bg_label = 0), interpolation = 'nearest')
c_ax.set_title('Positive for {}\nActual {}'.format(i, y_test[wrong_idx]))
c_ax.axis('off')
"""
Explanation: Gaining Insight
Can we find an explanation for a classification the algorithm got wrong?
End of explanation
"""
|
jakobrunge/tigramite
|
tutorials/tigramite_tutorial_assumptions.ipynb
|
gpl-3.0
|
# Imports
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
%matplotlib inline
## use `%matplotlib notebook` for interactive figures
# plt.style.use('ggplot')
import sklearn
import tigramite
from tigramite import data_processing as pp
from tigramite.toymodels import structural_causal_processes as toys
from tigramite import plotting as tp
from tigramite.pcmci import PCMCI
from tigramite.independence_tests import ParCorr, GPDC, CMIknn, CMIsymb
from tigramite.models import LinearMediation, Prediction
"""
Explanation: Causal discovery with TIGRAMITE
TIGRAMITE is a time series analysis python module. It allows to reconstruct graphical models (conditional independence graphs) from discrete or continuously-valued time series based on the PCMCI framework and create high-quality plots of the results.
PCMCI is described here:
J. Runge, P. Nowack, M. Kretschmer, S. Flaxman, D. Sejdinovic,
Detecting and quantifying causal associations in large nonlinear time series datasets. Sci. Adv. 5, eaau4996 (2019)
https://advances.sciencemag.org/content/5/11/eaau4996
For further versions of PCMCI (e.g., PCMCI+, LPCMCI, etc.), see the corresponding tutorials.
This tutorial explains the causal assumptions and gives walk-through examples. See the following paper for theoretical background:
Runge, Jakob. 2018. “Causal Network Reconstruction from Time Series: From Theoretical Assumptions to Practical Estimation.” Chaos: An Interdisciplinary Journal of Nonlinear Science 28 (7): 075310.
Last, the following Nature Communications Perspective paper provides an overview of causal inference methods in general, identifies promising applications, and discusses methodological challenges (exemplified in Earth system sciences):
https://www.nature.com/articles/s41467-019-10105-3
End of explanation
"""
np.random.seed(1)
data = np.random.randn(500, 3)
for t in range(1, 500):
# data[t, 0] += 0.6*data[t-1, 1]
data[t, 1] += 0.6*data[t-1, 0]
data[t, 2] += 0.6*data[t-1, 1] - 0.36*data[t-2, 0]
var_names = [r'$X^0$', r'$X^1$', r'$X^2$']
dataframe = pp.DataFrame(data, var_names=var_names)
# tp.plot_timeseries(dataframe)
"""
Explanation: Causal assumptions
Having introduced the basic functionality, we now turn to a discussion of the assumptions underlying a causal interpretation:
Faithfulness / Stableness: Independencies in data arise not from coincidence, but rather from causal structure or, expressed differently, If two variables are independent given some other subset of variables, then they are not connected by a causal link in the graph.
Causal Sufficiency: Measured variables include all of the common causes.
Causal Markov Condition: All the relevant probabilistic information that can be obtained from the system is contained in its direct causes or, expressed differently, If two variables are not connected in the causal graph given some set of conditions (see Runge Chaos 2018 for further definitions), then they are conditionally independent.
No contemporaneous effects: There are no causal effects at lag zero.
Stationarity
Parametric assumptions of independence tests (these were already discussed in basic tutorial)
Faithfulness
Faithfulness, as stated above, is an expression of the assumption that the independencies we measure come from the causal structure, i.e., the time series graph, and cannot occur due to some fine tuning of the parameters. Another unfaithful case are processes containing purely deterministic dependencies, i.e., $Y=f(X)$, without any noise. We illustrate these cases in the following.
Fine tuning
Suppose in our model we have two ways in which $X^0$ causes $X^2$, a direct one, and an indirect effect $X^0\to X^1 \to X^2$ as realized in the following model:
\begin{align}
X^0_t &= \eta^0_t\\
X^1_t &= 0.6 X^0_{t-1} + \eta^1_t\\
X^2_t &= 0.6 X^1_{t-1} - 0.36 X^0_{t-2} + \eta^2_t
\end{align}
End of explanation
"""
parcorr = ParCorr()
pcmci_parcorr = PCMCI(
dataframe=dataframe,
cond_ind_test=parcorr,
verbosity=1)
all_parents = pcmci_parcorr.run_pc_stable(tau_max=2, pc_alpha=0.2)
"""
Explanation: Since here $X^2_t = 0.6 X^1_{t-1} - 0.36 X^0_{t-2} + \eta^2_t = 0.6 (0.6 X^0_{t-2} + \eta^1_{t-1}) - 0.36 X^0_{t-2} + \eta^2_t = 0.36 X^0_{t-2} - 0.36 X^0_{t-2} + ...$, there is no unconditional dependency $X^0_{t-2} \to X^2_t$ and the link is not detected in the condition-selection step:
End of explanation
"""
results = pcmci_parcorr.run_pcmci(tau_max=2, pc_alpha=0.2, alpha_level = 0.01)
"""
Explanation: However, since the other parent of $X^2$, namely $X^1_{t-1}$ is detected, the MCI step conditions on $X^1_{t-1}$ and can reveal the true underlying graph (in this particular case):
End of explanation
"""
np.random.seed(1)
data = np.random.randn(500, 3)
for t in range(1, 500):
data[t, 0] = 0.4*data[t-1, 1]
data[t, 2] += 0.3*data[t-2, 1] + 0.7*data[t-1, 0]
dataframe = pp.DataFrame(data, var_names=var_names)
tp.plot_timeseries(dataframe); plt.show()
parcorr = ParCorr()
pcmci_parcorr = PCMCI(
dataframe=dataframe,
cond_ind_test=parcorr,
verbosity=2)
results = pcmci_parcorr.run_pcmci(tau_max=2, pc_alpha=0.2, alpha_level = 0.01)
# Plot time series graph
tp.plot_time_series_graph(
val_matrix=results['val_matrix'],
graph=results['graph'],
var_names=var_names,
link_colorbar_label='MCI',
); plt.show()
"""
Explanation: Note, however, that this is not always the case and such cancellation, even though a pathological case, can present a problem especially for smaller sample sizes.
Deterministic dependencies
Another violation of faithfulness can happen due to purely deterministic dependencies as shown here:
End of explanation
"""
np.random.seed(1)
data = np.random.randn(10000, 5)
a = 0.8
for t in range(5, 10000):
data[t, 0] += a*data[t-1, 0]
data[t, 1] += a*data[t-1, 1] + 0.5*data[t-1, 0]
data[t, 2] += a*data[t-1, 2] + 0.5*data[t-1, 1] + 0.5*data[t-1, 4]
data[t, 3] += a*data[t-1, 3] + 0.5*data[t-2, 4]
data[t, 4] += a*data[t-1, 4]
# tp.plot_timeseries(dataframe)
obsdata = data[:,[0, 1, 2, 3]]
var_names_lat = ['W', 'Y', 'X', 'Z', 'U']
for data_here in [data, obsdata]:
dataframe = pp.DataFrame(data_here)
parcorr = ParCorr()
pcmci_parcorr = PCMCI(
dataframe=dataframe,
cond_ind_test=parcorr,
verbosity=0)
results = pcmci_parcorr.run_pcmci(tau_max=5, pc_alpha=0.1, alpha_level = 0.001)
tp.plot_graph(
val_matrix=results['val_matrix'],
graph=results['graph'],
var_names=var_names_lat,
link_colorbar_label='cross-MCI',
node_colorbar_label='auto-MCI',
); plt.show()
"""
Explanation: Here the partial correlation $X^1_{t-1} \to X^0_t$ is exactly 1. Since these now represent the same variable, the true link $X^0_{t-1} \to X^2_t$ cannot be detected anymore since we condition on $X^1_{t-2}$. Deterministic copies of other variables should be excluded from the analysis.
Causal sufficiency
Causal sufficiency demands that the set of variables contains all common causes of any two variables. This assumption is mostly violated when analyzing open complex systems outside a confined experimental setting. Any link estimated from a causal discovery algorithm could become non-significant if more variables are included in the analysis.
Observational causal inference assuming causal sufficiency should generally be seen more as one step towards a physical process understanding. There exist, however, algorithms that account for and can explicitly represent confounded links (e.g., the FCI algorithm and LPCMCI). Causal discovery can greatly help in an explorative model building analysis to get an idea of potential drivers. In particular, the absence of a link allows for a more robust conclusion: If there is no evidence for a statistical dependency, then a physical mechanism is less likely (assuming that the other assumptions hold).
See Runge, Jakob. 2018. “Causal Network Reconstruction from Time Series: From Theoretical Assumptions to Practical Estimation.” Chaos: An Interdisciplinary Journal of Nonlinear Science 28 (7): 075310.
for alternative approaches that do not necessitate Causal Sufficiency.
Unobserved driver / latent variable
For the common driver process, consider that the common driver was not measured:
End of explanation
"""
np.random.seed(42)
T = 2000
data = np.random.randn(T, 4)
# Simple sun
data[:,3] = np.sin(np.arange(T)*20/np.pi) + 0.1*np.random.randn(T)
c = 0.8
for t in range(1, T):
data[t, 0] += 0.4*data[t-1, 0] + 0.4*data[t-1, 1] + c*data[t-1,3]
data[t, 1] += 0.5*data[t-1, 1] + c*data[t-1,3]
data[t, 2] += 0.6*data[t-1, 2] + 0.3*data[t-2, 1] + c*data[t-1,3]
dataframe = pp.DataFrame(data, var_names=[r'$X^0$', r'$X^1$', r'$X^2$', 'Sun'])
tp.plot_timeseries(dataframe); plt.show()
"""
Explanation: The upper plot shows the true causal graph if all variables are observed. The lower graph shows the case where variable $U$ is hidden. Then several spurious links appear: (1) $X\to Z$ and (2) links from $Y$ and $W$ to $Z$, which is counterintuitive because there is no possible indirect pathway (see upper graph). What's the reason? The culprit is the collider $X$: MCI (or FullCI and any other causal measure conditioning on the entire past) between $Y$ and $Z$ is conditioned on the parents of $Z$, which includes $X$ here in the lower latent graph. But then conditioning on a collider opens up the paths from $Y$ and $W$ to $Z$ and makes them dependent.
Solar forcing
In a geoscientific context, the solar forcing typically is a strong common driver of many processes. To remove this trivial effect, time series are typically anomalized, that is, the average seasonal cycle is subtracted. But one could also include the solar forcing explicitly, as shown here via a sine wave for an artificial example. We've also made the time series more realistic by adding an auto-dependency on their past values.
End of explanation
"""
parcorr = ParCorr()
dataframe_nosun = pp.DataFrame(data[:,[0,1,2]], var_names=[r'$X^0$', r'$X^1$', r'$X^2$'])
pcmci_parcorr = PCMCI(
dataframe=dataframe_nosun,
cond_ind_test=parcorr,
verbosity=0)
tau_max = 2
tau_min = 1
results = pcmci_parcorr.run_pcmci(tau_max=tau_max, pc_alpha=0.2, alpha_level = 0.01)
# Plot time series graph
tp.plot_time_series_graph(
val_matrix=results['val_matrix'],
graph=results['graph'],
var_names=var_names,
link_colorbar_label='MCI',
); plt.show()
"""
Explanation: If we do not account for the common solar forcing, there will be many spurious links:
End of explanation
"""
parcorr = ParCorr()
# Only estimate parents of variables 0, 1, 2
selected_links = {}
for j in range(4):
if j in [0, 1, 2]:
selected_links[j] = [(var, -lag) for var in range(4)
for lag in range(tau_min, tau_max + 1)]
else:
selected_links[j] = []
pcmci_parcorr = PCMCI(
dataframe=dataframe,
cond_ind_test=parcorr,
verbosity=0)
results = pcmci_parcorr.run_pcmci(tau_min=tau_min, tau_max=tau_max, pc_alpha=0.2,
selected_links=selected_links, alpha_level = 0.01)
# Plot time series graph
tp.plot_time_series_graph(
val_matrix=results['val_matrix'],
graph=results['graph'],
var_names=[r'$X^0$', r'$X^1$', r'$X^2$', 'Sun'],
link_colorbar_label='MCI',
); plt.show()
"""
Explanation: However, if we explicitly include the solar forcing variable (which we assume is known in this case), we can identify the correct causal graph. Since we are not interested in the drivers of the solar forcing variable, we don't attempt to reconstruct its parents. This can be achieved by restricting selected_links.
End of explanation
"""
np.random.seed(1)
data = np.random.randn(1000, 3)
for t in range(1, 1000):
data[t, 0] += 0.*data[t-1, 0] + 0.6*data[t-1,2]
data[t, 1] += 0.*data[t-1, 1] + 0.6*data[t-1,0]
data[t, 2] += 0.*data[t-1, 2] + 0.6*data[t-1,1]
dataframe = pp.DataFrame(data, var_names=[r'$X^0$', r'$X^1$', r'$X^2$'])
tp.plot_timeseries(dataframe); plt.show()
"""
Explanation: Time sub-sampling
Sometimes a time series might be sub-sampled, that is the measurements are less frequent than the true underlying time-dependency. Consider the following process:
End of explanation
"""
pcmci_parcorr = PCMCI(dataframe=dataframe, cond_ind_test=ParCorr())
results = pcmci_parcorr.run_pcmci(tau_min=0,tau_max=2, pc_alpha=0.2, alpha_level = 0.01)
# Plot time series graph
tp.plot_time_series_graph(
val_matrix=results['val_matrix'],
graph=results['graph'],
var_names=var_names,
link_colorbar_label='MCI',
); plt.show()
"""
Explanation: With the original time sampling we obtain the correct causal graph:
End of explanation
"""
sampled_data = data[::2]
pcmci_parcorr = PCMCI(dataframe=pp.DataFrame(sampled_data, var_names=var_names),
cond_ind_test=ParCorr(), verbosity=0)
results = pcmci_parcorr.run_pcmci(tau_min=0, tau_max=2, pc_alpha=0.2, alpha_level=0.01)
# Plot time series graph
tp.plot_time_series_graph(
val_matrix=results['val_matrix'],
graph=results['graph'],
var_names=var_names,
link_colorbar_label='MCI',
); plt.show()
"""
Explanation: If we sub-sample the data, very counter-intuitive links can appear. The true causal loop gets detected in the wrong direction:
End of explanation
"""
np.random.seed(1)
T = 10000
# Generate 1/f noise by averaging AR1-process with wide range of coeffs
# (http://www.scholarpedia.org/article/1/f_noise)
def one_over_f_noise(T, n_ar=20):
whitenoise = np.random.randn(T, n_ar)
ar_coeffs = np.linspace(0.1, 0.9, n_ar)
for t in range(T):
whitenoise[t] += ar_coeffs*whitenoise[t-1]
return whitenoise.sum(axis=1)
data = np.random.randn(T, 3)
data[:,0] += one_over_f_noise(T)
data[:,1] += one_over_f_noise(T)
data[:,2] += one_over_f_noise(T)
for t in range(1, T):
data[t, 0] += 0.4*data[t-1, 1]
data[t, 2] += 0.3*data[t-2, 1]
dataframe = pp.DataFrame(data, var_names=var_names)
tp.plot_timeseries(dataframe); plt.show()
# plt.psd(data[:,0],return_line=True)[2]
# plt.psd(data[:,1],return_line=True)[2]
# plt.psd(data[:,2],return_line=True)[2]
# plt.gca().set_xscale("log", nonposx='clip')
# plt.gca().set_yscale("log", nonposy='clip')
"""
Explanation: If causal lags are smaller than the time sampling, such problems may occur. Causal inference for sub-sampled data is still an active area of research.
Causal Markov condition
The Markov condition can be rephrased as assuming that the noises driving each variable are independent of each other and independent in time (iid). This is violated in the following example where each variable is driven by 1/f noise which refers to the scaling of the power spectrum. 1/f noise can be generated by averaging AR(1) processes (http://www.scholarpedia.org/article/1/f_noise) which means that the noise is not independent in time anymore (even though the noise terms of each individual variable are still independent). Note that this constitutes a violation of the Markov Condition of the observed process only. So one might call this rather a violation of Causal Sufficiency.
End of explanation
"""
parcorr = ParCorr()
pcmci_parcorr = PCMCI(
dataframe=dataframe,
cond_ind_test=parcorr,
verbosity=1)
results = pcmci_parcorr.run_pcmci(tau_max=5, pc_alpha=0.2, alpha_level = 0.01)
"""
Explanation: Here PCMCI will detect many spurious links, especially auto-dependencies, since the process has long memory and the present state is not independent of the further past given some set of parents.
End of explanation
"""
np.random.seed(1)
data = np.random.randn(1000, 3)
for t in range(1, 1000):
data[t, 0] += 0.7*data[t-1, 0]
data[t, 1] += 0.6*data[t-1, 1] + 0.6*data[t-1,0]
data[t, 2] += 0.5*data[t-1, 2] + 0.6*data[t-1,1]
dataframe = pp.DataFrame(data, var_names=var_names)
tp.plot_timeseries(dataframe); plt.show()
"""
Explanation: Time aggregation
An important choice is how to aggregate measured time series. For example, climate time series might have been measured daily, but one might be interested in a less noisy time-scale and analyze monthly aggregates. Consider the following process:
End of explanation
"""
pcmci_parcorr = PCMCI(dataframe=dataframe, cond_ind_test=ParCorr())
results = pcmci_parcorr.run_pcmci(tau_min=0,tau_max=2, pc_alpha=0.2, alpha_level = 0.01)
# Plot time series graph
tp.plot_time_series_graph(
val_matrix=results['val_matrix'],
graph=results['graph'],
var_names=var_names,
link_colorbar_label='MCI',
); plt.show()
"""
Explanation: With the original time aggregation we obtain the correct causal graph:
End of explanation
"""
aggregated_data = pp.time_bin_with_mask(data, time_bin_length=4)
pcmci_parcorr = PCMCI(dataframe=pp.DataFrame(aggregated_data[0], var_names=var_names), cond_ind_test=ParCorr(),
verbosity=0)
results = pcmci_parcorr.run_pcmci(tau_min=0, tau_max=2, pc_alpha=0.2, alpha_level = 0.01)
# Plot time series graph
tp.plot_time_series_graph(
val_matrix=results['val_matrix'],
graph=results['graph'],
var_names=var_names,
link_colorbar_label='MCI',
); plt.show()
"""
Explanation: If we aggregate the data, we also detect a contemporaneous dependency, for which no causal direction can be assessed in this framework, and we obtain several additional lagged spurious links. Essentially, we now have direct causal effects that appear contemporaneous on the aggregated time scale. Causal inference for time-aggregated data is also still an active area of research. Note again that this constitutes a violation of the Markov Condition of the observed process only. So one might call this rather a violation of Causal Sufficiency.
End of explanation
"""
|
tyberion/jupyter_pdf_slides
|
Example.ipynb
|
mit
|
attend = sns.load_dataset("attention").query("subject <= 12")
g = sns.FacetGrid(attend, col="subject", col_wrap=4, size=2, ylim=(0, 10))
g.map(sns.pointplot, "solutions", "score", color=".3", ci=None);
"""
Explanation: Calculate the attention of subjects
Here we calculate the attention of subjects
End of explanation
"""
tips = sns.load_dataset("tips")
with sns.axes_style("white"):
g = sns.FacetGrid(tips, row="sex", col="smoker", margin_titles=True, size=2.5)
g.map(plt.scatter, "total_bill", "tip", alpha=0.5);
g.set_axis_labels("Total bill (US Dollars)", "Tip");
g.set(xticks=[10, 30, 50], yticks=[2, 6, 10]);
g.fig.subplots_adjust(wspace=.02, hspace=.02);
"""
Explanation: A slide with just text
First point
First subpoint
Second subpoint
Second point
Third point
A look at the tips dataset
This is a formula $x = \frac{1}{2} \cos(y)$
End of explanation
"""
g = sns.FacetGrid(tips, col="day", size=4, aspect=.5)
g.map(sns.barplot, "sex", "total_bill");
"""
Explanation: Here we show some code
End of explanation
"""
|
RTHMaK/RPGOne
|
scipy-2017-sklearn-master/notebooks/03 Data Representation for Machine Learning.ipynb
|
apache-2.0
|
from sklearn.datasets import load_iris
iris = load_iris()
"""
Explanation: The use of watermark (above) is optional, and we use it to keep track of the changes while developing the tutorial material. (You can install this IPython extension via "pip install watermark". For more information, please see: https://github.com/rasbt/watermark).
SciPy 2016 Scikit-learn Tutorial
Representation and Visualization of Data
Machine learning is about fitting models to data; for that reason, we'll start by
discussing how data can be represented in order to be understood by the computer. Along
with this, we'll build on our matplotlib examples from the previous section and show some
examples of how to visualize data.
Data in scikit-learn
Data in scikit-learn, with very few exceptions, is assumed to be stored as a
two-dimensional array, of shape [n_samples, n_features]. Many algorithms also accept scipy.sparse matrices of the same shape.
n_samples: The number of samples: each sample is an item to process (e.g. classify).
A sample can be a document, a picture, a sound, a video, an astronomical object,
a row in a database or CSV file,
or whatever you can describe with a fixed set of quantitative traits.
n_features: The number of features or distinct traits that can be used to describe each
item in a quantitative manner. Features are generally real-valued, but may be Boolean or
discrete-valued in some cases.
The number of features must be fixed in advance. However it can be very high dimensional
(e.g. millions of features) with most of them being "zeros" for a given sample. This is a case
where scipy.sparse matrices can be useful, in that they are
much more memory-efficient than NumPy arrays.
As we recall from the previous section (or Jupyter notebook), we represent samples (data points or instances) as rows in the data array, and we store the corresponding features, the "dimensions," as columns.
A Simple Example: the Iris Dataset
As an example of a simple dataset, we're going to take a look at the iris data stored by scikit-learn.
The data consists of measurements of three different species of iris flowers, as illustrated below:
Iris Setosa
<img src="figures/iris_setosa.jpg" width="50%">
Iris Versicolor
<img src="figures/iris_versicolor.jpg" width="50%">
Iris Virginica
<img src="figures/iris_virginica.jpg" width="50%">
Quick Question:
Let's assume that we are interested in categorizing new observations; we want to predict whether unknown flowers are Iris-Setosa, Iris-Versicolor, or Iris-Virginica flowers, respectively. Based on what we've discussed in the previous section, how would we construct such a dataset?
Remember: we need a 2D array of size [n_samples x n_features].
What would the n_samples refer to?
What might the n_features refer to?
Remember that there must be a fixed number of features for each sample, and feature
number j must be a similar kind of quantity for each sample.
Loading the Iris Data with Scikit-learn
For future experiments with machine learning algorithms, we recommend you bookmark the UCI machine learning repository, which hosts many of the commonly used datasets that are useful for benchmarking machine learning algorithms -- a very popular resource for machine learning practitioners and researchers. Conveniently, some of these datasets are already included in scikit-learn so that we can skip the tedious parts of downloading, reading, parsing, and cleaning these text/CSV files. You can find a list of available datasets in scikit-learn at: http://scikit-learn.org/stable/datasets/#toy-datasets.
For example, scikit-learn has a very straightforward set of data on these iris species. The data consist of
the following:
Features in the Iris dataset:
sepal length in cm
sepal width in cm
petal length in cm
petal width in cm
Target classes to predict:
Iris Setosa
Iris Versicolour
Iris Virginica
<img src="figures/petal_sepal.jpg" alt="Sepal" style="width: 50%;"/>
(Image: "Petal-sepal". Licensed under CC BY-SA 3.0 via Wikimedia Commons - https://commons.wikimedia.org/wiki/File:Petal-sepal.jpg#/media/File:Petal-sepal.jpg)
scikit-learn embeds a copy of the iris CSV file along with a helper function to load it into numpy arrays:
End of explanation
"""
iris.keys()
"""
Explanation: The resulting dataset is a Bunch object: you can see what's available using
the method keys():
End of explanation
"""
n_samples, n_features = iris.data.shape
print('Number of samples:', n_samples)
print('Number of features:', n_features)
# the sepal length, sepal width, petal length and petal width of the first sample (first flower)
print(iris.data[0])
"""
Explanation: The features of each sample flower are stored in the data attribute of the dataset:
End of explanation
"""
print(iris.data.shape)
print(iris.target.shape)
print(iris.target)
import numpy as np
np.bincount(iris.target)
"""
Explanation: The information about the class of each sample is stored in the target attribute of the dataset:
End of explanation
"""
print(iris.target_names)
"""
Explanation: Using NumPy's bincount function (above), we can see that the classes are distributed uniformly in this dataset - there are 50 flowers from each species, where
class 0: Iris-Setosa
class 1: Iris-Versicolor
class 2: Iris-Virginica
These class names are stored in the last attribute, namely target_names:
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
x_index = 3
colors = ['blue', 'red', 'green']
for label, color in zip(range(len(iris.target_names)), colors):
plt.hist(iris.data[iris.target==label, x_index],
label=iris.target_names[label],
color=color)
plt.xlabel(iris.feature_names[x_index])
plt.legend(loc='upper right')
plt.show()
x_index = 3
y_index = 0
colors = ['blue', 'red', 'green']
for label, color in zip(range(len(iris.target_names)), colors):
plt.scatter(iris.data[iris.target==label, x_index],
iris.data[iris.target==label, y_index],
label=iris.target_names[label],
c=color)
plt.xlabel(iris.feature_names[x_index])
plt.ylabel(iris.feature_names[y_index])
plt.legend(loc='upper left')
plt.show()
"""
Explanation: This data is four dimensional, but we can visualize one or two of the dimensions
at a time using a simple histogram or scatter-plot. Again, we'll start by enabling
matplotlib inline mode:
End of explanation
"""
import pandas as pd
iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
pd.plotting.scatter_matrix(iris_df, figsize=(8, 8));
"""
Explanation: Quick Exercise:
Change x_index and y_index in the above script
and find a combination of two parameters
which maximally separate the three classes.
This exercise is a preview of dimensionality reduction, which we'll see later.
An aside: scatterplot matrices
Instead of looking at the data one plot at a time, a common tool that analysts use is called the scatterplot matrix.
Scatterplot matrices show scatter plots between all features in the data set, as well as histograms to show the distribution of each feature.
End of explanation
"""
from sklearn import datasets
"""
Explanation: Other Available Data
Scikit-learn makes available a host of datasets for testing learning algorithms.
They come in three flavors:
Packaged Data: these small datasets are packaged with the scikit-learn installation,
and can be loaded using the tools in sklearn.datasets.load_*
Downloadable Data: these larger datasets are available for download, and scikit-learn
includes tools which streamline this process. These tools can be found in
sklearn.datasets.fetch_*
Generated Data: there are several datasets which are generated from models based on a
random seed. These are available in the sklearn.datasets.make_*
You can explore the available dataset loaders, fetchers, and generators using IPython's
tab-completion functionality. After importing the datasets submodule from sklearn,
type
datasets.load_<TAB>
or
datasets.fetch_<TAB>
or
datasets.make_<TAB>
to see a list of available functions.
End of explanation
"""
from sklearn.datasets import get_data_home
get_data_home()
"""
Explanation: The data downloaded using the fetch_ scripts are stored locally,
within a subdirectory of your home directory.
You can use the following to determine where it is:
End of explanation
"""
from sklearn.datasets import load_digits
digits = load_digits()
digits.keys()
n_samples, n_features = digits.data.shape
print((n_samples, n_features))
print(digits.data[0])
print(digits.target)
"""
Explanation: Be warned: many of these datasets are quite large, and can take a long time to download!
If you start a download within the IPython notebook
and you want to kill it, you can use ipython's "kernel interrupt" feature, available in the menu or using
the shortcut Ctrl-m i.
You can press Ctrl-m h for a list of all ipython keyboard shortcuts.
Loading Digits Data
Now we'll take a look at another dataset, one where we have to put a bit
more thought into how to represent the data. We can explore the data in
a similar manner as above:
End of explanation
"""
print(digits.data.shape)
print(digits.images.shape)
"""
Explanation: The target here is just the digit represented by the data. The data is an array of
length 64... but what does this data mean?
There's a clue in the fact that we have two versions of the data array:
data and images. Let's take a look at them:
End of explanation
"""
import numpy as np
print(np.all(digits.images.reshape((1797, 64)) == digits.data))
"""
Explanation: We can see that they're related by a simple reshaping:
End of explanation
"""
# set up the figure
fig = plt.figure(figsize=(6, 6)) # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# plot the digits: each image is 8x8 pixels
for i in range(64):
ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')
# label the image with the target value
ax.text(0, 7, str(digits.target[i]))
"""
Explanation: Let's visualize the data. It's a little bit more involved than the simple scatter-plot
we used above, but we can do it rather quickly.
End of explanation
"""
from sklearn.datasets import make_s_curve
data, colors = make_s_curve(n_samples=1000)
print(data.shape)
print(colors.shape)
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection='3d')
ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=colors)
ax.view_init(10, -60)
"""
Explanation: We see now what the features mean. Each feature is a real-valued quantity representing the
darkness of a pixel in an 8x8 image of a hand-written digit.
Even though each sample has data that is inherently two-dimensional, the data matrix flattens
this 2D data into a single vector, which can be contained in one row of the data matrix.
Generated Data: the S-Curve
One dataset often used as an example of a simple nonlinear dataset is the S-curve:
End of explanation
"""
from sklearn.datasets import fetch_olivetti_faces
# fetch the faces data
# Use a script like above to plot the faces image data.
# hint: plt.cm.bone is a good colormap for this data
"""
Explanation: This example is typically used with an unsupervised learning method called Locally
Linear Embedding. We'll explore unsupervised learning in detail later in the tutorial.
Exercise: working with the faces dataset
Here we'll take a moment for you to explore the datasets yourself.
Later on we'll be using the Olivetti faces dataset.
Take a moment to fetch the data (about 1.4MB), and visualize the faces.
You can copy the code used to visualize the digits above, and modify it for this data.
End of explanation
"""
# %load solutions/03A_faces_plot.py
"""
Explanation: Solution:
End of explanation
"""
|
KaiSzuttor/espresso
|
doc/tutorials/02-charged_system/02-charged_system-2.ipynb
|
gpl-3.0
|
from espressomd import System, electrostatics, electrostatic_extensions
from espressomd.shapes import Wall
from espressomd.minimize_energy import steepest_descent
import espressomd
import numpy
"""
Explanation: Tutorial 2: A Simple Charged System, Part 2
7 2D Electrostatics and Constraints
In this section, we use the parametrized NaCl system from the last task to simulate a molten salt in a
parallel plate capacitor with and without applied electric field. We have to extend our simulation by several aspects:
Confinement
ESPResSo features a number of basic shapes like cylinders, walls or spheres to simulate confined systems.
Here, we use two walls at $z = 0$ and $z = L_z$ for the parallel plate setup ($L_z$: box length in $z$-direction)
2D-Electrostatics
ESPResSo also has a number of ways to account for the unwanted electrostatic interaction in the now non-periodic $z$-dimension.
We use the 3D-periodic P$^3$M algorithm in combination with the Electrostatic Layer Correction (ELC).
ELC subtracts the forces caused by the periodic images in the $z$-dimension. Another way would be to use the explicit 2D-electrostatics algorithm
MMM2D, also available in ESPResSo.
Electric Field
The simple geometry of the system allows us to treat an electric field in $z$-direction as a homogeneous force.
Note that we use inert walls here and don't take into account the dielectric contrast caused by metal electrodes.
Parameters
For our molten NaCl, we use a temperature of $1198.3 \ \mathrm{K}$, about $100 \ \mathrm{K}$ above the melting point,
and an approximate density of $\rho = 1.1138 \ \mathrm{u \mathring{A}^{-3}}$ found in [1].
Let's walk through the python script. We need additional imports for the wall shapes and the ELC algorithm:
End of explanation
"""
required_features = ["EXTERNAL_FORCES", "MASS", "ELECTROSTATICS", "LENNARD_JONES"]
espressomd.assert_features(required_features)
print(espressomd.features())
# System parameters
n_part = 1000
n_ionpairs = n_part / 2
density = 1.1138
time_step = 0.001823
temp = 1198.3
gamma = 50
k_B = 1.380649e-23 # units of [J/K]
q_e = 1.602176634e-19 # units of [C]
epsilon_0 = 8.8541878128e-12 # units of [C^2/J/m]
coulomb_prefactor = q_e**2 / (4 * numpy.pi * epsilon_0) * 1e10
l_bjerrum = 0.885**2 * coulomb_prefactor / (k_B * temp)
wall_margin = 0.5
Ez = 0
num_steps_equilibration = 3000
num_configs = 200
integ_steps_per_config = 100
"""
Explanation: If we target a liquid system, we should not set up the particles in a lattice,
as this introduces unwanted structure in the starting configuration.
We define our system size by the number of particles and the density.
The system parameters lead to the following values:
End of explanation
"""
# Particle parameters
types = {"Cl": 0, "Na": 1, "Electrode": 2}
numbers = {"Cl": n_ionpairs, "Na": n_ionpairs}
charges = {"Cl": -1.0, "Na": 1.0}
lj_sigmas = {"Cl": 3.85, "Na": 2.52, "Electrode": 3.37}
lj_epsilons = {"Cl": 192.45, "Na": 17.44, "Electrode": 24.72}
lj_cuts = {"Cl": 3.0 * lj_sigmas["Cl"],
"Na": 3.0 * lj_sigmas["Na"],
"Electrode": 3.0 * lj_sigmas["Electrode"]}
masses = {"Cl": 35.453, "Na": 22.99, "Electrode": 12.01}
"""
Explanation: We save the force field parameters in python dictionaries, now with parameters for the walls:
End of explanation
"""
# Setup System
box_l = (n_ionpairs * sum(masses.values()) / density)**(1. / 3.)
box_z = box_l + 2.0 * (lj_sigmas["Electrode"] + wall_margin)
elc_gap = box_z * 0.15
system = System(box_l=[box_l, box_l, box_z + elc_gap])
box_volume = numpy.prod([box_l, box_l, box_z])
system.periodicity = [True, True, True]
system.time_step = time_step
system.cell_system.skin = 0.3
"""
Explanation: To finally calculate the box size, we take into account the diameter of the electrode interaction.
Additionally, ELC needs a particle-free gap in the $z$-direction behind the wall.
End of explanation
"""
# Walls
system.constraints.add(shape=Wall(dist=wall_margin, normal=[0, 0, 1]),
particle_type=types["Electrode"])
system.constraints.add(shape=Wall(dist=-(box_z - wall_margin), normal=[0, 0, -1]),
particle_type=types["Electrode"])
"""
Explanation: In the next snippet, we add the walls to our system. Our constraint takes two arguments:
First the <tt>shape</tt>, in our case a simple plane defined by its normal vector and the distance from the origin,
second the <tt>particle_type</tt>, which is used to set up the interaction between particles and constraints.
End of explanation
"""
# Place particles
for i in range(int(n_ionpairs)):
p = numpy.random.random(3) * box_l
p[2] += lj_sigmas["Electrode"]
system.part.add(id=len(system.part), type=types["Cl"],
pos=p, q=charges["Cl"], mass=masses["Cl"])
for i in range(int(n_ionpairs)):
p = numpy.random.random(3) * box_l
p[2] += lj_sigmas["Electrode"]
system.part.add(id=len(system.part), type=types["Na"],
pos=p, q=charges["Na"], mass=masses["Na"])
"""
Explanation: Now we place the particles at random positions without overlap with the walls:
End of explanation
"""
# Lennard-Jones interactions parameters
def combination_rule_epsilon(rule, eps1, eps2):
if rule == "Lorentz":
return (eps1 * eps2)**0.5
else:
        raise ValueError("No combination rule defined")
def combination_rule_sigma(rule, sig1, sig2):
if rule == "Berthelot":
return (sig1 + sig2) * 0.5
else:
        raise ValueError("No combination rule defined")
for s in [["Cl", "Na"], ["Cl", "Cl"], ["Na", "Na"],
["Na", "Electrode"], ["Cl", "Electrode"]]:
lj_sig = combination_rule_sigma("Berthelot",
lj_sigmas[s[0]], lj_sigmas[s[1]])
lj_cut = combination_rule_sigma("Berthelot",
lj_cuts[s[0]], lj_cuts[s[1]])
lj_eps = combination_rule_epsilon("Lorentz",
lj_epsilons[s[0]], lj_epsilons[s[1]])
system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
"""
Explanation: The scheme to set up the Lennard-Jones interaction is the same as before,
extended by the Electrode-Ion interactions:
End of explanation
"""
energy = system.analysis.energy()
print("Before Minimization: E_total = {:.3e}".format(energy['total']))
steepest_descent(system, f_max=10, gamma=10, max_steps=2000,
max_displacement=0.01)
energy = system.analysis.energy()
print("After Minimization: E_total = {:.3e}".format(energy['total']))
# Set thermostat
system.thermostat.set_langevin(kT=temp, gamma=gamma, seed=42)
"""
Explanation: Next we remove particle overlaps with a steepest descent energy minimization, followed by setting the Langevin thermostat:
End of explanation
"""
# Tuning Electrostatics
p3m = electrostatics.P3M(prefactor=l_bjerrum * temp,
accuracy=1e-2)
system.actors.add(p3m)
elc = electrostatic_extensions.ELC(gap_size=elc_gap,
maxPWerror=1e-3)
system.actors.add(elc)
"""
Explanation: As described, we use P$^3$M in combination with ELC to account for the 2D-periodicity.
ELC is also added to the <tt>actors</tt> of the system and takes gap size and maximum
pairwise errors as arguments.
End of explanation
"""
for p in system.part:
p.ext_force = [0, 0, Ez * p.q]
"""
Explanation: For now, our electric field is zero, but we want to switch it on later.
Here we run over all particles and set an external force on the charges caused
by the field:
End of explanation
"""
# Temperature Equilibration
system.time = 0.0
for i in range(int(num_steps_equilibration / 100)):
energy = system.analysis.energy()
temp_measured = energy['kinetic'] / ((3.0 / 2.0) * n_part)
print("progress={:.0f}%, t={:.1f}, E_total={:.2f}, E_coulomb={:.2f}, T={:.4f}"
.format(i * 100. / int(num_steps_equilibration / 100 - 1), system.time,
energy['total'], energy['coulomb'], temp_measured), end='\r')
system.integrator.run(100)
print()
"""
Explanation: This is followed by our standard temperature equilibration:
End of explanation
"""
# Integration
bins = 100
z_dens_na = numpy.zeros(bins)
z_dens_cl = numpy.zeros(bins)
system.time = 0.0
cnt = 0
for i in range(num_configs):
print('progress: {:>3.0f}%'.format(i * 100. / num_configs), end='\r')
energy = system.analysis.energy()
temp_measured = energy['kinetic'] / ((3.0 / 2.0) * n_part)
system.integrator.run(integ_steps_per_config)
for p in system.part:
bz = int(p.pos[2] / box_z * bins)
if p.type == types["Na"]:
z_dens_na[bz] += 1.0
elif p.type == types["Cl"]:
z_dens_cl[bz] += 1.0
cnt += 1
print('progress: 100%')
"""
Explanation: In the integration loop, we like to measure the density profile for both ion species along the $z$-direction.
We use a simple histogram analysis to accumulate the density data. Integration takes a while.
End of explanation
"""
# Analysis
# Average / Normalize with Volume
z_dens_na /= (cnt * box_volume / bins)
z_dens_cl /= (cnt * box_volume / bins)
z_values = numpy.linspace(0, box_l, num=bins)
res = numpy.column_stack((z_values, z_dens_na, z_dens_cl))
numpy.savetxt("z_density.data", res,
header="#z rho_na(z) rho_cl(z)")
"""
Explanation: Finally, we calculate the average, normalize the data with the bin volume and save it to
a file using NumPy's <tt>savetxt</tt> command.
End of explanation
"""
import matplotlib.pyplot as plt
plt.ion()
plt.figure(figsize=(10, 6), dpi=80)
plt.plot(z_values, z_dens_na, label='Na')
plt.plot(z_values, z_dens_cl, label='Cl')
plt.xlabel('$z$-axis $(\\mathrm{\\AA})$', fontsize=20)
plt.ylabel('Density $(\\mathrm{u\\AA}^{-3})$', fontsize=20)
plt.legend(fontsize=16)
plt.show()
"""
Explanation: Finally we can plot the density of the ions.
End of explanation
"""
|
ledeprogram/algorithms
|
class6/donow/Gruen_Gianna_6_donow.ipynb
|
gpl-3.0
|
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import statsmodels.formula.api as smf
"""
Explanation: 1. Import the necessary packages to read in the data, plot, and create a linear regression model
End of explanation
"""
df = pd.read_csv('hanford.csv')
df
"""
Explanation: 2. Read in the hanford.csv file
End of explanation
"""
df.describe()
iqr = df.quantile(q=0.75) - df.quantile(q=0.25)
iqr
ual = df.quantile(q=0.75) + (iqr * 1.5)
ual
lal = df.quantile(q=0.25) - (iqr * 1.5)
lal
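# A hypothetical follow-up (not required by the exercise): list any rows whose exposure
# falls outside the 1.5*IQR whisker limits computed above.
df[(df['Exposure'] > ual['Exposure']) | (df['Exposure'] < lal['Exposure'])]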
"""
Explanation: 3. Calculate the basic descriptive statistics on the data
End of explanation
"""
df.corr()
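# The task also asks for a scatter plot; a quick, unstyled look at the relationship
# (the styled plot with the fitted line appears further below):
df.plot(kind='scatter', x='Exposure', y='Mortality')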
"""
Explanation: 4. Calculate the coefficient of correlation (r) and generate the scatter plot. Does there seem to be a correlation worthy of investigation?
End of explanation
"""
lm = smf.ols(formula="Mortality~Exposure",data=df).fit()
lm.params
intercept, slope = lm.params
exposure_input = input("Type in an exposure you'd like to know the mortality for:")
if exposure_input:
prediction = (float(lm.params['Exposure']) * float(exposure_input)) + (float(lm.params['Intercept']))
print(prediction)
"""
Explanation: Yes, there does indeed seem to be a correlation worth investigating
5. Create a linear regression model based on the available data to predict the mortality rate given a level of exposure
End of explanation
"""
fig, ax = plt.subplots(figsize=(7,7))
plt.style.use('ggplot')
ax = df.plot(ax = ax, kind= 'scatter', x = 'Exposure', y = 'Mortality')
plt.plot(df['Exposure'],slope*df['Exposure']+intercept, color="red", linewidth=2)
r = df.corr()['Exposure']['Mortality']
r
coefficient_determination = r **2
coefficient_determination
"""
Explanation: 6. Plot the linear regression line on the scatter plot of values. Calculate the r^2 (coefficient of determination)
End of explanation
"""
prediction = float(lm.params['Exposure']) * 10 + float(lm.params['Intercept'])
print(prediction)
"""
Explanation: 7. Predict the mortality rate (Cancer per 100,000 man years) given an index of exposure = 10
End of explanation
"""
|
qutip/qutip-notebooks
|
development/development-ssesolver-new-methods.ipynb
|
lgpl-3.0
|
%matplotlib inline
%config InlineBackend.figure_formats = ['svg']
from qutip import *
from qutip.ui.progressbar import BaseProgressBar
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
y_sse = None
import time
"""
Explanation: Test for different solvers for stochastic equation
Based on development-smesolver-new-methods by Manuel Grimm, Niels Lörch, and Denis V. Vasilyev.
Eric Giguere, March 2018
End of explanation
"""
def arccoth(x):
return 0.5*np.log((1.+x)/(x-1.))
############ parameters #############
th = 0.1 # Interaction parameter
alpha = np.cos(th)
beta = np.sin(th)
gamma = 1.
def gammaf(t):
return 0.25+t/12+t*t/6
def f_gamma(t,*args):
return (0.25+t/12+t*t/6)**(0.5)
################# Solution of the differential equation for the variance Vc ####################
T = 6.
N_store = 200
tlist = np.linspace(0,T,N_store)
y0 = 0.5
def func(y, t):
return -(gammaf(t) - alpha*beta)*y - 2*alpha*alpha*y*y + 0.5*gammaf(t)
y_td = odeint(func, y0, tlist)
def func(y, t):
return -(gamma - alpha*beta)*y - 2*alpha*alpha*y*y + 0.5*gamma
y = odeint(func, y0, tlist)
############ Exact steady state solution for Vc #########################
Vc = (alpha*beta - gamma + np.sqrt((gamma-alpha*beta)**2 + 4*gamma*alpha**2))/(4*alpha**2)
#### Analytic solution
A = (gamma**2 + alpha**2 * (beta**2 + 4*gamma) - 2*alpha*beta*gamma)**0.5
B = arccoth((-4*alpha**2*y0 + alpha*beta - gamma)/A)
y_an = (alpha*beta - gamma + A / np.tanh(0.5*A*tlist - B))/(4*alpha**2)
f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
ax.set_title('Variance as a function of time')
ax.plot(tlist,y)
ax.plot(tlist,Vc*np.ones_like(tlist))
ax.plot(tlist,y_an)
ax.set_ylim(0,0.5)
ax2.set_title('Deviation of odeint from analytic solution')
ax2.set_xlabel('t')
ax2.set_ylabel(r'$\epsilon$')
ax2.plot(tlist,y_an - y.T[0]);
"""
Explanation: Just check that the analytical solution coincides with the solution of the ODE for the variance
End of explanation
"""
####################### Model ###########################
N = 30 # number of Fock states
Id = qeye(N)
a = destroy(N)
s = 0.5*((alpha+beta)*a + (alpha-beta)*a.dag())
x = (a + a.dag())/np.sqrt(2)
H = Id
c_op = [np.sqrt(gamma)*a]
c_op_td = [[a,f_gamma]]
sc_op = [s]
e_op = [x, x*x]
rho0 = fock_dm(N,0) # initial vacuum state
#sc_len=1 # one stochastic operator
############## time steps and trajectories ###################
ntraj = 1 #100 # number of trajectories
T = 6. # final time
N_store = 200 # number of time steps for which we save the expectation values/density matrix
tlist = np.linspace(0,T,N_store)
ddt = (tlist[1]-tlist[0])
Nsubs = list((13*np.logspace(0,1,10)).astype(np.int))
stepsizes = [ddt/j for j in Nsubs] # step size is doubled after each evaluation
Nt = len(Nsubs) # number of step sizes that we compare
Nsubmax = Nsubs[-1] # Number of intervals for the smallest step size;
dtmin = (tlist[1]-tlist[0])/(Nsubmax)
"""
Explanation: Test of different SME solvers
End of explanation
"""
# Analytical solution not available:
# Compute the evolution with the best solver and very small step size and use it as the reference
sol = ssesolve(H, fock(N), tlist, [sc_op[0]+c_op[0]], e_op, nsubsteps=2000, method="homodyne",solver="taylor2.0")
y_sse = sol.expect[1]-sol.expect[0]*sol.expect[0].conj()
ntraj = 1
def run_sse(**kwargs):
epsilon = np.zeros(Nt)
std = np.zeros(Nt)
print(kwargs)
for jj in range(0,Nt):
for j in range(0,ntraj):
Nsub = Nsubs[jj]#int(Nsubmax/(2**jj))
sol = ssesolve(H, fock(N), tlist, [sc_op[0]+c_op[0]], e_op, nsubsteps=Nsub, **kwargs)
epsilon_j = 1/T * np.sum(np.abs(y_sse - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
epsilon[jj] += epsilon_j
std[jj] += epsilon_j
epsilon/= ntraj
std = np.sqrt(1/ntraj * (1/ntraj * std - epsilon**2))
return epsilon
def get_stats(**kw):
start = time.time()
y = run_sse(**kw)
tag = str(kw["solver"])
x = np.log(stepsizes)
ly = np.log(y)
fit = np.polyfit(x, ly, 1)[0]
return y,tag,fit,time.time()-start
stats_cte = []
stats_cte.append(get_stats(solver='euler-maruyama'))
stats_cte.append(get_stats(solver='platen'))
stats_cte.append(get_stats(solver='pred-corr'))
stats_cte.append(get_stats(solver='milstein'))
stats_cte.append(get_stats(solver='milstein-imp', tol=1e-9))
stats_cte.append(get_stats(solver='pred-corr-2'))
stats_cte.append(get_stats(solver='explicit1.5'))
stats_cte.append(get_stats(solver="taylor1.5"))
stats_cte.append(get_stats(solver="taylor1.5-imp", tol=1e-9))
stats_cte.append(get_stats(solver="taylor2.0"))
stats_cte.append(get_stats(solver="taylor2.0", noiseDepth=500))
fig = plt.figure()
ax = plt.subplot(111)
mark = "o*vspx+^<>1hdD"
for i,run in enumerate(stats_cte):
ax.loglog(stepsizes, run[0], mark[i], label=run[1]+": " + str(run[2]))
ax.loglog(stepsizes, 0.003*np.array(stepsizes)**0.5, label="$\propto\Delta t^{1/2}$")
ax.loglog(stepsizes, 0.01*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.001*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.01*np.array(stepsizes)**1.5, label="$\propto\Delta t^{3/2}$")
ax.loglog(stepsizes, 0.05*np.array(stepsizes)**2.0, label="$\propto\Delta t^{2}$")
ax.set_xlabel(r'$\Delta t$ $\left[\gamma^{-1}\right]$')
ax.set_ylabel('deviation')
lgd=ax.legend(loc='center left', bbox_to_anchor=(1, 0.64), prop={'size':12})
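# The constant-coefficient runs above also recorded wall-clock times (the fourth entry of
# each tuple returned by get_stats). A rough cost overview -- timings are machine-dependent,
# so treat the absolute numbers as indicative only:
for run in stats_cte:
    print("{:<25s} fitted order {:5.2f}, runtime {:8.1f} s".format(run[1], run[2], run[3]))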
"""
Explanation: Plotting the figure - Constant case
End of explanation
"""
def H_f(t,args):
return 0.125+t/12+t*t/72
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist, sc_op, e_op,
nsubsteps=2500, method="homodyne",solver="taylor2.0")
y_sse_td = sol.expect[1]-sol.expect[0]*sol.expect[0].conj()
plt.plot(y_sse_td)
ntraj = 1
def run_sse_td(**kwargs):
epsilon = np.zeros(Nt)
std = np.zeros(Nt)
print(kwargs)
for jj in range(0,Nt):
for j in range(0,ntraj):
Nsub = Nsubs[jj]#int(Nsubmax/(2**jj))
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist, sc_op, e_op, nsubsteps=Nsub, **kwargs)
epsilon_j = 1/T * np.sum(np.abs(y_sse_td - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
epsilon[jj] += epsilon_j
std[jj] += epsilon_j
epsilon/= ntraj
std = np.sqrt(1/ntraj * (1/ntraj * std - epsilon**2))
return epsilon
def get_stats(**kw):
y = run_sse_td(**kw)
tag = str(kw["solver"])
x = np.log(stepsizes)
ly = np.log(y)
fit = np.polyfit(x, ly, 1)[0]
return y,tag,fit
stats_td = []
stats_td.append(get_stats(solver='euler-maruyama'))
stats_td.append(get_stats(solver='platen'))
stats_td.append(get_stats(solver='pred-corr'))
stats_td.append(get_stats(solver='milstein'))
stats_td.append(get_stats(solver='milstein-imp'))
stats_td.append(get_stats(solver='pred-corr-2'))
stats_td.append(get_stats(solver='explicit1.5'))
stats_td.append(get_stats(solver="taylor1.5"))
stats_td.append(get_stats(solver="taylor1.5-imp", tol=1e-9))
stats_td.append(get_stats(solver="taylor2.0"))
stats_td.append(get_stats(solver="taylor2.0", noiseDepth=500))
fig = plt.figure()
ax = plt.subplot(111)
mark = "o*vspx+^<>1hdD"
for i,run in enumerate(stats_td):
ax.loglog(stepsizes, run[0], mark[i], label=run[1]+": " + str(run[2]))
ax.loglog(stepsizes, 0.1*np.array(stepsizes)**0.5, label="$\propto\Delta t^{1/2}$")
ax.loglog(stepsizes, 0.1*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.1*np.array(stepsizes)**1.5, label="$\propto\Delta t^{3/2}$")
ax.loglog(stepsizes, 0.5*np.array(stepsizes)**2.0, label="$\propto\Delta t^{2}$")
ax.set_xlabel(r'$\Delta t$ $\left[\gamma^{-1}\right]$')
ax.set_ylabel('deviation')
lgd=ax.legend(loc='center left', bbox_to_anchor=(1, 0.64), prop={'size':12})
"""
Explanation: Deterministic part time dependent
End of explanation
"""
def H_f(t,args):
return 0.125+t/12+t*t/72
def H_bf(t,args):
return 0.125+t/10+t*t/108
sc_op_td = [[sc_op[0],H_bf]]
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist, sc_op_td, e_op,
nsubsteps=2000, method="homodyne",solver="taylor15")
y_sse_btd = sol.expect[1]-sol.expect[0]*sol.expect[0].conj()
plt.plot(y_sse_btd)
ntraj = 1
def run_sse_td(**kwargs):
epsilon = np.zeros(Nt)
std = np.zeros(Nt)
print(kwargs)
for jj in range(0,Nt):
for j in range(0,ntraj):
Nsub = Nsubs[jj]#int(Nsubmax/(2**jj))
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist, sc_op_td, e_op, nsubsteps=Nsub, **kwargs)
epsilon_j = 1/T * np.sum(np.abs(y_sse_btd - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
epsilon[jj] += epsilon_j
std[jj] += epsilon_j
epsilon/= ntraj
std = np.sqrt(1/ntraj * (1/ntraj * std - epsilon**2))
return epsilon
def get_stats_b(**kw):
y = run_sse_td(**kw)
tag = str(kw["solver"])
x = np.log(stepsizes)
ly = np.log(y)
fit = np.polyfit(x, ly, 1)[0]
return y,tag,fit
stats_d2_td = []
stats_d2_td.append(get_stats_b(solver='euler-maruyama'))
stats_d2_td.append(get_stats_b(solver='platen'))
stats_d2_td.append(get_stats_b(solver='pred-corr'))
stats_d2_td.append(get_stats_b(solver='milstein'))
stats_d2_td.append(get_stats_b(solver='milstein-imp'))
stats_d2_td.append(get_stats_b(solver='pred-corr-2'))
stats_d2_td.append(get_stats_b(solver='explicit1.5'))
stats_d2_td.append(get_stats_b(solver="taylor1.5"))
stats_d2_td.append(get_stats_b(solver="taylor1.5-imp", tol=1e-9))
fig = plt.figure()
ax = plt.subplot(111)
mark = "o*vspx+^<>1hdD"
for i,run in enumerate(stats_d2_td):
ax.loglog(stepsizes, run[0], mark[i], label=run[1]+": " + str(run[2]))
ax.loglog(stepsizes, 0.03*np.array(stepsizes)**0.5, label="$\propto\Delta t^{1/2}$")
ax.loglog(stepsizes, 0.03*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.03*np.array(stepsizes)**1.5, label="$\propto\Delta t^{3/2}$")
ax.set_xlabel(r'$\Delta t$ $\left[\gamma^{-1}\right]$')
ax.set_ylabel('deviation')
lgd=ax.legend(loc='center left', bbox_to_anchor=(1, 0.64), prop={'size':12})
"""
Explanation: Both d1 and d2 time-dependent
End of explanation
"""
def H_f(t,args):
return 0.125+t/12+t*t/36
def H_bf(t,args):
return 0.125+t/10+t*t/108
sc_op_td = [[sc_op[0]],[sc_op[0],H_bf],[sc_op[0],H_f]]
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist/3, sc_op_td, e_op,
nsubsteps=2000, method="homodyne",solver="taylor15")
y_sse_multi = sol.expect[1]-sol.expect[0]*sol.expect[0].conj()
plt.plot(y_sse_multi)
ntraj = 1
def run_sss_multi(**kwargs):
epsilon = np.zeros(Nt)
std = np.zeros(Nt)
print(kwargs)
for jj in range(0,Nt):
for j in range(0,ntraj):
Nsub = Nsubs[jj]#int(Nsubmax/(2**jj))
sol = ssesolve([H,[c_op[0].dag()*c_op[0]/2,H_f]], fock(N), tlist/3, sc_op_td, e_op, nsubsteps=Nsub, **kwargs)
epsilon_j = 1/T * np.sum(np.abs(y_sse_multi - (sol.expect[1]-sol.expect[0]*sol.expect[0].conj())))*ddt
epsilon[jj] += epsilon_j
std[jj] += epsilon_j
epsilon/= ntraj
std = np.sqrt(1/ntraj * (1/ntraj * std - epsilon**2))
return epsilon
def get_stats_multi(**kw):
y = run_sss_multi(**kw)
tag = str(kw["solver"])
x = np.log(stepsizes)
ly = np.log(y)
fit = np.polyfit(x, ly, 1)[0]
return (y,tag,fit)
stats_multi = []
stats_multi.append(get_stats_multi(solver='euler-maruyama'))
stats_multi.append(get_stats_multi(solver="platen"))
stats_multi.append(get_stats_multi(solver='pred-corr'))
stats_multi.append(get_stats_multi(solver='milstein'))
stats_multi.append(get_stats_multi(solver='milstein-imp'))
stats_multi.append(get_stats_multi(solver='pred-corr-2'))
stats_multi.append(get_stats_multi(solver='explicit1.5'))
stats_multi.append(get_stats_multi(solver="taylor1.5"))
stats_multi.append(get_stats_multi(solver="taylor1.5-imp", tol=1e-9))
fig = plt.figure()
ax = plt.subplot(111)
mark = "o*vspx+^<>Dd"
for run in stats_multi:
ax.loglog(stepsizes, run[0], 'o', label=run[1]+": " + str(run[2]))
ax.loglog(stepsizes, 0.05*np.array(stepsizes)**0.5, label="$\propto\Delta t^{1/2}$")
ax.loglog(stepsizes, 0.05*np.array(stepsizes)**1, label="$\propto\Delta t$")
ax.loglog(stepsizes, 0.05*np.array(stepsizes)**1.5, label="$\propto\Delta t^{3/2}$")
ax.set_xlabel(r'$\Delta t$ $\left[\gamma^{-1}\right]$')
ax.set_ylabel('deviation')
lgd=ax.legend(loc='center left', bbox_to_anchor=(1, 0.64), prop={'size':12})
"""
Explanation: Multiple sc_ops, time-dependent
End of explanation
"""
from qutip.ipynbtools import version_table
version_table()
"""
Explanation: Versions
End of explanation
"""
|
statsmodels/statsmodels
|
examples/notebooks/statespace_tvpvar_mcmc_cfa.ipynb
|
bsd-3-clause
|
%matplotlib inline
from importlib import reload
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from scipy.stats import invwishart, invgamma
# Get the macro dataset
dta = sm.datasets.macrodata.load_pandas().data
dta.index = pd.date_range('1959Q1', '2009Q3', freq='QS')
"""
Explanation: TVP-VAR, MCMC, and sparse simulation smoothing
End of explanation
"""
# Construct a local level model for inflation
mod = sm.tsa.UnobservedComponents(dta.infl, 'llevel')
# Fit the model's parameters (sigma2_varepsilon and sigma2_eta)
# via maximum likelihood
res = mod.fit()
print(res.params)
# Create simulation smoother objects
sim_kfs = mod.simulation_smoother() # default method is KFS
sim_cfa = mod.simulation_smoother(method='cfa') # can specify CFA method
"""
Explanation: Background
Bayesian analysis of linear Gaussian state space models via Markov chain Monte Carlo (MCMC) methods has become both commonplace and relatively straightforward in recent years, due especially to advances in sampling from the joint posterior of the unobserved state vector conditional on the data and model parameters (see especially Carter and Kohn (1994), de Jong and Shephard (1995), and Durbin and Koopman (2002)). This is particularly useful for Gibbs sampling MCMC approaches.
While these procedures make use of the forward/backward application of the recursive Kalman filter and smoother, another recent line of research takes a different approach and constructs the posterior joint distribution of the entire vector of states at once - see in particular Chan and Jeliazkov (2009) for an econometric time series treatment and McCausland et al. (2011) for a more general survey. In particular, the posterior mean and precision matrix are constructed explicitly, with the latter a sparse band matrix. Advantage is then taken of efficient algorithms for Cholesky factorization of sparse band matrices; this reduces memory costs and can improve performance. Following McCausland et al. (2011), we refer to this method as the "Cholesky Factor Algorithm" (CFA) approach.
The CFA-based simulation smoother has some advantages and some drawbacks compared to that based on the more typical Kalman filter and smoother (KFS).
Advantages of CFA:
Derivation of the joint posterior distribution is relatively straightforward and easy to understand.
In some cases it can be both faster and less memory-intensive than the KFS approach
In the Appendix at the end of this notebook, we briefly discuss the performance of the two simulation smoothers for the TVP-VAR model. In summary: simple tests on a single machine suggest that for the TVP-VAR model, the CFA and KFS implementations in Statsmodels have about the same runtimes, while both implementations are about twice as fast as the replication code, written in Matlab, provided by Chan and Jeliazkov (2009).
Drawbacks of CFA:
The main drawback is that this method has not (at least so far) reached the generality of the KFS approach. For example:
It can not be used with models that have reduced-rank error terms in the observation or state equations.
One implication of this is that the typical state space model trick of including identities in the state equation to accommodate, for example, higher-order lags in autoregressive models is not applicable. These models can still be handled by the CFA approach, but at the cost of requiring a slightly different implementation for each lag that is included.
As an example, standard ways of representing ARMA and VARMA processes in state space form do include identities in the observation and/or state equations, and so the basic formulas presented in Chan and Jeliazkov (2009) do not apply immediately to these models.
Less flexibility is available in the state initialization / prior.
Implementation in Statsmodels
A CFA simulation smoother along the lines of the basic formulas presented in Chan and Jeliazkov (2009) has been implemented in Statsmodels.
Notes:
Therefore, the CFA simulation smoother in Statsmodels so far only supports the case that the state transition is truly a first-order Markov process (i.e. it does not support a p-th order Markov process that has been stacked using identities into a first-order process).
By contrast, the KFS smoother in Statsmodels is fully general and can be used for any state space model, including those with stacked p-th order Markov processes or other identities in the observation and state equations.
Either the KFS or the CFA simulation smoother can be constructed from a state space model using the simulation_smoother method. To show the basic idea, we first consider a simple example.
Local level model
A local level model decomposes an observed series $y_t$ into a persistent trend $\mu_t$ and a transitory error component
$$
\begin{aligned}
y_t & = \mu_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, \sigma_\text{irregular}^2) \
\mu_t & = \mu_{t-1} + \eta_t, \quad ~ \eta_t \sim N(0, \sigma_\text{level}^2)
\end{aligned}
$$
This model satisfies the requirements of the CFA simulation smoother because both the observation error term $\varepsilon_t$ and the state innovation term $\eta_t$ are non-degenerate - that is, their covariance matrices are full rank.
We apply this model to inflation, and consider simulating draws from the posterior of the joint state vector. That is, we are interested in sampling from
$$p(\mu^t \mid y^t, \sigma_\text{irregular}^2, \sigma_\text{level}^2)$$
where we define $\mu^t \equiv (\mu_1, \dots, \mu_T)'$ and $y^t \equiv (y_1, \dots, y_T)'$.
In Statsmodels, the local level model falls into the more general class of "unobserved components" models, and can be constructed as follows:
End of explanation
"""
nsimulations = 20
simulated_state_kfs = pd.DataFrame(
np.zeros((mod.nobs, nsimulations)), index=dta.index)
simulated_state_cfa = pd.DataFrame(
np.zeros((mod.nobs, nsimulations)), index=dta.index)
for i in range(nsimulations):
# Apply KFS simulation smoothing
sim_kfs.simulate()
# Save the KFS simulated state
simulated_state_kfs.iloc[:, i] = sim_kfs.simulated_state[0]
# Apply CFA simulation smoothing
sim_cfa.simulate()
# Save the CFA simulated state
simulated_state_cfa.iloc[:, i] = sim_cfa.simulated_state[0]
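# Quick numerical check (editorial aside): both smoothers draw from the same posterior,
# so their per-period sample means should agree up to Monte Carlo noise (with only 20
# draws the agreement is rough, but the two sets of paths should track each other).
print((simulated_state_kfs.mean(axis=1) - simulated_state_cfa.mean(axis=1)).abs().max())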
"""
Explanation: The simulation smoother objects sim_kfs and sim_cfa have simulate methods that perform simulation smoothing. Each time that simulate is called, the simulated_state attribute will be re-populated with a new simulated draw from the posterior.
Below, we construct 20 simulated paths for the trend, using the KFS and CFA approaches, where the simulation is at the maximum likelihood parameter estimates.
End of explanation
"""
# Plot the inflation data along with simulated trends
fig, axes = plt.subplots(2, figsize=(15, 6))
# Plot data and KFS simulations
dta.infl.plot(ax=axes[0], color='k')
axes[0].set_title('Simulations based on KFS approach, MLE parameters')
simulated_state_kfs.plot(ax=axes[0], color='C0', alpha=0.25, legend=False)
# Plot data and CFA simulations
dta.infl.plot(ax=axes[1], color='k')
axes[1].set_title('Simulations based on CFA approach, MLE parameters')
simulated_state_cfa.plot(ax=axes[1], color='C0', alpha=0.25, legend=False)
# Add a legend, clean up layout
handles, labels = axes[0].get_legend_handles_labels()
axes[0].legend(handles[:2], ['Data', 'Simulated state'])
fig.tight_layout();
"""
Explanation: Plotting the observed data and the simulations created using each method below, it is not too hard to see that these two methods are doing the same thing.
End of explanation
"""
fig, ax = plt.subplots(figsize=(15, 3))
# Update the model's parameterization to one that attributes more
# variation in inflation to the observation error and so has less
# variation in the trend component
mod.update([4, 0.05])
# Plot simulations
for i in range(nsimulations):
sim_kfs.simulate()
ax.plot(dta.index, sim_kfs.simulated_state[0],
color='C0', alpha=0.25, label='Simulated state')
# Plot data
dta.infl.plot(ax=ax, color='k', label='Data', zorder=-1)
# Add title, legend, clean up layout
ax.set_title('Simulations with alternative parameterization yielding a smoother trend')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[-2:], labels[-2:])
fig.tight_layout();
"""
Explanation: Updating the model's parameters
The simulation smoothers are tied to the model instance, here the variable mod. Whenever the model instance is updated with new parameters, the simulation smoothers will take those new parameters into account in future calls to the simulate method.
This is convenient for MCMC algorithms, which repeatedly (a) update the model's parameters, (b) draw a sample of the state vector, and then (c) draw new values for the model's parameters.
Here we will change the model to a different parameterization that yields a smoother trend, and show how the simulated values change (for brevity we only show the simulations from the KFS approach, but simulations from the CFA approach would be the same).
End of explanation
"""
# Subset to the four variables of interest
y = dta[['realgdp', 'cpi', 'unemp', 'tbilrate']].copy()
y.columns = ['gdp', 'inf', 'unemp', 'int']
# Convert to real GDP growth and CPI inflation rates
y[['gdp', 'inf']] = np.log(y[['gdp', 'inf']]).diff() * 100
y = y.iloc[1:]
fig, ax = plt.subplots(figsize=(15, 5))
y.plot(ax=ax)
ax.set_title('Evolution of macroeconomic variables included in TVP-VAR exercise');
"""
Explanation: Application: Bayesian analysis of a TVP-VAR model by MCMC
One of the applications that Chan and Jeliazkov (2009) consider is the time-varying parameters vector autoregression (TVP-VAR) model, estimated with Bayesian Gibbs sampling (MCMC) methods. They apply this to model the co-movements in four macroeconomic time series:
Real GDP growth
Inflation
Unemployment rate
Short-term interest rates
We will replicate their example, using a very similar dataset that is included in Statsmodels.
End of explanation
"""
# 1. Create a new TVPVAR class as a subclass of sm.tsa.statespace.MLEModel
class TVPVAR(sm.tsa.statespace.MLEModel):
# Steps 2-3 are best done in the class "constructor", i.e. the __init__ method
def __init__(self, y):
# Create a matrix with [y_t' : y_{t-1}'] for t = 2, ..., T
augmented = sm.tsa.lagmat(y, 1, trim='both', original='in', use_pandas=True)
# Separate into y_t and z_t = [1 : y_{t-1}']
p = y.shape[1]
y_t = augmented.iloc[:, :p]
z_t = sm.add_constant(augmented.iloc[:, p:])
# Recall that the length of the state vector is p * (p + 1)
k_states = p * (p + 1)
super().__init__(y_t, exog=z_t, k_states=k_states)
# Note that the state space system matrices default to contain zeros,
# so we don't need to explicitly set c_t = d_t = 0.
# Construct the design matrix Z_t
# Notes:
# -> self.k_endog = p is the dimension of the observed vector
        # -> self.k_states = p * (p + 1) is the dimension of the state vector
# -> self.nobs = T is the number of observations in y_t
self['design'] = np.zeros((self.k_endog, self.k_states, self.nobs))
for i in range(self.k_endog):
start = i * (self.k_endog + 1)
end = start + self.k_endog + 1
self['design', i, start:end, :] = z_t.T
# Construct the transition matrix T = I
self['transition'] = np.eye(k_states)
# Construct the selection matrix R = I
self['selection'] = np.eye(k_states)
# Step 3: Initialize the state vector as alpha_1 ~ N(0, 5I)
self.ssm.initialize('known', stationary_cov=5 * np.eye(self.k_states))
# Step 4. Create a method that we can call to update H and Q
def update_variances(self, obs_cov, state_cov_diag):
self['obs_cov'] = obs_cov
self['state_cov'] = np.diag(state_cov_diag)
# Finally, it can be convenient to define human-readable names for
# each element of the state vector. These will be available in output
@property
def state_names(self):
state_names = np.empty((self.k_endog, self.k_endog + 1), dtype=object)
for i in range(self.k_endog):
endog_name = self.endog_names[i]
state_names[i] = (
['intercept.%s' % endog_name] +
['L1.%s->%s' % (other_name, endog_name) for other_name in self.endog_names])
return state_names.ravel().tolist()
"""
Explanation: TVP-VAR model
Note: this section is based on Chan and Jeliazkov (2009) section 3.1, which can be consulted for additional details.
The usual (time-invariant) VAR(1) model is typically written:
$$
\begin{aligned}
y_t & = \mu + \Phi y_{t-1} + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H)
\end{aligned}
$$
where $y_t$ is a $p \times 1$ vector of variables observed at time $t$ and $H$ is a covariance matrix.
The TVP-VAR(1) model generalizes this to allow the coefficients to vary over time. Stacking all the parameters into a vector according to $\alpha_t = \text{vec}([\mu_t : \Phi_t])$, where $\text{vec}$ denotes the operation that stacks columns of a matrix into a vector, we model their evolution over time according to:
$$\alpha_{i,t+1} = \alpha_{i, t} + \eta_{i,t}, \qquad \eta_{i, t} \sim N(0, \sigma_i^2)$$
In other words, each parameter evolves independently according to a random walk.
Note that there are $p$ coefficients in $\mu_t$ and $p^2$ coefficients in $\Phi_t$, so the full state vector $\alpha$ is shaped $p * (p + 1) \times 1$.
Putting the TVP-VAR(1) model into state-space form is relatively straightforward, and in fact we just have to re-write the observation equation into SUR form:
$$
\begin{aligned}
y_t & = Z_t \alpha_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H) \
\alpha_{t+1} & = \alpha_t + \eta_t, \qquad \eta_t \sim N(0, \text{diag}({\sigma_i^2}))
\end{aligned}
$$
where
$$
Z_t = \begin{bmatrix}
1 & y_{t-1}' & 0 & \dots & & 0 \
0 & 0 & 1 & y_{t-1}' & & 0 \
\vdots & & & \ddots & \ddots & \vdots \
0 & 0 & 0 & 0 & 1 & y_{t-1}' \
\end{bmatrix}
$$
As long as $H$ is full rank and each of the variances $\sigma_i^2$ is non-zero, the model satisfies the requirements of the CFA simulation smoother.
We also need to specify the initialization / prior for the initial state, $\alpha_1$. Here we will follow Chan and Jeliazkov (2009) in using $\alpha_1 \sim N(0, 5 I)$, although we could also model it as diffuse.
Aside from the time-varying coefficients $\alpha_t$, the other parameters that we will need to estimate are terms in the covariance matrix $H$ and the random walk variances $\sigma_i^2$.
TVP-VAR model in Statsmodels
Constructing this model programmatically in Statsmodels is also relatively straightforward, since there are basically four steps:
Create a new TVPVAR class as a subclass of sm.tsa.statespace.MLEModel
Fill in the fixed values of the state space system matrices
Specify the initialization of $\alpha_1$
Create a method for updating the state space system matrices with new values of the covariance matrix $H$ and the random walk variances $\sigma_i^2$.
To do this, first note that the general state space representation used by Statsmodels is:
$$
\begin{aligned}
y_t & = d_t + Z_t \alpha_t + \varepsilon_t, \qquad \varepsilon_t \sim N(0, H_t) \
\alpha_{t+1} & = c_t + T_t \alpha_t + R_t \eta_t, \qquad \eta_t \sim N(0, Q_t) \
\end{aligned}
$$
Then the TVP-VAR(1) model implies the following specializations:
The intercept terms are zero, i.e. $c_t = d_t = 0$
The design matrix $Z_t$ is time-varying but its values are fixed as described above (i.e. its values contain ones and lags of $y_t$)
The observation covariance matrix is not time-varying, i.e. $H_t = H_{t+1} = H$
The transition matrix is not time-varying and is equal to the identity matrix, i.e. $T_t = T_{t+1} = I$
The selection matrix $R_t$ is not time-varying and is also equal to the identity matrix, i.e. $R_t = R_{t+1} = I$
The state covariance matrix $Q_t$ is not time-varying and is diagonal, i.e. $Q_t = Q_{t+1} = \text{diag}({\sigma_i^2})$
End of explanation
"""
# Create an instance of our TVPVAR class with our observed dataset y
mod = TVPVAR(y)
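# A quick sanity check on the dimensions implied by the SUR form: with p = 4 observed
# series we expect a state vector of length p * (p + 1) = 20.
print(mod.k_endog, mod.k_states)
print(mod.state_names[:5])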
"""
Explanation: The above class defines the state space model for any given dataset. Now we need to create a specific instance of it with the dataset that we created earlier containing real GDP growth, inflation, unemployment, and interest rates.
End of explanation
"""
initial_obs_cov = np.cov(y.T)
initial_state_cov_diag = [0.01] * mod.k_states
# Update H and Q
mod.update_variances(initial_obs_cov, initial_state_cov_diag)
# Perform Kalman filtering and smoothing
# (the [] is just an empty list that in some models might contain
# additional parameters. Here, we don't have any additional parameters
# so we just pass an empty list)
initial_res = mod.smooth([])
"""
Explanation: Preliminary investigation with ad-hoc parameters in H, Q
In our analysis below, we will need to begin our MCMC iterations with some initial parameterization. Following Chan and Jeliazkov (2009) we will set $H$ to be the sample covariance matrix of our dataset, and we will set $\sigma_i^2 = 0.01$ for each $i$.
Before discussing the MCMC scheme that will allow us to make inferences about the model, first we can consider the output of the model when simply plugging in these initial parameters. To fill in these parameters, we use the update_variances method that we defined earlier and then perform Kalman filtering and smoothing conditional on those parameters.
Warning: This exercise is just by way of explanation - we must wait for the output of the MCMC exercise to study the actual implications of the model in a meaningful way.
End of explanation
"""
def plot_coefficients_by_equation(states):
fig, axes = plt.subplots(2, 2, figsize=(15, 8))
# The way we defined Z_t implies that the first 5 elements of the
# state vector correspond to the first variable in y_t, which is GDP growth
ax = axes[0, 0]
states.iloc[:, :5].plot(ax=ax)
ax.set_title('GDP growth')
ax.legend()
# The next 5 elements correspond to inflation
ax = axes[0, 1]
states.iloc[:, 5:10].plot(ax=ax)
ax.set_title('Inflation rate')
ax.legend();
# The next 5 elements correspond to unemployment
ax = axes[1, 0]
states.iloc[:, 10:15].plot(ax=ax)
ax.set_title('Unemployment equation')
ax.legend()
# The last 5 elements correspond to the interest rate
ax = axes[1, 1]
states.iloc[:, 15:20].plot(ax=ax)
ax.set_title('Interest rate equation')
ax.legend();
return ax
"""
Explanation: The initial_res variable contains the output of Kalman filtering and smoothing, conditional on those initial parameters. In particular, we may be interested in the "smoothed states", which are $E[\alpha_t \mid y^t, H, {\sigma_i^2}]$.
First, lets create a function that graphs the coefficients over time, separated into the equations for equation of the observed variables.
End of explanation
"""
# Here, for illustration purposes only, we plot the time-varying
# coefficients conditional on an ad-hoc parameterization
# Recall that `initial_res` contains the Kalman filtering and smoothing,
# and the `states.smoothed` attribute contains the smoothed states
plot_coefficients_by_equation(initial_res.states.smoothed);
"""
Explanation: Now, we are interested in the smoothed states, which are available in the states.smoothed attribute out our results object initial_res.
As the graph below shows, the initial parameterization implies substantial time-variation in some of the coefficients.
End of explanation
"""
# Prior hyperparameters
# Prior for obs. cov. is inverse-Wishart(v_1^0=k + 3, S10=I)
v10 = mod.k_endog + 3
S10 = np.eye(mod.k_endog)
# Prior for state cov. variances is inverse-Gamma(v_{i2}^0 / 2 = 3, S_{i2}^0 / 2 = 0.005)
vi20 = 6
Si20 = 0.01
"""
Explanation: Bayesian estimation via MCMC
We will now implement the Gibbs sampler scheme described in Chan and Jeliazkov (2009), Algorithm 2.
We use the following (conditionally conjugate) priors:
$$
\begin{aligned}
H & \sim \mathcal{IW}(\nu_1^0, S_1^0) \
\sigma_i^2 & \sim \mathcal{IG} \left ( \frac{\nu_{i2}^0}{2}, \frac{S_{i2}^0}{2} \right )
\end{aligned}
$$
where $\mathcal{IW}$ denotes the inverse-Wishart distribution and $\mathcal{IG}$ denotes the inverse-Gamma distribution. We set the prior hyperparameters as:
$$
\begin{aligned}
v_1^0 = p + 3, & \quad S_1^0 = I \
v_{i2}^0 = 6, & \quad S_{i2}^0 = 0.01 \qquad \text{for each} ~ i\
\end{aligned}
$$
End of explanation
"""
# Gibbs sampler setup
niter = 11000
nburn = 1000
# 1. Create storage arrays
store_states = np.zeros((niter + 1, mod.nobs, mod.k_states))
store_obs_cov = np.zeros((niter + 1, mod.k_endog, mod.k_endog))
store_state_cov = np.zeros((niter + 1, mod.k_states))
# 2. Put in the initial values
store_obs_cov[0] = initial_obs_cov
store_state_cov[0] = initial_state_cov_diag
mod.update_variances(store_obs_cov[0], store_state_cov[0])
# 3. Construct posterior samplers
sim = mod.simulation_smoother(method='cfa')
"""
Explanation: Before running the MCMC iterations, there are a couple of practical steps:
Create arrays to store the draws of our state vector, observation covariance matrix, and state error variances.
Put the initial values for H and Q (described above) into the storage vectors
Construct the simulation smoother object associated with our TVPVAR instance to make draws of the state vector
End of explanation
"""
for i in range(niter):
mod.update_variances(store_obs_cov[i], store_state_cov[i])
sim.simulate()
# 1. Sample states
store_states[i + 1] = sim.simulated_state.T
# 2. Simulate obs cov
fitted = np.matmul(mod['design'].transpose(2, 0, 1), store_states[i + 1][..., None])[..., 0]
resid = mod.endog - fitted
store_obs_cov[i + 1] = invwishart.rvs(v10 + mod.nobs, S10 + resid.T @ resid)
# 3. Simulate state cov variances
resid = store_states[i + 1, 1:] - store_states[i + 1, :-1]
sse = np.sum(resid**2, axis=0)
for j in range(mod.k_states):
rv = invgamma.rvs((vi20 + mod.nobs - 1) / 2, scale=(Si20 + sse[j]) / 2)
store_state_cov[i + 1, j] = rv
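# A minimal convergence check (editorial sketch; formal diagnostics such as effective
# sample size are beyond the scope of this notebook): trace plot of one sampled variance.
fig, ax = plt.subplots(figsize=(15, 3))
ax.plot(store_state_cov[nburn + 1:, 0])
ax.set_title('Posterior draws of the state innovation variance for %s' % mod.state_names[0]);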
"""
Explanation: As before, we could have used either the simulation smoother based on the Kalman filter and smoother or that based on the Cholesky Factor Algorithm.
End of explanation
"""
# Collect the posterior means of each time-varying coefficient
states_posterior_mean = pd.DataFrame(
np.mean(store_states[nburn + 1:], axis=0),
index=mod._index, columns=mod.state_names)
# Plot these means over time
plot_coefficients_by_equation(states_posterior_mean);
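# Beyond posterior means, the stored draws give pointwise credible bands; for example,
# a 90% band for the first element of the state vector (a minimal illustrative sketch):
q05, q95 = np.percentile(store_states[nburn + 1:, :, 0], [5, 95], axis=0)
fig, ax = plt.subplots(figsize=(15, 3))
ax.plot(states_posterior_mean.index, states_posterior_mean.iloc[:, 0], label='Posterior mean')
ax.fill_between(states_posterior_mean.index, q05, q95, alpha=0.2, label='90% credible band')
ax.set_title(states_posterior_mean.columns[0])
ax.legend();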
"""
Explanation: After removing a number of initial draws, the remaining draws from the posterior allow us to conduct inference. Below, we plot the posterior mean of the time-varying regression coefficients.
(Note: these plots are different from those in Figure 1 of the published version of Chan and Jeliazkov (2009), but they are very similar to those produced by the Matlab replication code available at http://joshuachan.org/code/code_TVPVAR.html)
End of explanation
"""
import arviz as az
# Collect the observation error covariance parameters
az_obs_cov = az.convert_to_inference_data({
('Var[%s]' % mod.endog_names[i] if i == j else
'Cov[%s, %s]' % (mod.endog_names[i], mod.endog_names[j])):
store_obs_cov[nburn + 1:, i, j]
for i in range(mod.k_endog) for j in range(i, mod.k_endog)})
# Plot the credible intervals
az.plot_forest(az_obs_cov, figsize=(8, 7));
# Collect the state innovation variance parameters
az_state_cov = az.convert_to_inference_data({
r'$\sigma^2$[%s]' % mod.state_names[i]: store_state_cov[nburn + 1:, i]
for i in range(mod.k_states)})
# Plot the credible intervals
az.plot_forest(az_state_cov, figsize=(8, 7));
"""
Explanation: Python also has a number of libraries to assist with exploring Bayesian models. Here we'll just use the arviz package to explore the credible intervals of each of the covariance and variance parameters, although it makes available a much wider set of tools for analysis.
End of explanation
"""
from statsmodels.tsa.statespace.simulation_smoother import SIMULATION_STATE
sim_cfa = mod.simulation_smoother(method='cfa')
sim_kfs = mod.simulation_smoother(simulation_output=SIMULATION_STATE)
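# A rough comparison with the %timeit magic discussed in the accompanying text; the
# absolute numbers depend on the machine and BLAS build, so they are indicative only.
%timeit sim_cfa.simulate()
%timeit sim_kfs.simulate()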
"""
Explanation: Appendix: performance
Finally, we run a few simple tests to compare the performance of the KFS and CFA simulation smoothers by using the %timeit Jupyter notebook magic.
One caveat is that the KFS simulation smoother can produce a variety of output beyond just simulations of the posterior state vector, and these additional computations could bias the results. To make the results comparable, we will tell the KFS simulation smoother to only compute simulations of the state by using the simulation_output argument.
End of explanation
"""
|
ogoann/StatisticalMethods
|
examples/Cepheids/FirstLook.ipynb
|
gpl-2.0
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (15.0, 8.0)
"""
Explanation: A First Look at the Periods and Luminosities of Cepheid Stars
Cepheids are stars whose brightness oscillates with a stable period that appears to be strongly correlated with their luminosity (or absolute magnitude).
A lot of monitoring data - repeated imaging and subsequent "photometry" of the star - can provide a measurement of the absolute magnitude (if we know the distance to it's host galaxy) and the period of the oscillation.
Let's look at some Cepheid measurements reported by Riess et al (2011). Like the correlation function summaries, they are in the form of datapoints with error bars, where it is not clear how those error bars were derived (or what they mean).
End of explanation
"""
# First, we need to know what's in the data file.
!head R11ceph.dat
class Cepheids(object):
def __init__(self,filename):
# Read in the data and store it in this master array:
self.data = np.loadtxt(filename)
self.hosts = self.data[:,1].astype('int').astype('str')
# We'll need the plotting setup to be the same each time we make a plot:
colornames = ['red','orange','yellow','green','cyan','blue','violet','magenta','gray']
self.colors = dict(zip(self.list_hosts(), colornames))
self.xlimits = np.array([0.3,2.3])
self.ylimits = np.array([30.0,17.0])
return
def list_hosts(self):
# The list of (9) unique galaxy host names:
return np.unique(self.hosts)
def select(self,ID):
# Pull out one galaxy's data from the master array:
index = (self.hosts == str(ID))
        self.m = self.data[index,2]
        self.merr = self.data[index,3]
        self.logP = np.log10(self.data[index,4])
return
def plot(self,X):
# Plot all the points in the dataset for host galaxy X.
ID = str(X)
self.select(ID)
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
plt.errorbar(self.logP, self.m, yerr=self.merr, fmt='.', ms=7, lw=1, color=self.colors[ID], label='NGC'+ID)
plt.xlabel('$\\log_{10} P / {\\rm days}$',fontsize=20)
plt.ylabel('${\\rm magnitude (AB)}$',fontsize=20)
plt.xlim(self.xlimits)
plt.ylim(self.ylimits)
plt.title('Cepheid Period-Luminosity (Riess et al 2011)',fontsize=20)
return
def overlay_straight_line_with(self,m=0.0,c=24.0):
# Overlay a straight line with gradient m and intercept c.
x = self.xlimits
y = m*x + c
plt.plot(x, y, 'k-', alpha=0.5, lw=2)
plt.xlim(self.xlimits)
plt.ylim(self.ylimits)
return
def add_legend(self):
plt.legend(loc='upper left')
return
C = Cepheids('R11ceph.dat')
print(C.colors)
"""
Explanation: A Look at Each Host Galaxy's Cepheids
Let's read in all the data, and look at each galaxy's Cepheid measurements separately. Instead of using pandas, we'll write our own simple data structure, and give it a custom plotting method so we can compare the different host galaxies' datasets.
End of explanation
"""
C.plot(4258)
C.plot(1309)
# for ID in C.list_hosts():
# C.plot(ID)
C.overlay_straight_line_with(m=-3.0,c=26.0)
C.add_legend()
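# The commented-out loop above hints at overlaying every host galaxy at once; the slope
# and intercept below are just illustrative guesses to compare by eye.
for ID in C.list_hosts():
    C.plot(ID)
C.overlay_straight_line_with(m=-2.5, c=25.0)
C.add_legend()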
"""
Explanation: OK, now we are all set up! Let's plot some data.
End of explanation
"""
|
nslatysheva/data_science_blogging
|
tricks_of_the_trade_ensembling/messy_modelling_simplified.ipynb
|
gpl-3.0
|
# Creating the dataset
# e.g. make_moons generates crescent-shaped data
# Check out make_classification, which generates ~linearly-separable data
from sklearn.datasets import make_moons
X, y = make_moons(
n_samples=500, # the number of observations
random_state=1,
noise=0.3 #0.3
)
# Take a peek
print(X[:10,])
print(y[:10])
"""
Explanation: Messy modelling: overfitting, cross-validation, and the bias-variance trade-off
Introduction
In this post you will get to grips with perhaps the most essential concept in machine learning: the bias-variance trade-off. The main idea here is that you want to create models that are as good at prediction as possible but that are still applicable to new data (i.e. are generalizable). The danger is that you can easily create models that overfit to the local noise in your specific dataset, which isn't too helpful and leads to poor generalizability since the noise is random and different in each dataset. Essentially, you want to create models that capture only the useful components of the dataset. Models that generalize very well but are too inflexible to generate good predictions are the other extreme we want to avoid (underfitting).
We discuss and demonstrate these concepts using the k-nearest neighbours algorithm, which has a simple parameter k which can be varied to cleanly demonstrate these ideas of underfitting, overfitting and generalization. Together, this bundle of concepts related to the balance between underfitting and overfitting is referred to as the bias-variance trade-off. Here is a table summarizing these different, related aspects of models, which you can refer to throughout this post.
We will explain what all of these terms mean and how they are inter-related. We will also discuss cross-validation, which is a good way of estimating the accuracy and generalizability of your models.
You will encounter all of these concepts in the next few blog posts in this series, which will cover model optimization, random forests, Naive Bayes, logistic regression and combining different models into an ensembled meta-model.
Generating the dataset
Let's start off by building an artificial dataset to play with. We can do this easily with the dataset generators in the sklearn.datasets package - here we use the make_moons() function (make_classification() is another option, which produces roughly linearly-separable data). Specifically, we will generate a relatively simple binary classification problem. To make it a bit more interesting, let's make the data crescent-shaped and add some random noise. This should make it more realistic and increase the difficulty of classifying observations.
End of explanation
"""
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
%matplotlib inline
# Plot the first feature against the other, color by class
plt.scatter(X[y == 1, 0], X[y == 1, 1], color="#EE3D34", marker="x")
plt.scatter(X[y == 0, 0], X[y == 0, 1], color="#4458A7", marker="o")
"""
Explanation: The dataset we just generated looks a bit like this:
End of explanation
"""
from sklearn.cross_validation import train_test_split
# Split into training and test sets
XTrain, XTest, yTrain, yTest = train_test_split(X, y, random_state=1)
"""
Explanation: Next up, let's split the dataset into a training and test set. The training set will be used to develop and tune our models. The test set will be completely left alone until the very end, at which point you'll run your finished models on it. Having a test set will allow you to get a good estimate of how well your models would perform out in the wild on unseen data.
End of explanation
"""
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
knn99 = KNeighborsClassifier(n_neighbors = 99)
knn99.fit(XTrain, yTrain)
yPredK99 = knn99.predict(XTest)
print "Overall Error of k=99 Model:", 1 - round(metrics.accuracy_score(yTest, yPredK99), 2)
knn1 = KNeighborsClassifier(n_neighbors = 1)
knn1.fit(XTrain, yTrain)
yPredK1 = knn1.predict(XTest)
print "Overall Error of k=1 Model:", 1 - round(metrics.accuracy_score(yTest, yPredK1), 2)
"""
Explanation: We are going to try to predict the classes with a k Nearest Neighbor (kNN) classifier. Chapter 2 of the Introduction to Statistical Learning book provides a great intro to the theory behind kNN. We are huge fans of the ISLR book, so definitely check it out if you have the time. You could also have a look at this previous post that teaches you how to implement the algorithm from scratch in Python.
Introducing the k hyperparameter in kNN
The kNN algorithm works by using information on the k-nearest neighbours of a new data point in order to classify it into a class. It simply looks at the class of other data points most similar to it (its 'nearest neighbours') and assigns the new data point to most common class of these neighbours. When using kNN, you have to set the value of k that you want the algorithm to use ahead of time, and it is not trivial to know which value to use.
If the value for k is high (e.g. k=99), then the model considers a large number of neighbours when making a decision about the class of an unknown datapoint. This means that the model is quite constrained, since it has to take a large amount of information into account when classifying instances. In other words, a high number for k gives rise to relatively "rigid" model behaviour.
By contrast, if the value for k is low (e.g. k=1 or k=2), then only a few neighbours are taken into account when making a classification decision. It is a very flexible model with a lot of complexity - it really fits very closely to the precise shape of the dataset. Hence, the predictions of the model are much more dependent on the local tendencies of the data (crucially, this includes the noise!).
Take a look at how the kNN algorithm separates the training cases when k=99 compared to when k=1. The green line is the decision boundary on the training data (i.e. the point at which the algorithm decides whether a data point as being blue or red).
In a minute, you'll learn to generate these plots yourself.
When k=99 (on the left), it looks like the model fit might be a bit too smooth and could stand to fit the data a bit closer. The model has low flexibility and low complexity. It paints the decision boundary with a broad brush. It has relatively high bias because we can tell it is not modelling the data as well as it could - it models the underlying generative process of the data as something too simple, and this is highly biased away from the ground truth. But, the decision boundary would probably look very similar if we redrew it on a slightly different dataset. It is a stable model that won't vary a lot - it has low variance.
When k=1 (on the right), you can see that the model is massively overfitting to the noise. It is technically perfectly correct on the training set (the error in the bottom right hand corner is equal to 0.00!), but hopefully you can see how this fit is way too sensitive to individual data points. Keep in mind we added noise to the dataset - it looks like this model fit is taking the noise too seriously and is fitting very closely to it. We can say that the k=1 model has high flexibility and high complexity. It tunes very tightly to the data. It also has low bias - if nothing else, the decision boundary certainly fits the trends in the data. But, the fitted boundary would drastically change on even slightly different data - it would vary significantly, i.e. the k=1 model has high variance.
But how well do these models generalize, i.e. how well would they perform on new data?
We have so far only looked at the training data, but quantifying training error isn't that useful. We want to know how well the models are modelling the underlying generative process of the data, not how well they recapitulate what they just learned on the training set. Let's take a look at how they perform on test data, since that gives a better impression of whether our models are actually good or not.
End of explanation
"""
knn50 = KNeighborsClassifier(n_neighbors = 50)
knn50.fit(XTrain, yTrain)
yPredK50 = knn50.predict(XTest)
print "Overall Error of k=50 Model:", 1 - round(metrics.accuracy_score(yTest, yPredK50), 2)
"""
Explanation: Actually, it looks like these models perform approximately as well on the test data. Here are the decision boundaries we learned on the training set, applied to the test set. See if you can figure out where the two models are making their mistakes.
It seems that the k=99 model isn't doing a good job at capturing the crescent shape of the data (it is underfitting), while the k=1 model is making mistakes from being horribly overfitted. The hallmark of overfitting is good training performance and bad testing performance, which is what we observe here.
Maybe intermediate values of k are where we want to be? Let's give it a shot:
End of explanation
"""
import numpy as np
from sklearn.cross_validation import train_test_split, cross_val_score
knn = KNeighborsClassifier()
# the range of number of neighbours we want to test
n_neighbors = np.arange(1, 141, 2)
# here we store the results of each model
train_scores = list()
test_scores = list()
cv_scores = list()
# loop through possible n_neighbours and try them out
for n in n_neighbors:
knn.n_neighbors = n
knn.fit(XTrain, yTrain)
train_scores.append(1 - metrics.accuracy_score(yTrain, knn.predict(XTrain))) # this will over-estimate the accuracy
test_scores.append(1 - metrics.accuracy_score(yTest, knn.predict(XTest)))
cv_scores.append(1 - cross_val_score(knn, XTrain, yTrain, cv = 5).mean()) # we take the mean of the CV scores
# what do these different sources think is the best value of k?
print('The best values of k are: \n{} According to the Training Set\n{} According to the Test Set and\n{} According to Cross-Validation'.format(
n_neighbors[train_scores == min(train_scores)],
n_neighbors[test_scores == min(test_scores)],
n_neighbors[cv_scores == min(cv_scores)]
)
)
# let's plot the error we get with different values of k
plt.plot(n_neighbors, train_scores, c = "grey", label = "Training Set")
plt.plot(n_neighbors, test_scores, c = "orange", label = "Test Set")
plt.plot(n_neighbors, cv_scores, c = "green", label = "Cross-Validation")
plt.xlabel('Number of K Nearest Neighbors')
plt.ylabel('Classification Error')
plt.gca().invert_xaxis()
plt.legend(loc = "lower left")
plt.show()
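# The cross_val_score call above hides the fold bookkeeping; here is an equivalent, more
# explicit sketch of 5-fold CV for a single value of k (illustrative only):
from sklearn.cross_validation import KFold
knn_check = KNeighborsClassifier(n_neighbors=50)
fold_errors = []
for train_idx, val_idx in KFold(len(XTrain), n_folds=5):
    knn_check.fit(XTrain[train_idx], yTrain[train_idx])
    fold_errors.append(1 - metrics.accuracy_score(yTrain[val_idx], knn_check.predict(XTrain[val_idx])))
print(np.mean(fold_errors))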
"""
Explanation: Looking better! Let's check out the decision boundary for the k=50 model.
Much better - the model fit is similar to the actual trend in the dataset and this improvement is reflected in a lower test set error.
The bias-variance trade-off: concluding comments
Hopefully you now have a good intuition over what it means for models to underfit and overfit. See if all of the terms in the beginning of this post now make sense. Before we throw tons of code at you, let's finish up talking about the bias-variance trade-off.
To recap, when we train machine learning algorithms on a dataset, what we are really interested in is how our model will perform on an independent data set. It is not enough to do a good job classifying instances on the training set. Essentially, we are only interested in building models that are generalizable - getting 100% accuracy on the training set is not impressive, and is simply an indicator of overfitting. Overfitting is the situation in which we have fitted our model too closely to the data, and have tuned to the noise instead of just to the signal.
To be clear: strictly speaking, we are not trying to model the trends in the dataset. We try to model the underlying generative process that has created the data. The specific dataset we happen to be working with is just a small set of instances (i.e. a sample) of the ground truth, which brings with it its own noise and peculiarities.
Here is a summary figure showing how under-fitting (high bias, low variance), properly fitting, and over-fitting (low bias, high variance) models fare on the training compared to the test sets:
This idea of building generalizable models is the motivation behind splitting your dataset into a training set (on which models can be trained) and a test set (which is held out until the very end of your analysis, and provides an accurate measure of model performance).
But - big warning! It's also possible to overfit to the test set. If we were to try lots of different models out and keep changing them in order to chase accuracy points on the test set, then the information from the test set can inadvertently leak into our model creation phase, which is a big no-no. We need a way around this.
Estimating model performance using k-fold cross validation
Enter k-fold cross-validation, which is a handy technique for measuring a model's performance using only the training set. Say that we want to do e.g. 10-fold cross-validation. The process is as follows: we randomly partition the training set into 10 equal sections. Then, we train an algorithm on 9/10ths (i.e. 9 out of the 10 sections) of that training set. We then evaluate its performance on the remaining 1 section. This gives us some measure of the model's performance (e.g. overall accuracy). We then train the algorithm on a different 9/10ths of the training set, and evaluate on the other (different from before) remaining 1 section. We continue the process 10 times, get 10 different measures of model performance, and average these values to get an overall measure of performance. Of course, we could have chosen some number other than 10. To keep on with the example, the process behind 10-fold CV looks like this:
We can use k-fold cross validation to get an estimate of model accuracy, and we can use these estimates to tweak our model until we are happy. This lets us leave the test data alone until the very end, thus side-stepping the danger of overfitting to it. k-fold cross validation is extremely popular and very useful, especially if you're trying out lots and lots of different models (e.g. if you want to test how well a load of differently parameterized models perform).
Comparing training error, cross-validation error, and test error
Let's try out different values for k and see which ones fare best on the training set, in k-fold cross validation, and on the test set!
End of explanation
"""
# Let's tone down the noise in the dataset
X_no_noise, y_no_noise = make_moons(
n_samples=500, # the number of observations
random_state=1,
noise=0.1
)
# Plot the first feature against the other, color by class
plt.scatter(X_no_noise[y == 1, 0], X_no_noise[y == 1, 1], color="#EE3D34", marker="x")
plt.scatter(X_no_noise[y == 0, 0], X_no_noise[y == 0, 1], color="#4458A7", marker="o")
"""
Explanation: This plot really reinforces the point that low values of k tend to lead to very little error (i.e. high accuracy) on the training set, but much more error in the testing set and on cross-validation. We can also see that cross-validation is a reasonable estimator of test error.
Wait, what about datasets without lots of noise?
Whew, lots of theory! Just a bit more. We have been talking all along about how overfitting occurs when models starting tuning to the noise. But what if we had a dataset without a lot of noise? Would overfitting really be so bad then? What would "overfitting" even mean in this context? For example, say we had this dataset:
End of explanation
"""
import numpy as np
def detect_plot_dimension(X, h=0.02, b=0.05):
x_min, x_max = X[:, 0].min() - b, X[:, 0].max() + b
y_min, y_max = X[:, 1].min() - b, X[:, 1].max() + b
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
dimension = xx, yy
return dimension
def detect_decision_boundary(dimension, model):
xx, yy = dimension # unpack the dimensions
boundary = model.predict(np.c_[xx.ravel(), yy.ravel()])
boundary = boundary.reshape(xx.shape) # Put the result into a color plot
return boundary
def plot_decision_boundary(panel, dimension, boundary, colors=['#DADDED', '#FBD8D8']):
xx, yy = dimension # unpack the dimensions
panel.contourf(xx, yy, boundary, cmap=ListedColormap(colors), alpha=1)
panel.contour(xx, yy, boundary, colors="g", alpha=1, linewidths=0.5) # the decision boundary in green
def plot_dataset(panel, X, y, colors=["#EE3D34", "#4458A7"], markers=["x", "o"]):
panel.scatter(X[y == 1, 0], X[y == 1, 1], color=colors[0], marker=markers[0])
panel.scatter(X[y == 0, 0], X[y == 0, 1], color=colors[1], marker=markers[1])
def calculate_prediction_error(model, X, y):
yPred = model.predict(X)
score = 1 - round(metrics.accuracy_score(y, yPred), 2)
return score
def plot_prediction_error(panel, dimension, score, b=.3):
xx, yy = dimension # unpack the dimensions
panel.text(xx.max() - b, yy.min() + b, ('%.2f' % score).lstrip('0'), size=15, horizontalalignment='right')
def explore_fitting_boundaries(model, n_neighbors, datasets, width):
    # determine the height of the plot given that the aspect ratio of each panel should be equal
height = float(width)/len(n_neighbors) * len(datasets.keys())
nrows = len(datasets.keys())
ncols = len(n_neighbors)
# set up the plot
figure, axes = plt.subplots(
nrows,
ncols,
figsize=(width, height),
sharex=True,
sharey=True
)
    dimension = detect_plot_dimension(X, h=0.02)  # the dimensions of each subplot, based on the data
# Plotting the dataset and decision boundaries
i = 0
for n in n_neighbors:
model.n_neighbors = n
model.fit(datasets["Training Set"][0], datasets["Training Set"][1])
boundary = detect_decision_boundary(dimension, model)
j = 0
for d in datasets.keys():
try:
panel = axes[j, i]
except (TypeError, IndexError):
if (nrows * ncols) == 1:
panel = axes
elif nrows == 1: # if we only have one dataset
panel = axes[i]
elif ncols == 1: # if we only try one number of neighbors
panel = axes[j]
plot_decision_boundary(panel, dimension, boundary) # plot the decision boundary
plot_dataset(panel, X=datasets[d][0], y=datasets[d][1]) # plot the observations
score = calculate_prediction_error(model, X=datasets[d][0], y=datasets[d][1])
plot_prediction_error(panel, dimension, score, b=0.2) # plot the score
# make compacted layout
panel.set_frame_on(False)
panel.set_xticks([])
panel.set_yticks([])
# format the axis labels
if i == 0:
panel.set_ylabel(d)
if j == 0:
panel.set_title('k={}'.format(n))
j += 1
i += 1
plt.subplots_adjust(hspace=0, wspace=0) # make compacted layout
"""
Explanation: Running the kNN code (with k=1) on the original noisy dataset (left) and the new less noisy dataset (right) leads to this result:
Obviously this is an extreme example, but in this case it turned out that the less noisy data (right) actually needed a very finely tuned model (k=1), whereas this model previously overfitted on the more noisy dataset (left). Most real-life datasets will be full of noise, so overfitting is always a danger. However, what exactly constitutes "overfitting" will differ tremendously between datasets, and you have to decide what course of action is optimal. The best guidance is your model's performance on the test set and in cross-validation. If a model that "should" be overfitting is actually generalizing well, then there is no need to worry.
Just show me the code!
Aha, so you've made it this far. Here is our code for generating the above plots, and doing the training and testing of different kNN algorithms. This code is largely a simplified version of this scikit-learn example, and most of it deals with the finicky details of making the plots look nice. The meaty machine learning parts of splitting the dataset, fitting the algorithm, and testing it were covered above.
End of explanation
"""
# specify the model and settings
model = KNeighborsClassifier()
n_neighbors = [200, 100, 20, 5, 1]
datasets = {
"Training Set": [XTrain, yTrain],
"Test Set": [XTest, yTest]
}
width = 15
# explore_fitting_boundaries(model, n_neighbors, datasets, width)
explore_fitting_boundaries(model=model, n_neighbors=n_neighbors, datasets=datasets, width=width)
"""
Explanation: We then run the code like this:
End of explanation
"""
|
wittawatj/fsic-test
|
ipynb/ex2_results.ipynb
|
mit
|
%load_ext autoreload
%autoreload 2
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
#%config InlineBackend.figure_format = 'pdf'
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import fsic.data as data
import fsic.glo as glo
import fsic.indtest as it
import fsic.kernel as kernel
import fsic.plot as plot
import fsic.util as util
import scipy.stats as stats
plot.set_default_matplotlib_options()
"""
# font options
font = {
#'family' : 'normal',
#'weight' : 'bold',
'size' : 18
}
plt.rc('font', **font)
"""
def load_plot_vs_params(fname, h1_true=True, xlabel='Problem parameter', show_legend=True):
func_xvalues = lambda agg_results: agg_results['prob_params']
ex = 2
def func_title(agg_results):
repeats, _, n_methods = agg_results['job_results'].shape
alpha = agg_results['alpha']
test_size = (1.0 - agg_results['tr_proportion'])*agg_results['sample_size']
title = '%s. %d trials. test size: %d. $\\alpha$ = %.2g.'%\
( agg_results['prob_label'], repeats, test_size, alpha)
return title
#plt.figure(figsize=(10,5))
results = plot.plot_prob_reject(
ex, fname, h1_true, func_xvalues, xlabel=xlabel, func_title=func_title)
plt.title('')
plt.gca().legend(loc='best').set_visible(show_legend)
#plt.grid(True)
return results
def load_runtime_vs_params(fname, h1_true=True, xlabel='Problem parameter',
show_legend=True, xscale='linear', yscale='log'):
func_xvalues = lambda agg_results: agg_results['prob_params']
ex = 2
def func_title(agg_results):
repeats, _, n_methods = agg_results['job_results'].shape
alpha = agg_results['alpha']
title = '%s. %d trials. $\\alpha$ = %.2g.'%\
( agg_results['prob_label'], repeats, alpha)
return title
#plt.figure(figsize=(10,6))
results = plot.plot_runtime(ex, fname,
func_xvalues, xlabel=xlabel, func_title=func_title)
plt.title('')
plt.gca().legend(loc='best').set_visible(show_legend)
#plt.grid(True)
if xscale is not None:
plt.xscale(xscale)
if yscale is not None:
plt.yscale(yscale)
return results
# H0 true. Same Gaussian.
sg_fname = 'ex2-sg-me6_n4000_J1_rs300_pmi10.000_pma90.000_a0.050_trp0.50.p'
#sg_fname = 'ex2-sg-me5_n4000_J1_rs300_pmi10.000_pma90.000_a0.050_trp0.50.p'
#g_results = load_plot_vs_params(
# sg_fname, h1_true=False, xlabel='$d_x$ and $d_y$', show_legend=True)
#lt.ylim([0.03, 0.1])
#plt.savefig(gmd_fname.replace('.p', '.pdf', 1))
# H0 true. Same Gaussian. Large dimensions
#bsg_fname = 'ex2-bsg-me7_n4000_J1_rs300_pmi100.000_pma500.000_a0.050_trp0.50.p'
bsg_fname = 'ex2-bsg-me6_n4000_J1_rs300_pmi100.000_pma400.000_a0.050_trp0.50.p'
#bsg_results = load_plot_vs_params(bsg_fname, h1_true=False, xlabel='$d_x$ and $d_y$',
# show_legend=False)
#plt.ylim([0.03, 0.1])
#plt.savefig(bsg_fname.replace('.p', '.pdf', 1), bbox_inches='tight')
# sin frequency problem
sin_fname = 'ex2-sin-me6_n4000_J1_rs300_pmi1.000_pma6.000_a0.050_trp0.50.p'
# sin_fname = 'ex2-sin-me6_n4000_J1_rs100_pmi1.000_pma6.000_a0.050_trp0.20.p'
#sin_fname = 'ex2-sin-me7_n4000_J1_rs300_pmi1.000_pma6.000_a0.050_trp0.50.p'
sin_results = load_plot_vs_params(
sin_fname, h1_true=True, xlabel=r'$\omega$ in $1+\sin(\omega x)\sin(\omega y)$',
show_legend=False)
plt.savefig(sin_fname.replace('.p', '.pdf', 1), bbox_inches='tight')
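# Illustrative sketch (not from the original notebook) of the sinusoidal toy problem named in
# the x-label above: (X, Y) with density proportional to 1 + sin(w*x)*sin(w*y), assumed here to
# be supported on [-pi, pi]^2. Simple rejection sampling from a uniform proposal; the function
# name is invented for this example.
def sample_sin_problem(n, w, seed=0):
    rng = np.random.RandomState(seed)
    samples = []
    while len(samples) < n:
        x, y = rng.uniform(-np.pi, np.pi, size=2)
        # The unnormalized density is bounded above by 2, so accept with probability (1 + sin*sin)/2.
        if rng.uniform(0.0, 2.0) < 1.0 + np.sin(w * x) * np.sin(w * y):
            samples.append((x, y))
    return np.array(samples)
# e.g. xy = sample_sin_problem(1000, w=2); plt.scatter(xy[:, 0], xy[:, 1], s=2)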
# Gaussian sign problem
gsign_fname = 'ex2-gsign-me6_n4000_J1_rs300_pmi1.000_pma6.000_a0.050_trp0.50.p'
#gsign_fname = 'ex2-gsign-me7_n4000_J1_rs300_pmi1.000_pma6.000_a0.050_trp0.50.p'
#gsign_fname = 'ex2-gsign-me10_n4000_J1_rs100_pmi1.000_pma5.000_a0.050_trp0.50.p'
gsign_results = load_plot_vs_params(gsign_fname, h1_true=True,
xlabel='$d_x$', show_legend=False)
# plt.legend(bbox_to_anchor=(1.1, 1.05))
plt.savefig(gsign_fname.replace('.p', '.pdf', 1), bbox_inches='tight')
"""
Explanation: A notebook to process experimental results of ex2_prob_params.py. p(reject) as problem parameters are varied.
End of explanation
"""
# H0 true. Same Gaussian. medium-sized dimensions
#msg_fname = 'ex2-msg-me10_n4000_J1_rs100_pmi100.000_pma500.000_a0.050_trp0.50.p'
msg_fname = 'ex2-msg-me6_n4000_J1_rs300_pmi50.000_pma250.000_a0.050_trp0.50.p'
msg_results = load_plot_vs_params(msg_fname, h1_true=False, xlabel='$d_x$ and $d_y$',
show_legend=False)
plt.savefig(msg_fname.replace('.p', '.pdf', 1), bbox_inches='tight')
#plt.ylim([0.03, 0.1])
load_runtime_vs_params(msg_fname, h1_true=False, show_legend=False,
yscale='log', xlabel='$d_x$ and $d_y$');
plt.savefig(msg_fname.replace('.p', '', 1)+'_time.pdf', bbox_inches='tight')
# pairwise sign problem
pws_fname = 'ex2-pwsign-me6_n4000_J1_rs200_pmi20.000_pma100.000_a0.050_trp0.50.p'
#pwd_results = load_plot_vs_params(
# pws_fname, h1_true=True, xlabel=r'$d$',
# show_legend=True)
#plt.ylim([0, 1.1])
# uniform rotate with noise dimensions
urot_noise_fname = 'ex2-urot_noise-me6_n4000_J1_rs200_pmi0.000_pma6.000_a0.050_trp0.50.p'
#urot_noise_results = load_plot_vs_params(
# urot_noise_fname, h1_true=True, xlabel='Noise dimensions for X and Y',
# show_legend=True)
# Vary the rotation angle
#u2drot_fname = 'ex2-u2drot-me8_n4000_J1_rs200_pmi0.000_pma10.000_a0.010_trp0.50.p'
u2drot_fname = 'ex2-u2drot-me6_n4000_J1_rs200_pmi0.000_pma10.000_a0.050_trp0.50.p'
#u2drot_fname = 'ex2-u2drot-me5_n4000_J1_rs300_pmi0.000_pma10.000_a0.050_trp0.50.p'
#u2drot_results = load_plot_vs_params(
# u2drot_fname, h1_true=True, xlabel='Rotation angle (in degrees)', show_legend=True)
#plt.ylim([0, 0.05])
"""
Explanation: A toy problem where X follows the standard multivariate Gaussian,
and Y = sign(product(X))*|Z| where Z ~ N(0, 1).
End of explanation
"""
#fname = 'sin-job_nfsicJ10_opt-n4000_J1_r220_p5.000_a0.010_trp0.50.p'
#fname = 'sg-job_nfsicJ10_perm_med-n4000_J1_r8_p50.000_a0.050_trp0.50.p'
#fpath = glo.ex_result_file(2, 'sg', fname)
#result = glo.pickle_load(fpath)
"""
Explanation: Examine a trial file
End of explanation
"""
#fname = 'ex2-sin-me7_n4000_J1_rs200_pmi1.000_pma5.000_a0.010_trp0.50.p'
fname = 'ex2-sg-me6_n4000_J1_rs100_pmi10.000_pma90.000_a0.050_trp0.50.p'
#fname = 'ex2-u2drot-me7_n4000_J1_rs200_pmi0.000_pma10.000_a0.010_trp0.50.p'
fpath = glo.ex_result_file(2, fname)
result = glo.pickle_load(fpath)
def load_tpm_table(ex, fname, key):
"""
Load a trials x parameters x methods numpy array of results.
The value to load is specified by the key.
"""
results = glo.ex_load_result(ex, fname)
f_val = lambda job_results: job_results['test_result'][key]
vf_val = np.vectorize(f_val)
    # Each entry of results['job_results'] is a dictionary of the form
    # {'test_result': <dict returned by perform_test(te)>, ...}
vals = vf_val(results['job_results'])
#repeats, _, n_methods = results['job_results'].shape
met_job_funcs = results['method_job_funcs']
return vals, met_job_funcs
sta, met_job_funcs = load_tpm_table(ex=2, fname=fname, key='test_stat')
sta.shape
met_job_funcs
nfsicJ10_stats = sta[:, :, 1]
plt.figure(figsize=(12, 5))
plt.imshow(nfsicJ10_stats.T, interpolation='none')
plt.colorbar(orientation='horizontal')
J = 10
thresh = stats.chi2.isf(0.05, df=J)
np.mean(nfsicJ10_stats > thresh, 0)
param_stats = nfsicJ10_stats[:, 3]
plt.hist(param_stats, normed=True)
dom = np.linspace(1e-1, np.max(param_stats)+2, 500)
chi2_den = stats.chi2.pdf(dom, df=J)
plt.plot(dom, chi2_den, '-')
"""
Explanation: Examine a result file
End of explanation
"""
|
dsacademybr/PythonFundamentos
|
Cap05/Notebooks/DSA-Python-Cap05-Exercicios-Solucao.ipynb
|
gpl-3.0
|
# Python language version
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
"""
Explanation: <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 5</font>
Download: http://github.com/dsacademybr
End of explanation
"""
# Exercise 1 - Create an object from the class below, called roc1, passing 2 parameters, and then
# access its attributes and call its methods
from math import sqrt
class Rocket():
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def move_rocket(self, x_increment=0, y_increment=1):
self.x += x_increment
self.y += y_increment
def print_rocket(self):
print(self.x, self.y)
roc1 = Rocket(10,34)
roc1.x
roc1.y
roc1.print_rocket()
roc1.move_rocket(12, 44)
roc1.print_rocket()
# Exercise 2 - Create a class called Pessoa() with the attributes: nome, cidade, telefone and email. Use at least 2
# special methods in your class. Create an object of your class and make a call to at least one of its special
# methods
class Pessoa():
def __init__(self, nome, cidade, telefone, email):
self.nome = nome
self.cidade = cidade
self.telefone = telefone
self.email = email
print("Objeto criado")
def __str__(self):
return "O usuário " + self.nome + " mora na cidade " + self.cidade
P1 = Pessoa("Pele", "Três Corações", 99887766, "pele@gmail.com")
str(P1)
# Exercise 3 - Create the Smartphone class with 2 attributes, tamanho and interface, and create the MP3Player class
# with the attribute capacidade. The MP3Player class must inherit the attributes of the Smartphone class.
class Smartphone(object):
def __init__(self, tamanho, interface):
self.tamanho = tamanho
self.interface = interface
class MP3Player(Smartphone):
def __init__(self, capacidade, tamanho = 'Pequeno', interface = 'Led'):
self.capacidade = capacidade
Smartphone.__init__(self, tamanho, interface)
def print_mp3player(self):
print("Valores para o objeto criado: %s %s %s" %(self.tamanho, self.interface, self.capacidade))
device1 = MP3Player('64 GB')
device1.print_mp3player()
"""
Explanation: Exercises
End of explanation
"""
|
guyk1971/deep-learning
|
batch-norm/Batch_Normalization_Lesson_GK.ipynb
|
mit
|
# Import necessary packages
import tensorflow as tf
import tqdm
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Import MNIST data so we have something for our experiments
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../gan_mnist/MNIST_data/", one_hot=True) # GK: changed to the relevant folder
"""
Explanation: Batch Normalization – Lesson
What is it?
What are its benefits?
How do we add it to a network?
Let's see it work!
What are you hiding?
What is Batch Normalization?<a id='theory'></a>
Batch normalization was introduced in Sergey Ioffe's and Christian Szegedy's 2015 paper Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift. The idea is that, instead of just normalizing the inputs to the network, we normalize the inputs to layers within the network. It's called "batch" normalization because during training, we normalize each layer's inputs by using the mean and variance of the values in the current mini-batch.
Why might this help? Well, we know that normalizing the inputs to a network helps the network learn. But a network is a series of layers, where the output of one layer becomes the input to another. That means we can think of any layer in a neural network as the first layer of a smaller network.
For example, imagine a 3 layer network. Instead of just thinking of it as a single network with inputs, layers, and outputs, think of the output of layer 1 as the input to a two layer network. This two layer network would consist of layers 2 and 3 in our original network.
Likewise, the output of layer 2 can be thought of as the input to a single layer network, consisting only of layer 3.
When you think of it like that - as a series of neural networks feeding into each other - then it's easy to imagine how normalizing the inputs to each layer would help. It's just like normalizing the inputs to any other neural network, but you're doing it at every layer (sub-network).
Beyond the intuitive reasons, there are good mathematical reasons why it helps the network learn better, too. It helps combat what the authors call internal covariate shift. This discussion is best handled in the paper and in Deep Learning a book you can read online written by Ian Goodfellow, Yoshua Bengio, and Aaron Courville. Specifically, check out the batch normalization section of Chapter 8: Optimization for Training Deep Models.
Benefits of Batch Normalization<a id="benefits"></a>
Batch normalization optimizes network training. It has been shown to have several benefits:
1. Networks train faster – Each training iteration will actually be slower because of the extra calculations during the forward pass and the additional parameters to train during back propagation. However, it should converge much more quickly, so training should be faster overall.
2. Allows higher learning rates – Gradient descent usually requires small learning rates for the network to converge. And as networks get deeper, their gradients get smaller during back propagation so they require even more iterations. Using batch normalization allows us to use much higher learning rates, which further increases the speed at which networks train.
3. Makes weights easier to initialize – Weight initialization can be difficult, and it's even more difficult when creating deeper networks. Batch normalization seems to allow us to be much less careful about choosing our initial starting weights.
4. Makes more activation functions viable – Some activation functions do not work well in some situations. Sigmoids lose their gradient pretty quickly, which means they can't be used in deep networks. And ReLUs often die out during training, where they stop learning completely, so we need to be careful about the range of values fed into them. Because batch normalization regulates the values going into each activation function, non-linearities that don't seem to work well in deep networks actually become viable again.
5. Simplifies the creation of deeper networks – Because of the first 4 items listed above, it is easier to build and faster to train deeper neural networks when using batch normalization. And it's been shown that deeper networks generally produce better results, so that's great.
6. Provides a bit of regularlization – Batch normalization adds a little noise to your network. In some cases, such as in Inception modules, batch normalization has been shown to work as well as dropout. But in general, consider batch normalization as a bit of extra regularization, possibly allowing you to reduce some of the dropout you might add to a network.
7. May give better results overall – Some tests seem to show batch normalization actually improves the training results. However, it's really an optimization to help train faster, so you shouldn't think of it as a way to make your network better. But since it lets you train networks faster, that means you can iterate over more designs more quickly. It also lets you build deeper networks, which are usually better. So when you factor in everything, you're probably going to end up with better results if you build your networks with batch normalization.
Batch Normalization in TensorFlow<a id="implementation_1"></a>
This section of the notebook shows you one way to add batch normalization to a neural network built in TensorFlow.
The following cell imports the packages we need in the notebook and loads the MNIST dataset to use in our experiments. However, the tensorflow package contains all the code you'll actually need for batch normalization.
End of explanation
"""
class NeuralNet:
def __init__(self, initial_weights, activation_fn, use_batch_norm):
"""
Initializes this object, creating a TensorFlow graph using the given parameters.
:param initial_weights: list of NumPy arrays or Tensors
Initial values for the weights for every layer in the network. We pass these in
so we can create multiple networks with the same starting weights to eliminate
training differences caused by random initialization differences.
The number of items in the list defines the number of layers in the network,
and the shapes of the items in the list define the number of nodes in each layer.
e.g. Passing in 3 matrices of shape (784, 256), (256, 100), and (100, 10) would
create a network with 784 inputs going into a hidden layer with 256 nodes,
followed by a hidden layer with 100 nodes, followed by an output layer with 10 nodes.
:param activation_fn: Callable
The function used for the output of each hidden layer. The network will use the same
            activation function on every hidden layer and no activation function on the output layer.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
:param use_batch_norm: bool
Pass True to create a network that uses batch normalization; False otherwise
Note: this network will not use batch normalization on layers that do not have an
activation function.
"""
# Keep track of whether or not this network uses batch normalization.
self.use_batch_norm = use_batch_norm
self.name = "With Batch Norm" if use_batch_norm else "Without Batch Norm"
# Batch normalization needs to do different calculations during training and inference,
# so we use this placeholder to tell the graph which behavior to use.
self.is_training = tf.placeholder(tf.bool, name="is_training")
# This list is just for keeping track of data we want to plot later.
# It doesn't actually have anything to do with neural nets or batch normalization.
self.training_accuracies = []
# Create the network graph, but it will not actually have any real values until after you
# call train or test
self.build_network(initial_weights, activation_fn)
def build_network(self, initial_weights, activation_fn):
"""
Build the graph. The graph still needs to be trained via the `train` method.
:param initial_weights: list of NumPy arrays or Tensors
See __init__ for description.
:param activation_fn: Callable
See __init__ for description.
"""
self.input_layer = tf.placeholder(tf.float32, [None, initial_weights[0].shape[0]])
layer_in = self.input_layer
for weights in initial_weights[:-1]:
layer_in = self.fully_connected(layer_in, weights, activation_fn)
self.output_layer = self.fully_connected(layer_in, initial_weights[-1])
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
"""
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
            e.g. Passing in a matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
"""
# Since this class supports both options, only use batch normalization when
# requested. However, do not use it on the final layer, which we identify
# by its lack of an activation function.
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
# (See later in the notebook for more details.)
weights = tf.Variable(initial_weights)
linear_output = tf.matmul(layer_in, weights)
# Apply batch normalization to the linear combination of the inputs and weights
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
# Now apply the activation function, *after* the normalization.
return activation_fn(batch_normalized_output)
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
return linear_output if not activation_fn else activation_fn(linear_output)
def train(self, session, learning_rate, training_batches, batches_per_sample, save_model_as=None):
"""
Trains the model on the MNIST training dataset.
:param session: Session
Used to run training graph operations.
:param learning_rate: float
Learning rate used during gradient descent.
:param training_batches: int
Number of batches to train.
:param batches_per_sample: int
How many batches to train before sampling the validation accuracy.
:param save_model_as: string or None (default None)
Name to use if you want to save the trained model.
"""
# This placeholder will store the target labels for each mini batch
labels = tf.placeholder(tf.float32, [None, 10])
# Define loss and optimizer
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.output_layer))
# Define operations for testing
correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#########################
# GK : the following if statement is not clear...
if self.use_batch_norm:
# If we don't include the update ops as dependencies on the train step, the
# tf.layers.batch_normalization layers won't update their population statistics,
# which will cause the model to fail at inference time
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
#####################
# Train for the appropriate number of batches. (tqdm is only for a nice timing display)
for i in tqdm.tqdm(range(training_batches)):
# We use batches of 60 just because the original paper did. You can use any size batch you like.
batch_xs, batch_ys = mnist.train.next_batch(60)
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
# Periodically test accuracy against the 5k validation images and store it for plotting later.
if i % batches_per_sample == 0:
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
self.training_accuracies.append(test_accuracy)
# After training, report accuracy against test data
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.validation.images,
labels: mnist.validation.labels,
self.is_training: False})
print('{}: After training, final accuracy on validation set = {}'.format(self.name, test_accuracy))
# If you want to use this model later for inference instead of having to retrain it,
# just construct it with the same parameters and then pass this file to the 'test' function
if save_model_as:
tf.train.Saver().save(session, save_model_as)
def test(self, session, test_training_accuracy=False, include_individual_predictions=False, restore_from=None):
"""
        Tests a trained model on the MNIST testing dataset.
:param session: Session
Used to run the testing graph operations.
:param test_training_accuracy: bool (default False)
If True, perform inference with batch normalization using batch mean and variance;
if False, perform inference with batch normalization using estimated population mean and variance.
Note: in real life, *always* perform inference using the population mean and variance.
This parameter exists just to support demonstrating what happens if you don't.
        :param include_individual_predictions: bool (default False)
This function always performs an accuracy test against the entire test set. But if this parameter
is True, it performs an extra test, doing 200 predictions one at a time, and displays the results
and accuracy.
:param restore_from: string or None (default None)
Name of a saved model if you want to test with previously saved weights.
"""
# This placeholder will store the true labels for each mini batch
labels = tf.placeholder(tf.float32, [None, 10])
# Define operations for testing
correct_prediction = tf.equal(tf.argmax(self.output_layer, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# If provided, restore from a previously saved model
if restore_from:
tf.train.Saver().restore(session, restore_from)
# Test against all of the MNIST test data
test_accuracy = session.run(accuracy, feed_dict={self.input_layer: mnist.test.images,
labels: mnist.test.labels,
self.is_training: test_training_accuracy})
print('-'*75)
print('{}: Accuracy on full test set = {}'.format(self.name, test_accuracy))
# If requested, perform tests predicting individual values rather than batches
if include_individual_predictions:
predictions = []
correct = 0
# Do 200 predictions, 1 at a time
for i in range(200):
# This is a normal prediction using an individual test case. However, notice
# we pass `test_training_accuracy` to `feed_dict` as the value for `self.is_training`.
# Remember that will tell it whether it should use the batch mean & variance or
                # the population estimates that were calculated while training the model.
pred, corr = session.run([tf.arg_max(self.output_layer,1), accuracy],
feed_dict={self.input_layer: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
self.is_training: test_training_accuracy})
correct += corr
predictions.append(pred[0])
print("200 Predictions:", predictions)
print("Accuracy on 200 samples:", correct/200)
"""
Explanation: Neural network classes for testing
The following class, NeuralNet, allows us to create identical neural networks with and without batch normalization. The code is heavily documented, but there is also some additional discussion later. You do not need to read through it all before going through the rest of the notebook, but the comments within the code blocks may answer some of your questions.
About the code:
This class is not meant to represent TensorFlow best practices – the design choices made here are to support the discussion related to batch normalization.
It's also important to note that we use the well-known MNIST data for these examples, but the networks we create are not meant to be good for performing handwritten character recognition. We chose this network architecture because it is similar to the one used in the original paper, which is complex enough to demonstrate some of the benefits of batch normalization while still being fast to train.
End of explanation
"""
def plot_training_accuracies(*args, **kwargs):
"""
Displays a plot of the accuracies calculated during training to demonstrate
how many iterations it took for the model(s) to converge.
:param args: One or more NeuralNet objects
You can supply any number of NeuralNet objects as unnamed arguments
and this will display their training accuracies. Be sure to call `train`
the NeuralNets before calling this function.
:param kwargs:
You can supply any named parameters here, but `batches_per_sample` is the only
one we look for. It should match the `batches_per_sample` value you passed
to the `train` function.
"""
fig, ax = plt.subplots()
batches_per_sample = kwargs['batches_per_sample']
for nn in args:
ax.plot(range(0,len(nn.training_accuracies)*batches_per_sample,batches_per_sample),
nn.training_accuracies, label=nn.name)
ax.set_xlabel('Training steps')
ax.set_ylabel('Accuracy')
ax.set_title('Validation Accuracy During Training')
ax.legend(loc=4)
ax.set_ylim([0,1])
plt.yticks(np.arange(0, 1.1, 0.1))
plt.grid(True)
plt.show()
def train_and_test(use_bad_weights, learning_rate, activation_fn, training_batches=50000, batches_per_sample=500):
"""
Creates two networks, one with and one without batch normalization, then trains them
with identical starting weights, layers, batches, etc. Finally tests and plots their accuracies.
:param use_bad_weights: bool
If True, initialize the weights of both networks to wildly inappropriate weights;
if False, use reasonable starting weights.
:param learning_rate: float
Learning rate used during gradient descent.
:param activation_fn: Callable
The function used for the output of each hidden layer. The network will use the same
activation function on every hidden layer and no activate function on the output layer.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
:param training_batches: (default 50000)
Number of batches to train.
:param batches_per_sample: (default 500)
How many batches to train before sampling the validation accuracy.
"""
# Use identical starting weights for each network to eliminate differences in
# weight initialization as a cause for differences seen in training performance
#
# Note: The networks will use these weights to define the number of and shapes of
# its layers. The original batch normalization paper used 3 hidden layers
# with 100 nodes in each, followed by a 10 node output layer. These values
# build such a network, but feel free to experiment with different choices.
# However, the input size should always be 784 and the final output should be 10.
if use_bad_weights:
# These weights should be horrible because they have such a large standard deviation
weights = [np.random.normal(size=(784,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,100), scale=5.0).astype(np.float32),
np.random.normal(size=(100,10), scale=5.0).astype(np.float32)
]
else:
# These weights should be good because they have such a small standard deviation
weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,10), scale=0.05).astype(np.float32)
]
# Just to make sure the TensorFlow's default graph is empty before we start another
# test, because we don't bother using different graphs or scoping and naming
# elements carefully in this sample code.
tf.reset_default_graph()
# build two versions of same network, 1 without and 1 with batch normalization
nn = NeuralNet(weights, activation_fn, False)
bn = NeuralNet(weights, activation_fn, True)
# train and test the two models
with tf.Session() as sess:
tf.global_variables_initializer().run()
nn.train(sess, learning_rate, training_batches, batches_per_sample)
bn.train(sess, learning_rate, training_batches, batches_per_sample)
nn.test(sess)
bn.test(sess)
# Display a graph of how validation accuracies changed during training
# so we can compare how the models trained and when they converged
plot_training_accuracies(nn, bn, batches_per_sample=batches_per_sample)
"""
Explanation: There are quite a few comments in the code, so those should answer most of your questions. However, let's take a look at the most important lines.
We add batch normalization to layers inside the fully_connected function. Here are some important points about that code:
1. Layers with batch normalization do not include a bias term.
2. We use TensorFlow's tf.layers.batch_normalization function to handle the math. (We show lower-level ways to do this later in the notebook.)
3. We tell tf.layers.batch_normalization whether or not the network is training. This is an important step we'll talk about later.
4. We add the normalization before calling the activation function.
GK: although there are blogs that claim it might be better to call batch norm after the activation - right before the next layer - it's not clear which approach is best.
In addition to that code, the training step is wrapped in the following with statement:
python
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
This line actually works in conjunction with the training parameter we pass to tf.layers.batch_normalization. Without it, TensorFlow's batch normalization layer will not operate correctly during inference.
Finally, whenever we train the network or perform inference, we use the feed_dict to set self.is_training to True or False, respectively, like in the following line:
python
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
We'll go into more details later, but next we want to show some experiments that use this code and test networks with and without batch normalization.
Batch Normalization Demos<a id='demos'></a>
This section of the notebook trains various networks with and without batch normalization to demonstrate some of the benefits mentioned earlier.
We'd like to thank the author of this blog post Implementing Batch Normalization in TensorFlow. That post provided the idea of - and some of the code for - plotting the differences in accuracy during training, along with the idea for comparing multiple networks using the same initial weights.
Code to support testing
The following two functions support the demos we run in the notebook.
The first function, plot_training_accuracies, simply plots the values found in the training_accuracies lists of the NeuralNet objects passed to it. If you look at the train function in NeuralNet, you'll see that while it's training the network, it periodically measures validation accuracy and stores the results in that list. It does that just to support these plots.
The second function, train_and_test, creates two neural nets - one with and one without batch normalization. It then trains them both and tests them, calling plot_training_accuracies to plot how their accuracies changed over the course of training. The really important thing about this function is that it initializes the starting weights for the networks outside of the networks and then passes them in. This lets it train both networks from the exact same starting weights, which eliminates performance differences that might result from (un)lucky initial weights.
End of explanation
"""
train_and_test(False, 0.01, tf.nn.relu)
"""
Explanation: Comparisons between identical networks, with and without batch normalization
The next series of cells train networks with various settings to show the differences with and without batch normalization. They are meant to clearly demonstrate the effects of batch normalization. We include a deeper discussion of batch normalization later in the notebook.
The following creates two networks using a ReLU activation function, a learning rate of 0.01, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 0.01, tf.nn.relu, 2000, 50)
"""
Explanation: As expected, both networks train well and eventually reach similar test accuracies. However, notice that the model with batch normalization converges slightly faster than the other network, reaching accuracies over 90% almost immediately and nearing its max accuracy in 10 or 15 thousand iterations. The other network takes about 3 thousand iterations to reach 90% and doesn't near its best accuracy until 30 thousand or more iterations.
If you look at the raw speed, you can see that without batch normalization we were computing over 1100 batches per second, whereas with batch normalization that goes down to just over 500. However, batch normalization allows us to perform fewer iterations and converge in less time over all. (We only trained for 50 thousand batches here so we could plot the comparison.)
The following creates two networks with the same hyperparameters used in the previous example, but only trains for 2000 iterations.
End of explanation
"""
train_and_test(False, 0.01, tf.nn.sigmoid)
"""
Explanation: As you can see, using batch normalization produces a model with over 95% accuracy in only 2000 batches, and it was above 90% at somewhere around 500 batches. Without batch normalization, the model takes 1750 iterations just to hit 80% – the network with batch normalization hits that mark after around 200 iterations! (Note: if you run the code yourself, you'll see slightly different results each time because the starting weights - while the same for each model - are different for each run.)
In the above example, you should also notice that the networks trained fewer batches per second than what you saw in the previous example. That's because much of the time we're tracking is actually spent periodically performing inference to collect data for the plots. In this example we perform that inference every 50 batches instead of every 500, so generating the plot for this example requires 10 times the overhead for the same 2000 iterations.
The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 1, tf.nn.relu)
"""
Explanation: With the number of layers we're using and this small learning rate, using a sigmoid activation function takes a long time to start learning. It eventually starts making progress, but it took over 45 thousand batches just to get over 80% accuracy. Using batch normalization gets to 90% in around one thousand batches.
The following creates two networks using a ReLU activation function, a learning rate of 1, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 1, tf.nn.relu)
"""
Explanation: Now we're using ReLUs again, but with a larger learning rate. The plot shows how training started out pretty normally, with the network with batch normalization starting out faster than the other. But the higher learning rate bounces the accuracy around a bit more, and at some point the accuracy in the network without batch normalization just completely crashes. It's likely that too many ReLUs died off at this point because of the high learning rate.
The next cell shows the same test again. The network with batch normalization performs the same way, and the other suffers from the same problem again, but it manages to train longer before it happens.
End of explanation
"""
train_and_test(False, 1, tf.nn.sigmoid)
"""
Explanation: In both of the previous examples, the network with batch normalization manages to gets over 98% accuracy, and get near that result almost immediately. The higher learning rate allows the network to train extremely fast.
The following creates two networks using a sigmoid activation function, a learning rate of 1, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 1, tf.nn.sigmoid, 2000, 50)
"""
Explanation: In this example, we switched to a sigmoid activation function. It appears to handle the higher learning rate well, with both networks achieving high accuracy.
The cell below shows a similar pair of networks trained for only 2000 iterations.
End of explanation
"""
train_and_test(False, 2, tf.nn.relu)
"""
Explanation: As you can see, even though these parameters work well for both networks, the one with batch normalization gets over 90% in 400 or so batches, whereas the other takes over 1700. When training larger networks, these sorts of differences become more pronounced.
The following creates two networks using a ReLU activation function, a learning rate of 2, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 2, tf.nn.sigmoid)
"""
Explanation: With this very large learning rate, the network with batch normalization trains fine and almost immediately manages 98% accuracy. However, the network without normalization doesn't learn at all.
The following creates two networks using a sigmoid activation function, a learning rate of 2, and reasonable starting weights.
End of explanation
"""
train_and_test(False, 2, tf.nn.sigmoid, 2000, 50)
"""
Explanation: Once again, using a sigmoid activation function with the larger learning rate works well both with and without batch normalization.
However, look at the plot below where we train models with the same parameters but only 2000 iterations. As usual, batch normalization lets it train faster.
End of explanation
"""
train_and_test(True, 0.01, tf.nn.relu)
"""
Explanation: In the rest of the examples, we use really bad starting weights. That is, normally we would use very small values close to zero. However, in these examples we choose random values with a standard deviation of 5. If you were really training a neural network, you would not want to do this. But these examples demonstrate how batch normalization makes your network much more resilient.
The following creates two networks using a ReLU activation function, a learning rate of 0.01, and bad starting weights.
End of explanation
"""
train_and_test(True, 0.01, tf.nn.sigmoid)
"""
Explanation: As the plot shows, without batch normalization the network never learns anything at all. But with batch normalization, it actually learns pretty well and gets to almost 80% accuracy. The starting weights obviously hurt the network, but you can see how well batch normalization does in overcoming them.
The following creates two networks using a sigmoid activation function, a learning rate of 0.01, and bad starting weights.
End of explanation
"""
train_and_test(True, 1, tf.nn.relu)
"""
Explanation: Using a sigmoid activation function works better than the ReLU in the previous example, but without batch normalization it would take a tremendously long time to train the network, if it ever trained at all.
The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.<a id="successful_example_lr_1"></a>
End of explanation
"""
train_and_test(True, 1, tf.nn.sigmoid)
"""
Explanation: The higher learning rate used here allows the network with batch normalization to surpass 90% in about 30 thousand batches. The network without it never gets anywhere.
The following creates two networks using a sigmoid activation function, a learning rate of 1, and bad starting weights.
End of explanation
"""
train_and_test(True, 2, tf.nn.relu)
"""
Explanation: Using sigmoid works better than ReLUs for this higher learning rate. However, you can see that without batch normalization, the network takes a long time to train, bounces around a lot, and spends a long time stuck at 90%. The network with batch normalization trains much more quickly, seems to be more stable, and achieves a higher accuracy.
The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.<a id="successful_example_lr_2"></a>
End of explanation
"""
train_and_test(True, 2, tf.nn.sigmoid)
"""
Explanation: We've already seen that ReLUs do not do as well as sigmoids with higher learning rates, and here we are using an extremely high rate. As expected, without batch normalization the network doesn't learn at all. But with batch normalization, it eventually achieves 90% accuracy. Notice, though, how its accuracy bounces around wildly during training - that's because the learning rate is really much too high, so the fact that this worked at all is a bit of luck.
The following creates two networks using a sigmoid activation function, a learning rate of 2, and bad starting weights.
End of explanation
"""
train_and_test(True, 1, tf.nn.relu)
"""
Explanation: In this case, the network with batch normalization trained faster and reached a higher accuracy. Meanwhile, the high learning rate makes the network without normalization bounce around erratically and have trouble getting past 90%.
Full Disclosure: Batch Normalization Doesn't Fix Everything
Batch normalization isn't magic and it doesn't work every time. Weights are still randomly initialized and batches are chosen at random during training, so you never know exactly how training will go. Even for these tests, where we use the same initial weights for both networks, we still get different weights each time we run.
This section includes two examples that show runs when batch normalization did not help at all.
The following creates two networks using a ReLU activation function, a learning rate of 1, and bad starting weights.
End of explanation
"""
train_and_test(True, 2, tf.nn.relu)
"""
Explanation: When we used these same parameters earlier, we saw the network with batch normalization reach 92% validation accuracy. This time we used different starting weights, initialized using the same standard deviation as before, and the network doesn't learn at all. (Remember, an accuracy around 10% is what the network gets if it just guesses the same value all the time.)
The following creates two networks using a ReLU activation function, a learning rate of 2, and bad starting weights.
End of explanation
"""
def fully_connected(self, layer_in, initial_weights, activation_fn=None):
"""
Creates a standard, fully connected layer. Its number of inputs and outputs will be
defined by the shape of `initial_weights`, and its starting weight values will be
taken directly from that same parameter. If `self.use_batch_norm` is True, this
layer will include batch normalization, otherwise it will not.
:param layer_in: Tensor
The Tensor that feeds into this layer. It's either the input to the network or the output
of a previous layer.
:param initial_weights: NumPy array or Tensor
Initial values for this layer's weights. The shape defines the number of nodes in the layer.
        e.g. Passing in a matrix of shape (784, 256) would create a layer with 784 inputs and 256
outputs.
:param activation_fn: Callable or None (default None)
The non-linearity used for the output of the layer. If None, this layer will not include
batch normalization, regardless of the value of `self.use_batch_norm`.
e.g. Pass tf.nn.relu to use ReLU activations on your hidden layers.
"""
if self.use_batch_norm and activation_fn:
# Batch normalization uses weights as usual, but does NOT add a bias term. This is because
# its calculations include gamma and beta variables that make the bias term unnecessary.
weights = tf.Variable(initial_weights)
linear_output = tf.matmul(layer_in, weights)
num_out_nodes = initial_weights.shape[-1]
# Batch normalization adds additional trainable variables:
# gamma (for scaling) and beta (for shifting).
gamma = tf.Variable(tf.ones([num_out_nodes]))
beta = tf.Variable(tf.zeros([num_out_nodes]))
# These variables will store the mean and variance for this layer over the entire training set,
# which we assume represents the general population distribution.
# By setting `trainable=False`, we tell TensorFlow not to modify these variables during
# back propagation. Instead, we will assign values to these variables ourselves.
pop_mean = tf.Variable(tf.zeros([num_out_nodes]), trainable=False)
pop_variance = tf.Variable(tf.ones([num_out_nodes]), trainable=False)
# Batch normalization requires a small constant epsilon, used to ensure we don't divide by zero.
# This is the default value TensorFlow uses.
epsilon = 1e-3
def batch_norm_training():
# Calculate the mean and variance for the data coming out of this layer's linear-combination step.
# The [0] defines an array of axes to calculate over.
batch_mean, batch_variance = tf.nn.moments(linear_output, [0])
# Calculate a moving average of the training data's mean and variance while training.
# These will be used during inference.
# Decay should be some number less than 1. tf.layers.batch_normalization uses the parameter
# "momentum" to accomplish this and defaults it to 0.99
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
# The 'tf.control_dependencies' context tells TensorFlow it must calculate 'train_mean'
# and 'train_variance' before it calculates the 'tf.nn.batch_normalization' layer.
            # This is necessary because those two operations are not actually in the graph
# connecting the linear_output and batch_normalization layers,
# so TensorFlow would otherwise just skip them.
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
            # During inference, use our estimated population mean and variance to normalize the layer
return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)
# Use `tf.cond` as a sort of if-check. When self.is_training is True, TensorFlow will execute
# the operation returned from `batch_norm_training`; otherwise it will execute the graph
# operation returned from `batch_norm_inference`.
batch_normalized_output = tf.cond(self.is_training, batch_norm_training, batch_norm_inference)
# Pass the batch-normalized layer output through the activation function.
# The literature states there may be cases where you want to perform the batch normalization *after*
# the activation function, but it is difficult to find any uses of that in practice.
return activation_fn(batch_normalized_output)
else:
# When not using batch normalization, create a standard layer that multiplies
# the inputs and weights, adds a bias, and optionally passes the result
# through an activation function.
weights = tf.Variable(initial_weights)
biases = tf.Variable(tf.zeros([initial_weights.shape[-1]]))
linear_output = tf.add(tf.matmul(layer_in, weights), biases)
return linear_output if not activation_fn else activation_fn(linear_output)
"""
Explanation: When we trained with these parameters and batch normalization earlier, we reached 90% validation accuracy. However, this time the network almost starts to make some progress in the beginning, but it quickly breaks down and stops learning.
Note: Both of the above examples use extremely bad starting weights, along with learning rates that are too high. While we've shown batch normalization can overcome bad values, we don't mean to encourage actually using them. The examples in this notebook are meant to show that batch normalization can help your networks train better. But these last two examples should remind you that you still want to try to use good network design choices and reasonable starting weights. It should also remind you that the results of each attempt to train a network are a bit random, even when using otherwise identical architectures.
Batch Normalization: A Detailed Look<a id='implementation_2'></a>
The layer created by tf.layers.batch_normalization handles all the details of implementing batch normalization. Many students will be fine just using that and won't care about what's happening at the lower levels. However, some students may want to explore the details, so here is a short explanation of what's really happening, starting with the equations you're likely to come across if you ever read about batch normalization.
In order to normalize the values, we first need to find the average value for the batch. If you look at the code, you can see that this is not the average value of the batch inputs, but the average value coming out of any particular layer before we pass it through its non-linear activation function and then feed it as an input to the next layer.
We represent the average as $\mu_B$, which is simply the sum of all of the values $x_i$ divided by the number of values, $m$
$$
\mu_B \leftarrow \frac{1}{m}\sum_{i=1}^m x_i
$$
We then need to calculate the variance, or mean squared deviation, represented as $\sigma_{B}^{2}$. If you aren't familiar with statistics, that simply means for each value $x_i$, we subtract the average value (calculated earlier as $\mu_B$), which gives us what's called the "deviation" for that value. We square the result to get the squared deviation. Sum up the results of doing that for each of the values, then divide by the number of values, again $m$, to get the average, or mean, squared deviation.
$$
\sigma_{B}^{2} \leftarrow \frac{1}{m}\sum_{i=1}^m (x_i - \mu_B)^2
$$
Once we have the mean and variance, we can use them to normalize the values with the following equation. For each value, it subtracts the mean and divides by the (almost) standard deviation. (You've probably heard of standard deviation many times, but if you have not studied statistics you might not know that the standard deviation is actually the square root of the mean squared deviation.)
$$
\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}}
$$
Above, we said "(almost) standard deviation". That's because the real standard deviation for the batch is calculated by $\sqrt{\sigma_{B}^{2}}$, but the above formula adds the term epsilon, $\epsilon$, before taking the square root. The epsilon can be any small, positive constant - in our code we use the value 0.001. It is there partially to make sure we don't try to divide by zero, but it also acts to increase the variance slightly for each batch.
Why increase the variance? Statistically, this makes sense because even though we are normalizing one batch at a time, we are also trying to estimate the population distribution – the total training set, which is itself an estimate of the larger population of inputs your network wants to handle. The variance of a population is higher than the variance for any sample taken from that population, so increasing the variance a little bit for each batch helps take that into account.
At this point, we have a normalized value, represented as $\hat{x_i}$. But rather than use it directly, we multiply it by a gamma value, $\gamma$, and then add a beta value, $\beta$. Both $\gamma$ and $\beta$ are learnable parameters of the network and serve to scale and shift the normalized value, respectively. Because they are learnable just like weights, they give your network some extra knobs to tweak during training to help it learn the function it is trying to approximate.
$$
y_i \leftarrow \gamma \hat{x_i} + \beta
$$
We now have the final batch-normalized output of our layer, which we would then pass to a non-linear activation function like sigmoid, tanh, ReLU, Leaky ReLU, etc. In the original batch normalization paper (linked in the beginning of this notebook), they mention that there might be cases when you'd want to perform the batch normalization after the non-linearity instead of before, but it is difficult to find any uses like that in practice.
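To make the four equations above concrete, here is a minimal NumPy sketch of the forward pass (an illustration only; it is not the NeuralNet implementation, and gamma and beta are simply passed in rather than learned):
python
import numpy as np
def batch_norm_forward(x, gamma, beta, epsilon=0.001):
    # x has shape (batch_size, num_features) and holds a layer's linear outputs
    mu = x.mean(axis=0)                        # batch mean
    var = x.var(axis=0)                        # batch variance (mean squared deviation)
    x_hat = (x - mu) / np.sqrt(var + epsilon)  # normalize
    return gamma * x_hat + beta                # scale and shift
out = batch_norm_forward(np.random.randn(64, 100), gamma=np.ones(100), beta=np.zeros(100))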
In NeuralNet's implementation of fully_connected, all of this math is hidden inside the following line, where linear_output serves as the $x_i$ from the equations:
python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
The next section shows you how to implement the math directly.
Batch normalization without the tf.layers package
Our implementation of batch normalization in NeuralNet uses the high-level abstraction tf.layers.batch_normalization, found in TensorFlow's tf.layers package.
However, if you would like to implement batch normalization at a lower level, the following code shows you how.
It uses tf.nn.batch_normalization from TensorFlow's neural net (nn) package.
1) You can replace the fully_connected function in the NeuralNet class with the below code and everything in NeuralNet will still work like it did before.
End of explanation
"""
def batch_norm_test(test_training_accuracy):
"""
:param test_training_accuracy: bool
If True, perform inference with batch normalization using batch mean and variance;
if False, perform inference with batch normalization using estimated population mean and variance.
"""
weights = [np.random.normal(size=(784,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,100), scale=0.05).astype(np.float32),
np.random.normal(size=(100,10), scale=0.05).astype(np.float32)
]
tf.reset_default_graph()
# Train the model
bn = NeuralNet(weights, tf.nn.relu, True)
# First train the network
with tf.Session() as sess:
tf.global_variables_initializer().run()
bn.train(sess, 0.01, 2000, 2000)
bn.test(sess, test_training_accuracy=test_training_accuracy, include_individual_predictions=True)
"""
Explanation: This version of fully_connected is much longer than the original, but once again has extensive comments to help you understand it. Here are some important points:
It explicitly creates variables to store gamma, beta, and the population mean and variance. These were all handled for us in the previous version of the function.
It initializes gamma to one and beta to zero, so they start out having no effect in this calculation: $y_i \leftarrow \gamma \hat{x_i} + \beta$. However, during training the network learns the best values for these variables using back propagation, just like networks normally do with weights.
Unlike gamma and beta, the variables for population mean and variance are marked as untrainable. That tells TensorFlow not to modify them during back propagation. Instead, the lines that call tf.assign are used to update these variables directly.
TensorFlow won't automatically run the tf.assign operations during training because it only evaluates operations that are required based on the connections it finds in the graph. To get around that, we add this line: with tf.control_dependencies([train_mean, train_variance]): before we run the normalization operation. This tells TensorFlow it needs to run those operations before running anything inside the with block.
The actual normalization math is still mostly hidden from us, this time using tf.nn.batch_normalization.
tf.nn.batch_normalization does not have a training parameter like tf.layers.batch_normalization did. However, we still need to handle training and inference differently, so we run different code in each case using the tf.cond operation.
We use the tf.nn.moments function to calculate the batch mean and variance.
2) The current version of the train function in NeuralNet will work fine with this new version of fully_connected. However, it uses these lines to ensure population statistics are updated when using batch normalization:
python
if self.use_batch_norm:
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
Our new version of fully_connected handles updating the population statistics directly. That means you can also simplify your code by replacing the above if/else condition with just this line:
python
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
3) And just in case you want to implement every detail from scratch, you can replace this line in batch_norm_training:
python
return tf.nn.batch_normalization(linear_output, batch_mean, batch_variance, beta, gamma, epsilon)
with these lines:
python
normalized_linear_output = (linear_output - batch_mean) / tf.sqrt(batch_variance + epsilon)
return gamma * normalized_linear_output + beta
And replace this line in batch_norm_inference:
python
return tf.nn.batch_normalization(linear_output, pop_mean, pop_variance, beta, gamma, epsilon)
with these lines:
python
normalized_linear_output = (linear_output - pop_mean) / tf.sqrt(pop_variance + epsilon)
return gamma * normalized_linear_output + beta
As you can see in each of the above substitutions, the two lines of replacement code simply implement the following two equations directly. The first line calculates the following equation, with linear_output representing $x_i$ and normalized_linear_output representing $\hat{x_i}$:
$$
\hat{x_i} \leftarrow \frac{x_i - \mu_B}{\sqrt{\sigma_{B}^{2} + \epsilon}}
$$
And the second line is a direct translation of the following equation:
$$
y_i \leftarrow \gamma \hat{x_i} + \beta
$$
We still use the tf.nn.moments operation to implement the other two equations from earlier – the ones that calculate the batch mean and variance used in the normalization step. If you really wanted to do everything from scratch, you could replace that line, too, but we'll leave that to you.
Why the difference between training and inference?
In the original function that uses tf.layers.batch_normalization, we tell the layer whether or not the network is training by passing a value for its training parameter, like so:
python
batch_normalized_output = tf.layers.batch_normalization(linear_output, training=self.is_training)
And that forces us to provide a value for self.is_training in our feed_dict, like we do in this example from NeuralNet's train function:
python
session.run(train_step, feed_dict={self.input_layer: batch_xs,
labels: batch_ys,
self.is_training: True})
If you looked at the low level implementation, you probably noticed that, just like with tf.layers.batch_normalization, we need to do slightly different things during training and inference. But why is that?
First, let's look at what happens when we don't. The following function is similar to train_and_test from earlier, but this time we are only testing one network and instead of plotting its accuracy, we perform 200 predictions on test inputs, 1 input at a time. We can use the test_training_accuracy parameter to test the network in training or inference modes (the equivalent of passing True or False to the feed_dict for is_training).
End of explanation
"""
batch_norm_test(True)
"""
Explanation: In the following cell, we pass True for test_training_accuracy, which performs the same batch normalization that we normally perform during training.
End of explanation
"""
batch_norm_test(False)
"""
Explanation: As you can see, the network guessed the same value every time! But why? Because during training, a network with batch normalization adjusts the values at each layer based on the mean and variance of that batch. The "batches" we are using for these predictions have a single input each time, so their values are the means, and their variances will always be 0. That means the network will normalize the values at any layer to zero. (Review the equations from before to see why a value that is equal to the mean would always normalize to zero.) So we end up with the same result for every input we give the network, because it's the value the network produces when it applies its learned weights to zeros at every layer.
Note: If you re-run that cell, you might get a different value from what we showed. That's because the specific weights the network learns will be different every time. But whatever value it is, it should be the same for all 200 predictions.
To overcome this problem, the network does not just normalize the batch at each layer. It also maintains an estimate of the mean and variance for the entire population. So when we perform inference, instead of letting it "normalize" all the values using their own means and variance, it uses the estimates of the population mean and variance that it calculated while training.
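One common way to maintain those running estimates is an exponential moving average that is updated once per training batch. The following is a rough sketch (the decay value and the shapes are illustrative assumptions, not values taken from NeuralNet):
python
import tensorflow as tf
decay = 0.99  # how slowly the running estimates forget old batches (illustrative value)
pop_mean = tf.Variable(tf.zeros([100]), trainable=False)
pop_variance = tf.Variable(tf.ones([100]), trainable=False)
# the batch statistics would normally come from the layer's linear output
batch_mean, batch_variance = tf.nn.moments(tf.random_normal([64, 100]), [0])
# blend the batch statistics into the running population estimates
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))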
So in the following example, we pass False for test_training_accuracy, which tells the network that we want it to perform inference with the population statistics it calculated during training.
End of explanation
"""
|
bloomberg/bqplot
|
examples/Interactions/Mark Interactions.ipynb
|
apache-2.0
|
x_sc = LinearScale()
y_sc = LinearScale()
x_data = np.arange(20)
y_data = np.random.randn(20)
scatter_chart = Scatter(
x=x_data,
y=y_data,
scales={"x": x_sc, "y": y_sc},
colors=["dodgerblue"],
interactions={"click": "select"},
selected_style={"opacity": 1.0, "fill": "DarkOrange", "stroke": "Red"},
unselected_style={"opacity": 0.5},
)
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
Figure(marks=[scatter_chart], axes=[ax_x, ax_y])
scatter_chart.selected
"""
Explanation: Scatter Chart
Scatter Chart Selections
Click a point on the Scatter plot to select it. Now, run the cell below to check the selection. After you've done this, try holding the Ctrl key (or Command key on a Mac) and clicking another point. Clicking the background will reset the selection.
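You can also react to selection changes programmatically by observing the selected trait through the standard traitlets mechanism. A small sketch that builds on the scatter_chart defined above:
python
def on_selected_change(change):
    # change["new"] holds the current list of selected indices (or None)
    print(change["new"])
scatter_chart.observe(on_selected_change, names=["selected"])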
End of explanation
"""
scatter_chart.selected = [1, 2, 3]
"""
Explanation: Alternately, the selected attribute can be directly set on the Python side (try running the cell below):
End of explanation
"""
x_sc = LinearScale()
y_sc = LinearScale()
x_data = np.arange(20)
y_data = np.random.randn(20)
dd = Dropdown(options=["First", "Second", "Third", "Fourth"])
scatter_chart = Scatter(
x=x_data,
y=y_data,
scales={"x": x_sc, "y": y_sc},
colors=["dodgerblue"],
names=np.arange(100, 200),
names_unique=False,
display_names=False,
display_legend=True,
labels=["Blue"],
)
ins = Button(icon="fa-legal")
scatter_chart.tooltip = ins
line = Lines(x=x_data, y=y_data, scales={"x": x_sc, "y": y_sc}, colors=["dodgerblue"])
scatter_chart2 = Scatter(
x=x_data,
y=np.random.randn(20),
scales={"x": x_sc, "y": y_sc},
colors=["orangered"],
tooltip=dd,
names=np.arange(100, 200),
names_unique=False,
display_names=False,
display_legend=True,
labels=["Red"],
)
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
fig = Figure(marks=[scatter_chart, scatter_chart2, line], axes=[ax_x, ax_y])
fig
def print_event(self, target):
print(target)
# Adding call back to scatter events
# print a custom message on hover and background click of the Blue Scatter
scatter_chart.on_hover(print_event)
scatter_chart.on_background_click(print_event)
# print a custom message on click of an element or legend of the Red Scatter
scatter_chart2.on_element_click(print_event)
scatter_chart2.on_legend_click(print_event)
line.on_element_click(print_event)
# Changing interaction from hover to click for tooltip
scatter_chart.interactions = {"click": "tooltip"}
# Adding figure as tooltip
x_sc = LinearScale()
y_sc = LinearScale()
x_data = np.arange(10)
y_data = np.random.randn(10)
lc = Lines(x=x_data, y=y_data, scales={"x": x_sc, "y": y_sc})
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
tooltip_fig = Figure(marks=[lc], axes=[ax_x, ax_y], layout=Layout(min_width="600px"))
scatter_chart.tooltip = tooltip_fig
"""
Explanation: Scatter Chart Interactions and Tooltips
End of explanation
"""
i = ImageIpy.from_file(os.path.abspath("../data_files/trees.jpg"))
bqi = Image(image=i, scales={"x": x_sc, "y": y_sc}, x=(0, 10), y=(-1, 1))
fig_image = Figure(marks=[bqi], axes=[ax_x, ax_y])
fig_image
bqi.on_element_click(print_event)
"""
Explanation: Image
For images, on_element_click returns the location of the mouse click.
End of explanation
"""
# Adding default tooltip to Line Chart
x_sc = LinearScale()
y_sc = LinearScale()
x_data = np.arange(100)
y_data = np.random.randn(3, 100)
def_tt = Tooltip(
fields=["name", "index"], formats=["", ".2f"], labels=["id", "line_num"]
)
line_chart = Lines(
x=x_data,
y=y_data,
scales={"x": x_sc, "y": y_sc},
tooltip=def_tt,
display_legend=True,
labels=["line 1", "line 2", "line 3"],
)
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
Figure(marks=[line_chart], axes=[ax_x, ax_y])
# Adding call back to print event when legend or the line is clicked
line_chart.on_legend_click(print_event)
line_chart.on_element_click(print_event)
"""
Explanation: Line Chart
End of explanation
"""
# Adding interaction to select bar on click for Bar Chart
x_sc = OrdinalScale()
y_sc = LinearScale()
x_data = np.arange(10)
y_data = np.random.randn(2, 10)
bar_chart = Bars(
x=x_data,
y=[y_data[0, :].tolist(), y_data[1, :].tolist()],
scales={"x": x_sc, "y": y_sc},
interactions={"click": "select"},
selected_style={"stroke": "orange", "fill": "red"},
labels=["Level 1", "Level 2"],
display_legend=True,
)
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
Figure(marks=[bar_chart], axes=[ax_x, ax_y])
# Adding a tooltip on hover in addition to select on click
def_tt = Tooltip(fields=["x", "y"], formats=["", ".2f"])
bar_chart.tooltip = def_tt
bar_chart.interactions = {
"legend_hover": "highlight_axes",
"hover": "tooltip",
"click": "select",
}
# Changing tooltip to be on click
bar_chart.interactions = {"click": "tooltip"}
# Call back on legend being clicked
bar_chart.type = "grouped"
bar_chart.on_legend_click(print_event)
"""
Explanation: Bar Chart
End of explanation
"""
# Adding tooltip for Histogram
x_sc = LinearScale()
y_sc = LinearScale()
sample_data = np.random.randn(100)
def_tt = Tooltip(formats=["", ".2f"], fields=["count", "midpoint"])
hist = Hist(
sample=sample_data,
scales={"sample": x_sc, "count": y_sc},
tooltip=def_tt,
display_legend=True,
labels=["Test Hist"],
select_bars=True,
)
ax_x = Axis(scale=x_sc, tick_format="0.2f")
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
Figure(marks=[hist], axes=[ax_x, ax_y])
# Changing tooltip to be displayed on click
hist.interactions = {"click": "tooltip"}
# Changing tooltip to be on click of legend
hist.interactions = {"legend_click": "tooltip"}
"""
Explanation: Histogram
End of explanation
"""
pie_data = np.abs(np.random.randn(10))
sc = ColorScale(scheme="Reds")
tooltip_widget = Tooltip(
fields=["size", "index", "color"], formats=["0.2f", "", "0.2f"]
)
pie = Pie(
sizes=pie_data,
scales={"color": sc},
color=np.random.randn(10),
tooltip=tooltip_widget,
interactions={"click": "tooltip"},
selected_style={"fill": "red"},
)
pie.selected_style = {"opacity": "1", "stroke": "white", "stroke-width": "2"}
pie.unselected_style = {"opacity": "0.2"}
Figure(marks=[pie])
# Changing interaction to select on click and tooltip on hover
pie.interactions = {"click": "select", "hover": "tooltip"}
"""
Explanation: Pie Chart
Set up a pie chart with click to show the tooltip.
End of explanation
"""
|
GoogleCloudPlatform/ml-design-patterns
|
06_reproducibility/feature_store.ipynb
|
apache-2.0
|
import os
# Feast Core acts as the central feature registry
FEAST_CORE_URL = os.getenv('FEAST_CORE_URL', 'localhost:6565')
# Feast Online Serving allows for the retrieval of real-time feature data
FEAST_ONLINE_SERVING_URL = os.getenv('FEAST_ONLINE_SERVING_URL', 'localhost:6566')
# Feast Batch Serving allows for the retrieval of historical feature data
FEAST_BATCH_SERVING_URL = os.getenv('FEAST_BATCH_SERVING_URL', 'localhost:6567')
"""
Explanation: Feature Store using FEAST
Feast (Feature Store) is a tool for managing and serving machine learning features.
To execute this notebook, you'll first need to install Feast and connect to an existing deployment.
To get started, follow the instructions in this Getting Started guide here.
In short, you will need to:
Install docker-compose if it's not already on your machine. You can also deploy Feast using GKE which is better suited for production purposes.
Install feast
bash
pip install feast
Clone the Feast repository and navigate to the infra/docker-compose sub-directory
bash
git clone https://github.com/feast-dev/feast.git
bash
cd feast/infra/docker-compose
Make a copy of the .env.sample file
bash
cp .env.sample .env
Create a service account and copy it to the infra/docker-compose/gcp-service-accounts folder
Create a GCS bucket to use for staging
bash
gsutil mb gs://my-feast-staging-bucket
Configure the .env file to reference your service key:
FEAST_CORE_GCP_SERVICE_ACCOUNT_KEY
FEAST_BATCH_SERVING_GCP_SERVICE_ACCOUNT_KEY
FEAST_JUPYTER_GCP_SERVICE_ACCOUNT_KEY
Configure the following fields in the feast/infra/docker-compose/serving/batch-serving.yml file:
feast.stores.config.project_id
feast.stores.config.dataset_id
feast.stores.config.staging_location
Start Feast:
bash
docker-compose \
-f docker-compose.yml \
-f docker-compose.online.yml \
-f docker-compose.batch.yml \
up -d
Configuration
Set up the serving clients for online and batch feature retrieval.
End of explanation
"""
#!pip install --user feast
#!pip install --user xgboost
import pandas as pd
import numpy as np
from pytz import timezone, utc
from feast import Client, FeatureSet, Entity, ValueType
from feast.serving.ServingService_pb2 import GetOnlineFeaturesRequest
from feast.types.Value_pb2 import Value as Value
from google.protobuf.duration_pb2 import Duration
from datetime import datetime, timedelta
from random import randrange
import random
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
"""
Explanation: Import libraries and modules
End of explanation
"""
!head taxi-train.csv
COL_NAMES = ['fare_amount', 'pickup_datetime', 'pickup_longitude', \
'pickup_latitude', 'dropoff_longitude', 'dropoff_latitude', 'passenger_count', 'taxi_id']
taxi_df = pd.read_csv('taxi-train.csv', names=COL_NAMES)
taxi_df.head()
# needs datetime field in datetime[ns] format
# create a datetime field from pickup_datetime
taxi_datetime = pd.to_datetime(taxi_df.pickup_datetime, unit='ns', utc=True)
taxi_df.insert(2, "datetime", taxi_datetime, True)
taxi_df.head()
"""
Explanation: Ingesting features into Feast
Read in taxifare data
Read in the taxifare data from a .csv file and inspect it with Pandas.
End of explanation
"""
def compute_dist(row):
lat1, lon1 = row.pickup_latitude, row.pickup_longitude
lat2, lon2 = row.dropoff_latitude, row.dropoff_longitude
londiff = lon2 - lon1
latdiff = lat2 - lat1
return np.sqrt(londiff*londiff + latdiff*latdiff)
taxi_df['euclid_dist'] = taxi_df.apply(compute_dist, axis=1)
taxi_df.head()
"""
Explanation: Create new features: Euclidean distance
Engineer an additional feature which provides the Euclidean distance from pickup location to dropoff location.
End of explanation
"""
# Connect to FEAST core
client = Client(core_url=FEAST_CORE_URL)
client.list_feature_sets()
FS_NAME = "taxirides"
taxi_fs = FeatureSet("taxirides")
taxi_fs.infer_fields_from_df(taxi_df,
entities=[Entity(name='taxi_id', dtype=ValueType.INT64)],
replace_existing_features=True)
client.apply(taxi_fs)
client.list_feature_sets()
print(client.get_feature_set('taxirides'))
client.ingest(taxi_fs, taxi_df)
"""
Explanation: Connect to FEAST and create a FeatureSet with this dataframe
End of explanation
"""
_feast_batch_client = Client(serving_url=FEAST_BATCH_SERVING_URL,
core_url=FEAST_CORE_URL)
model_features = ['pickup_latitude',
'pickup_longitude',
'dropoff_latitude',
'dropoff_longitude',
'passenger_count',
'euclid_dist']
target = 'fare_amount'
# Add the target variable to our feature list
features = model_features + [target]
"""
Explanation: Retrieving feature stores from Feast
Get batch features for training
To access historical/offline features, we'll set up a Feast serving batch client.
End of explanation
"""
taxis = taxi_df.taxi_id.unique()
days = taxi_df.datetime.unique()
entity_df = pd.DataFrame(
{
"datetime": [day for day in days for taxi in taxis],
"taxi_id": [taxi for day in days for taxi in taxis],
}
)
entity_df.shape
FS_NAME = "taxirides"
# Retrieve training dataset from Feast
dataset = _feast_batch_client.get_batch_features(
feature_refs=[FS_NAME + ":" + feature for feature in features],
entity_rows=entity_df).to_dataframe()
dataset.dropna(inplace=True) # not all pairings of datetime and taxi_id have an entry
dataset.head()
x_train, x_test, y_train, y_test = \
train_test_split(dataset[[FS_NAME + "__" + feature for feature in model_features]],
dataset[FS_NAME + "__" + target],
test_size=0.25, random_state=42)
model = XGBRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=1, gamma=0,
importance_type='gain', learning_rate=0.1, max_delta_step=0,
max_depth=3, min_child_weight=1, missing=None, n_estimators=100,
n_jobs=1, nthread=None, objective='reg:squarederror', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
silent=None, subsample=1, verbosity=1)
# Next, we'll fit the model with training data.
model.fit(x_train, y_train)
train_score = model.score(x_train, y_train)
test_score = model.score(x_test, y_test)
print("Training score: ", train_score)
print("Testing score: ", test_score)
"""
Explanation: To pull batch features, we provide an entity dataframe that contains the entities and timestamps we want to retrieve. We'll provide every pairing to get all offline features for training.
End of explanation
"""
_feast_online_client = Client(serving_url=FEAST_ONLINE_SERVING_URL)
# for a single taxi_id
taxi_id = 1
online_features = _feast_online_client.get_online_features(
feature_refs=[FS_NAME + ":" + feature for feature in model_features],
entity_rows=[
GetOnlineFeaturesRequest.EntityRow(
fields={
"taxi_id": Value(
int64_val=taxi_id)
}
)
],
)
print(online_features)
# Convert to Pandas dataframe
features_dict = dict.fromkeys([FS_NAME + "__" + feature for feature in model_features])
for row in online_features.field_values:
for feature in model_features:
if features_dict[FS_NAME + "__" + feature] is None:
features_dict[FS_NAME + "__" + feature] = []
if feature in ['passenger_count']:
features_dict[FS_NAME + "__" + feature].append(row.fields[FS_NAME + ":" + feature].int64_val)
else:
features_dict[FS_NAME + "__" + feature].append(row.fields[FS_NAME + ":" + feature].double_val)
features_dict
predict_df = pd.DataFrame.from_dict(features_dict)
model.predict(predict_df)
"""
Explanation: Predict with online features
End of explanation
"""
# Create a Pandas dataframe
features_dict = dict.fromkeys([FS_NAME + "__" + feature for feature in model_features] + ['taxi_id'])
# all taxi_ids
taxi_ids = taxi_df.taxi_id.unique()
entity_rows = []
for taxi_id in taxi_ids.tolist():
entity_rows.append(
GetOnlineFeaturesRequest.EntityRow(fields={'taxi_id': Value(int64_val=taxi_id)})
)
data = _feast_online_client.get_online_features(
feature_refs=[FS_NAME + ":" + feature for feature in model_features],
entity_rows=entity_rows)
for row in data.field_values:
# capture taxi_id
if features_dict['taxi_id'] is None:
features_dict['taxi_id'] = []
features_dict['taxi_id'].append(row.fields['taxi_id'].int64_val)
# get all feature values
for feature in model_features:
if features_dict[FS_NAME + "__" + feature] is None:
features_dict[FS_NAME + "__" + feature] = []
if feature in ['passenger_count']:
features_dict[FS_NAME + "__" + feature].append(row.fields[FS_NAME + ":" + feature].int64_val)
else:
features_dict[FS_NAME + "__" + feature].append(row.fields[FS_NAME + ":" + feature].double_val)
predict_df = pd.DataFrame.from_dict(features_dict)
predict_df.head()
pd.DataFrame.from_dict({'taxi_id': predict_df.taxi_id,
'prediction': model.predict(predict_df.drop('taxi_id', axis=1))})
"""
Explanation: Batch predict job for all taxi_ids
End of explanation
"""
|
jGaboardi/LP_MIP
|
.ipynb_checkpoints/Primal_v_Dual_Canonical_GUROBI-checkpoint.ipynb
|
lgpl-3.0
|
# Imports
import numpy as np
import gurobipy as gbp
import datetime as dt
# Constants
Aij = np.random.randint(5, 50, 25)
Aij = Aij.reshape(5,5)
AijSum = np.sum(Aij)
Cj = np.random.randint(10, 20, 5)
CjSum = np.sum(Cj)
Bi = np.random.randint(10, 20, 5)
BiSum = np.sum(Bi)
# Matrix Shape
rows = range(len(Aij))
cols = range(len(Aij[0]))
"""
Explanation: <font size='5' face='Courier New'><h1 align="center"><i>The Primal & Dual Linear Programming Problems: Canonical Form</i></h1></font>
<font face='Times New Roman' size='6'><h3 align="center"><u>James D. Gaboardi</u></h3></font>
<font face='Times New Roman' size='5'><h3 align="center">Florida State University | Department of Geography</h3></font>
<p><font size='4' face='Times New Roman'>Adapted from:</font></p>
<p><font size='4' face='Times New Roman'><b>Daskin, M. S.</b> 1995. <i>Network and Discrete Location: Models, Algorithms, and Applications</i>. Hoboken, NJ, USA: John Wiley & Sons, Inc.</font></p>
<font size='7' face='Times New Roman'><b>0. <u>Imports and Data Creation</u></b></font>
End of explanation
"""
# Instantiate Model
mPrimal_Canonical_GUROBI = gbp.Model(' -- Canonical Primal Linear Programming Problem -- ')
# Set Focus to Optimality
gbp.setParam('MIPFocus', 2)
# Decision Variables
desc_var = []
for dest in cols:
desc_var.append([])
desc_var[dest].append(mPrimal_Canonical_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='y'+str(dest+1)))
# Update Model
mPrimal_Canonical_GUROBI.update()
#Objective Function
mPrimal_Canonical_GUROBI.setObjective(gbp.quicksum(Cj[dest]*desc_var[dest][0]
for dest in cols),
gbp.GRB.MINIMIZE)
# Constraints
for orig in rows:
mPrimal_Canonical_GUROBI.addConstr(gbp.quicksum(Aij[orig][dest]*desc_var[dest][0]
for dest in cols) - Bi[orig] >= 0)
# Optimize
mPrimal_Canonical_GUROBI.optimize()
# Write LP file
mPrimal_Canonical_GUROBI.write('LP.lp')
print '\n*************************************************************************'
print ' | Decision Variables'
for v in mPrimal_Canonical_GUROBI.getVars():
print ' | ', v.VarName, '=', v.x
print '*************************************************************************'
val = mPrimal_Canonical_GUROBI.objVal
print ' | Objective Value ------------------ ', val
print ' | Aij Sum -------------------------- ', AijSum
print ' | Cj Sum --------------------------- ', CjSum
print ' | Bi Sum --------------------------- ', BiSum
print ' | Matrix Dimensions ---------------- ', Aij.shape
print ' | Date/Time ------------------------ ', dt.datetime.now()
print '*************************************************************************'
print '-- Gurobi Canonical Primal Linear Programming Problem --'
print '\nJames Gaboardi, 2015'
"""
Explanation: <font size='7' face='Times New Roman'><b>1. <u>Primal</u></b></font>
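For reference (a standard statement added here, not part of the original notebook), the canonical-form primal problem solved in this notebook is
$$
\min_{\mathbf y}\ \mathbf c^{\top}\mathbf y \quad \text{subject to} \quad \mathbf A\mathbf y \ge \mathbf b,\ \mathbf y \ge \mathbf 0
$$
with $\mathbf c = C_j$, $\mathbf A = A_{ij}$ and $\mathbf b = B_i$ in the code; the nonnegativity bound comes from GUROBI's default lower bound of 0 on continuous variables.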
End of explanation
"""
# Instantiate Model
mDual_Canonical_GUROBI = gbp.Model(' -- Canonical Dual Linear Programming Problem -- ')
# Set Focus to Optimality
gbp.setParam('MIPFocus', 2)
# Decision Variables
desc_var = []
for dest in cols:
desc_var.append([])
desc_var[dest].append(mDual_Canonical_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='u'+str(dest+1)))
# Update Model
mDual_Canonical_GUROBI.update()
#Objective Function
mDual_Canonical_GUROBI.setObjective(gbp.quicksum(Bi[orig]*desc_var[orig][0]
for orig in rows),
gbp.GRB.MAXIMIZE)
# Constraints
for dest in cols:
mDual_Canonical_GUROBI.addConstr(gbp.quicksum(Aij[orig][dest]*desc_var[orig][0]
for orig in rows) - Cj[dest] <= 0)
# Optimize
mDual_Canonical_GUROBI.optimize()
# Write LP file
mDual_Canonical_GUROBI.write('LP.lp')
print '\n*************************************************************************'
print ' | Decision Variables'
for v in mDual_Canonical_GUROBI.getVars():
print ' | ', v.VarName, '=', v.x
print '*************************************************************************'
val = mDual_Canonical_GUROBI.objVal
print ' | Objective Value ------------------ ', val
print ' | Aij Sum -------------------------- ', AijSum
print ' | Cj Sum --------------------------- ', CjSum
print ' | Bi Sum --------------------------- ', BiSum
print ' | Matrix Dimensions ---------------- ', Aij.shape
print ' | Date/Time ------------------------ ', dt.datetime.now()
print '*************************************************************************'
print '-- Gurobi Canonical Dual Linear Programming Problem --'
print '\nJames Gaboardi, 2015'
"""
Explanation: <font size='7' face='Times New Roman'><b>2. <u>Dual</u></b></font>
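Its canonical dual (again a standard statement, added here for reference) is
$$
\max_{\mathbf u}\ \mathbf b^{\top}\mathbf u \quad \text{subject to} \quad \mathbf A^{\top}\mathbf u \le \mathbf c,\ \mathbf u \ge \mathbf 0
$$
Weak duality guarantees that every feasible dual objective value is a lower bound on the primal optimum, and at the optimum the two objective values coincide (strong duality), which provides a handy check on the two GUROBI runs.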
End of explanation
"""
|
keras-team/keras-io
|
examples/vision/ipynb/cutmix.ipynb
|
apache-2.0
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
np.random.seed(42)
tf.random.set_seed(42)
"""
Explanation: CutMix data augmentation for image classification
Author: Sayan Nath<br>
Date created: 2021/06/08<br>
Last modified: 2021/06/08<br>
Description: Data augmentation with CutMix for image classification on CIFAR-10.
Introduction
CutMix is a data augmentation technique that addresses the issue of information loss
and inefficiency present in regional dropout strategies.
Instead of removing pixels and filling them with black or grey pixels or Gaussian noise,
you replace the removed regions with a patch from another image,
while the ground truth labels are mixed proportionally to the number of pixels of combined images.
CutMix was proposed in
CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features
(Yun et al., 2019)
It's implemented via the following formulas:
$$
\tilde{x} = \mathbf{M} \odot x_{A} + (\mathbf{1} - \mathbf{M}) \odot x_{B}
$$
$$
\tilde{y} = \lambda y_{A} + (1 - \lambda) y_{B}
$$
where $\mathbf{M}$ is the binary mask which indicates the cutout and the fill-in
regions from the two randomly drawn images and $\lambda \in [0, 1]$ is drawn from a
$\mathrm{Beta}(\alpha, \alpha)$ distribution.
The coordinates of the bounding box $(r_x, r_y, r_w, r_h)$ indicate the cutout and fill-in regions of the images.
The bounding box sampling is represented by:
$$
r_x \sim \mathrm{Unif}(0, W), \quad r_w = W\sqrt{1 - \lambda}
$$
$$
r_y \sim \mathrm{Unif}(0, H), \quad r_h = H\sqrt{1 - \lambda}
$$
where $r_x, r_y$ are randomly drawn from a uniform distribution with upper bounds $W$ and $H$ (the image width and height).
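As a concrete (made-up) numerical example: if the sampled $\lambda$ is 0.7, the pasted patch covers $1 - \lambda = 30\%$ of the image area, and a pair of one-hot labels is mixed as $0.7\cdot[1,0] + 0.3\cdot[0,1] = [0.7,\,0.3]$.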
Setup
End of explanation
"""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
class_names = [
"Airplane",
"Automobile",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
]
"""
Explanation: Load the CIFAR-10 dataset
In this example, we will use the
CIFAR-10 image classification dataset.
End of explanation
"""
AUTO = tf.data.AUTOTUNE
BATCH_SIZE = 32
IMG_SIZE = 32
"""
Explanation: Define hyperparameters
End of explanation
"""
def preprocess_image(image, label):
image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
image = tf.image.convert_image_dtype(image, tf.float32) / 255.0
return image, label
"""
Explanation: Define the image preprocessing function
End of explanation
"""
train_ds_one = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(1024)
.map(preprocess_image, num_parallel_calls=AUTO)
)
train_ds_two = (
tf.data.Dataset.from_tensor_slices((x_train, y_train))
.shuffle(1024)
.map(preprocess_image, num_parallel_calls=AUTO)
)
train_ds_simple = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
train_ds_simple = (
train_ds_simple.map(preprocess_image, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# Combine two shuffled datasets from the same training data.
train_ds = tf.data.Dataset.zip((train_ds_one, train_ds_two))
test_ds = (
test_ds.map(preprocess_image, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
"""
Explanation: Convert the data into TensorFlow Dataset objects
End of explanation
"""
def sample_beta_distribution(size, concentration_0=0.2, concentration_1=0.2):
gamma_1_sample = tf.random.gamma(shape=[size], alpha=concentration_1)
gamma_2_sample = tf.random.gamma(shape=[size], alpha=concentration_0)
return gamma_1_sample / (gamma_1_sample + gamma_2_sample)
@tf.function
def get_box(lambda_value):
cut_rat = tf.math.sqrt(1.0 - lambda_value)
cut_w = IMG_SIZE * cut_rat # rw
cut_w = tf.cast(cut_w, tf.int32)
cut_h = IMG_SIZE * cut_rat # rh
cut_h = tf.cast(cut_h, tf.int32)
cut_x = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # rx
cut_y = tf.random.uniform((1,), minval=0, maxval=IMG_SIZE, dtype=tf.int32) # ry
boundaryx1 = tf.clip_by_value(cut_x[0] - cut_w // 2, 0, IMG_SIZE)
boundaryy1 = tf.clip_by_value(cut_y[0] - cut_h // 2, 0, IMG_SIZE)
bbx2 = tf.clip_by_value(cut_x[0] + cut_w // 2, 0, IMG_SIZE)
bby2 = tf.clip_by_value(cut_y[0] + cut_h // 2, 0, IMG_SIZE)
target_h = bby2 - boundaryy1
if target_h == 0:
target_h += 1
target_w = bbx2 - boundaryx1
if target_w == 0:
target_w += 1
return boundaryx1, boundaryy1, target_h, target_w
@tf.function
def cutmix(train_ds_one, train_ds_two):
(image1, label1), (image2, label2) = train_ds_one, train_ds_two
alpha = [0.25]
beta = [0.25]
# Get a sample from the Beta distribution
lambda_value = sample_beta_distribution(1, alpha, beta)
# Define Lambda
lambda_value = lambda_value[0][0]
# Get the bounding box offsets, heights and widths
boundaryx1, boundaryy1, target_h, target_w = get_box(lambda_value)
# Get a patch from the second image (`image2`)
crop2 = tf.image.crop_to_bounding_box(
image2, boundaryy1, boundaryx1, target_h, target_w
)
# Pad the `image2` patch (`crop2`) with the same offset
image2 = tf.image.pad_to_bounding_box(
crop2, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE
)
# Get a patch from the first image (`image1`)
crop1 = tf.image.crop_to_bounding_box(
image1, boundaryy1, boundaryx1, target_h, target_w
)
# Pad the `image1` patch (`crop1`) with the same offset
img1 = tf.image.pad_to_bounding_box(
crop1, boundaryy1, boundaryx1, IMG_SIZE, IMG_SIZE
)
# Modify the first image by subtracting the patch from `image1`
# (before applying the `image2` patch)
image1 = image1 - img1
# Add the modified `image1` and `image2` together to get the CutMix image
image = image1 + image2
# Adjust lambda according to the pixel ratio
lambda_value = 1 - (target_w * target_h) / (IMG_SIZE * IMG_SIZE)
lambda_value = tf.cast(lambda_value, tf.float32)
# Combine the labels of both images
label = lambda_value * label1 + (1 - lambda_value) * label2
return image, label
"""
Explanation: Define the CutMix data augmentation function
The CutMix function takes two image-and-label pairs to perform the augmentation. It samples λ from the Beta distribution and gets a bounding box from the get_box function. We then crop the second image (image2) and pad this patch into the final padded image at the same location.
End of explanation
"""
# Create the new dataset using our `cutmix` utility
train_ds_cmu = (
train_ds.shuffle(1024)
.map(cutmix, num_parallel_calls=AUTO)
.batch(BATCH_SIZE)
.prefetch(AUTO)
)
# Let's preview 9 samples from the dataset
image_batch, label_batch = next(iter(train_ds_cmu))
plt.figure(figsize=(10, 10))
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.title(class_names[np.argmax(label_batch[i])])
plt.imshow(image_batch[i])
plt.axis("off")
"""
Explanation: Note: we are combining two images to create a single one.
Visualize the new dataset after applying the CutMix augmentation
End of explanation
"""
def resnet_layer(
inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation="relu",
batch_normalization=True,
conv_first=True,
):
conv = keras.layers.Conv2D(
num_filters,
kernel_size=kernel_size,
strides=strides,
padding="same",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(1e-4),
)
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = keras.layers.BatchNormalization()(x)
if activation is not None:
x = keras.layers.Activation(activation)(x)
else:
if batch_normalization:
x = keras.layers.BatchNormalization()(x)
if activation is not None:
x = keras.layers.Activation(activation)(x)
x = conv(x)
return x
def resnet_v20(input_shape, depth, num_classes=10):
if (depth - 2) % 6 != 0:
raise ValueError("depth should be 6n+2 (eg 20, 32, 44 in [a])")
# Start model definition.
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = keras.layers.Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides)
y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(
inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False,
)
x = keras.layers.add([x, y])
x = keras.layers.Activation("relu")(x)
num_filters *= 2
# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = keras.layers.AveragePooling2D(pool_size=8)(x)
y = keras.layers.Flatten()(x)
outputs = keras.layers.Dense(
num_classes, activation="softmax", kernel_initializer="he_normal"
)(y)
# Instantiate model.
model = keras.models.Model(inputs=inputs, outputs=outputs)
return model
def training_model():
return resnet_v20((32, 32, 3), 20)
initial_model = training_model()
initial_model.save_weights("initial_weights.h5")
"""
Explanation: Define a ResNet-20 model
End of explanation
"""
model = training_model()
model.load_weights("initial_weights.h5")
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(train_ds_cmu, validation_data=test_ds, epochs=15)
test_loss, test_accuracy = model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_accuracy * 100))
"""
Explanation: Train the model with the dataset augmented by CutMix
End of explanation
"""
model = training_model()
model.load_weights("initial_weights.h5")
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(train_ds_simple, validation_data=test_ds, epochs=15)
test_loss, test_accuracy = model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_accuracy * 100))
"""
Explanation: Train the model using the original non-augmented dataset
End of explanation
"""
|
dietmarw/EK5312_ElectricalMachines
|
Chapman/Ch2-Example_2-10.ipynb
|
unlicense
|
%pylab notebook
"""
Explanation: Electric Machinery Fundamentals 5th edition
Chapter 2 (Code examples)
Example 2-10
Calculate and plot the magnetization current of a 230/115 transformer operating at 230 volts and 50/60 Hz. This program also calculates the rms value of the magnetization current.
Import the PyLab namespace (provides a set of useful commands and constants like pi)
End of explanation
"""
# import the pandas library which provides useful data processing functions
import pandas as pd
# The data file is stored in the "data/" subfolder
fileUrl = 'data/mag_curve_1.dat'
# Now we read the file in:
data = pd.read_csv(fileUrl, # the address where to download the datafile from
sep=' ', # our data source uses a blank space as separation
comment='%', # ignore lines starting with a "%"
skipinitialspace = True, # ignore initial spaces
header=None, # we don't have a header line defined...
names=['mmf_data','flux_data'] # ...instead we define the names here
)
"""
Explanation: The magnetization curve for this transformer is shown in Figure 2-45, and can be found in mag_curve_1.dat at this book's web site.
<img src="figs/FigC_2-45.jpg" width="66%">
End of explanation
"""
VM = 325.0 # Maximum voltage (V)
NP = 850 # Primary turns
"""
Explanation: Initialize values:
End of explanation
"""
freq = 60 # Freq (Hz)
w = 2 * pi * freq
"""
Explanation: Calculate angular velocity for 60 Hz:
End of explanation
"""
time = arange(0, 1/30, 1/3000) # 0 to 1/30 sec
flux = -VM/(w*NP) * cos(w * time)
"""
Explanation: Calculate flux versus time:
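The expression used in the code follows from Faraday's law. Assuming the applied voltage is $v(t) = V_M \sin(\omega t)$ (an assumption consistent with the code), then
$$
v(t) = N_P \frac{d\phi}{dt} \quad\Rightarrow\quad \phi(t) = \frac{1}{N_P}\int v(t)\,dt = -\frac{V_M}{\omega N_P}\cos(\omega t)
$$
which is exactly the flux line computed with flux = -VM/(w*NP) * cos(w * time).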
End of explanation
"""
mmf=interp(flux, data['flux_data'], data['mmf_data'])
"""
Explanation: Calculate the mmf corresponding to a given flux using the interpolation function:
End of explanation
"""
im60 = mmf / NP
"""
Explanation: Calculate the magnetization current:
End of explanation
"""
irms60 = sqrt(sum(im60**2)/im60.size)
"""
Explanation: Calculate the rms value of the current:
End of explanation
"""
freq = 50 # Freq (Hz)
w = 2 * pi * freq
"""
Explanation: Calculate angular velocity for 50 Hz:
End of explanation
"""
time = arange(0, 1.0/30, 1.0/3000) # 0 to 1/30 sec
flux = -VM/(w*NP) * cos(w * time)
"""
Explanation: Calculate flux versus time:
End of explanation
"""
mmf=interp(flux, data['flux_data'], data['mmf_data'])
"""
Explanation: Calculate the mmf corresponding to a given flux using the interpolation function:
End of explanation
"""
im50 = mmf / NP
"""
Explanation: Calculate the magnetization current:
End of explanation
"""
irms50 = sqrt(sum(im50**2)/im50.size)
"""
Explanation: Calculate the rms value of the current:
End of explanation
"""
print('The rms current at 60 Hz is {:.3f} A.'.format(irms60))
print('The rms current at 50 Hz is {:.3f} A.'.format(irms50))
"""
Explanation: Resultant rms values are:
End of explanation
"""
rc('text', usetex=True) # enable LaTeX commands for plot
title('Magnetization current at 50 \& 60 Hz')
xlabel('Time (s)')
ylabel(r'$\mathbf{I_m}$ \textbf{(A)}')
plot(time,im60, time, im50)
legend(('$60 Hz,\, I_{{RMS}} = {:.3f}\,A$'.format(irms60),
'$50 Hz,\, I_{{RMS}} = {:.3f}\,A$'.format(irms50)), loc=4);
"""
Explanation: Plot the magnetization current:
End of explanation
"""
|
mdeff/ntds_2016
|
algorithms/02_sol_clustering.ipynb
|
mit
|
# Load libraries
# Math
import numpy as np
# Visualization
%matplotlib notebook
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import ndimage
# Print output of LFR code
import subprocess
# Sparse matrix
import scipy.sparse
import scipy.sparse.linalg
# 3D visualization
import pylab
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot
# Import data
import scipy.io
# Import functions in lib folder
import sys
sys.path.insert(1, 'lib')
# Import helper functions
%load_ext autoreload
%autoreload 2
from lib.utils import construct_kernel
from lib.utils import compute_kernel_kmeans_EM
from lib.utils import compute_kernel_kmeans_spectral
from lib.utils import compute_purity
# Import distance function
import sklearn.metrics.pairwise
# Remove warnings
import warnings
warnings.filterwarnings("ignore")
# Load MNIST raw data images
mat = scipy.io.loadmat('datasets/mnist_raw_data.mat')
X = mat['Xraw']
n = X.shape[0]
d = X.shape[1]
Cgt = mat['Cgt'] - 1; Cgt = Cgt.squeeze()
nc = len(np.unique(Cgt))
print('Number of data =',n)
print('Data dimensionality =',d);
print('Number of classes =',nc);
"""
Explanation: A Network Tour of Data Science
Xavier Bresson, Winter 2016/17
Exercise 4 - Code 2 : Unsupervised Learning
Unsupervised Clustering with Kernel K-Means
End of explanation
"""
# Your code here
Ker = construct_kernel(X,'linear') # Compute linear Kernel for standard K-Means
Theta = np.ones(n) # Equal weight for each data
[C_kmeans,En_kmeans] = compute_kernel_kmeans_EM(nc,Ker,Theta,10)
acc= compute_purity(C_kmeans,Cgt,nc)
print('accuracy standard kmeans=',acc)
"""
Explanation: Question 1a: What is the clustering accuracy of standard/linear K-Means?<br>
Hint: You may use functions Ker=construct_kernel(X,'linear') to compute the
linear kernel and [C_kmeans, En_kmeans]=compute_kernel_kmeans_EM(n_classes,Ker,Theta,10) with Theta= np.ones(n) to run the standard K-Means algorithm, and accuracy = compute_purity(C_computed,C_solution,n_clusters) that returns the
accuracy.
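As a reminder, cluster purity (which compute_purity presumably implements; see lib/utils.py for the exact code) is
$$
\mathrm{purity} = \frac{1}{n}\sum_{k}\max_{j}\,\big|\,\mathcal{C}_k \cap \mathcal{T}_j\,\big|
$$
where $\mathcal{C}_k$ are the computed clusters and $\mathcal{T}_j$ the ground-truth classes.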
End of explanation
"""
# Your code here
Ker = construct_kernel(X,'gaussian') # Compute Gaussian Kernel
Theta = np.ones(n) # Equal weight for each data
C_kmeans,_ = compute_kernel_kmeans_EM(nc,Ker,Theta,10)
acc = compute_purity(C_kmeans,Cgt,nc)
print('accuracy non-linear kmeans with EM=',acc)
C_kmeans,_ = compute_kernel_kmeans_spectral(nc,Ker,Theta,10)
acc = compute_purity(C_kmeans,Cgt,nc)
print('accuracy non-linear kmeans with SPECTRAL=',acc)
# Your code here
Ker = construct_kernel(X,'polynomial',[1,0,2])
Theta = np.ones(n) # Equal weight for each data
C_kmeans, En_kmeans = compute_kernel_kmeans_EM(nc,Ker,Theta,10)
acc = compute_purity(C_kmeans,Cgt,nc)
print('accuracy non-linear kmeans with EM=',acc)
[C_kmeans,En_kmeans] = compute_kernel_kmeans_spectral(nc,Ker,Theta,10)
acc = compute_purity(C_kmeans,Cgt,nc)
print('accuracy non-linear kmeans with SPECTRAL=',acc)
"""
Explanation: Question 1b: What is the clustering accuracy for the kernel K-Means algorithm with<br>
(1) Gaussian Kernel for the EM approach and the Spectral approach?<br>
(2) Polynomial Kernel for the EM approach and the Spectral approach?<br>
Hint: You may use functions Ker=construct_kernel(X,'gaussian') and Ker=construct_kernel(X,'polynomial',[1,0,2]) to compute the non-linear kernels<br>
Hint: You may use functions C_kmeans,__ = compute_kernel_kmeans_EM(K,Ker,Theta,10) for the EM kernel KMeans algorithm and C_kmeans,__ = compute_kernel_kmeans_spectral(K,Ker,Theta,10) for the Spectral kernel K-Means algorithm.<br>
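For reference, the standard forms of these kernels are
$$
K_{\mathrm{gauss}}(x_i, x_j) = \exp\!\big(-\|x_i - x_j\|^2 / (2\sigma^2)\big),
\qquad
K_{\mathrm{poly}}(x_i, x_j) = \big(a\,x_i^{\top}x_j + c\big)^d
$$
The exact parametrization used by construct_kernel (including the meaning of the [1,0,2] argument) is defined in lib/utils.py.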
End of explanation
"""
# Your code here
KNN_kernel = 50
Ker = construct_kernel(X,'kNN_gaussian',KNN_kernel)
Theta = np.ones(n) # Equal weight for each data
C_kmeans,_ = compute_kernel_kmeans_EM(nc,Ker,Theta,10)
acc = compute_purity(C_kmeans,Cgt,nc)
print('accuracy non-linear kmeans with EM=',acc)
C_kmeans,_ = compute_kernel_kmeans_spectral(nc,Ker,Theta,10)
acc = compute_purity(C_kmeans,Cgt,nc)
print('accuracy non-linear kmeans with SPECTRAL=',acc)
# Your code here
KNN_kernel = 50
Ker = construct_kernel(X,'kNN_cosine_binary',KNN_kernel)
Theta = np.ones(n) # Equal weight for each data
C_kmeans,_ = compute_kernel_kmeans_EM(nc,Ker,Theta,10)
acc = compute_purity(C_kmeans,Cgt,nc)
print('accuracy non-linear kmeans with EM=',acc)
C_kmeans,_ = compute_kernel_kmeans_spectral(nc,Ker,Theta,10)
acc = compute_purity(C_kmeans,Cgt,nc)
print('accuracy non-linear kmeans with SPECTRAL=',acc)
"""
Explanation: Question 1c: What is the clustering accuracy for the kernel K-Means algorithm with<br>
(1) KNN_Gaussian Kernel for the EM approach and the Spectral approach?<br>
(2) KNN_Cosine_Binary Kernel for the EM approach and the Spectral approach?<br>
You can test for the value KNN_kernel=50.<br>
Hint: You may use functions Ker = construct_kernel(X,'kNN_gaussian',KNN_kernel)
and Ker = construct_kernel(X,'kNN_cosine_binary',KNN_kernel) to compute the
non-linear kernels.
End of explanation
"""
|
Oslandia/open-data-bikes-analysis
|
notebooks/Prediction-Lyon.ipynb
|
mit
|
%matplotlib inline
import numpy as np
import pandas as pd
import graphviz
from xgboost import plot_tree, plot_importance, to_graphviz
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
import folium
%load_ext watermark
%watermark -d -v -p numpy,pandas,xgboost,matplotlib,folium -g -m -w
"""
Explanation: Bicycle-Station Availability Prediction
Use a supervised machine learning algorithm to predict the availability of each bicycle-sharing station in Lyon (France) based on the historical data.
I use the tree-based method XGBoost to predict a "probability" of bike availability for each station. A number close to 1.0 means that you have several available bikes. A number close to 0.0 means you don't have many bikes.
Some Imports
End of explanation
"""
from sources.prediction import (datareader, complete_data, cleanup, bikes_probability,
time_resampling, prepare_data_for_training, fit, prediction)
"""
Explanation: The module prediction.py contains some functions dedicated to bicycle-sharing station predictions.
End of explanation
"""
DATAFILE = '../data/lyon.csv'
raw = datareader(DATAFILE)
raw.head()
"""
Explanation: Read and prepare the data
End of explanation
"""
print(raw.last_update.min())
print(raw.last_update.max())
"""
Explanation: Min and max dates of the timeseries
End of explanation
"""
df_clean = cleanup(raw)
df_clean.head()
"""
Explanation: Clean up some columns: drop lines with the 'CLOSED' status, drop duplicates, remove missing values, etc.
End of explanation
"""
df = (df_clean.pipe(time_resampling)
.pipe(complete_data)
.pipe(bikes_probability))
df.head()
"""
Explanation: Pipe some data processing functions:
time resampling to get data every 10 minutes (i.e. 10T)
add some columns: day of the week, hour of the day, etc.
compute the bikes (number) "probability", which is just a per-station normalization: num_avail_bikes / total
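For reference, a plain-pandas sketch of what such a 10-minute resampling step could look like (the real time_resampling helper in lib/prediction.py may differ, and the column names 'number' and 'last_update' are assumptions based on the raw data shown above):
python
import pandas as pd
def naive_time_resampling(df, freq="10T"):
    # put each station's raw updates onto a regular 10-minute grid,
    # averaging the numeric columns inside each bin
    return (df.set_index("last_update")
              .groupby("number")
              .resample(freq)
              .mean()
              .reset_index())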
End of explanation
"""
start = pd.Timestamp("2017-07-11T00:00:00") # Tuesday
predict_date = pd.Timestamp("2017-07-26T10:00:00") # wednesday
# predict the next 30 minutes
freq = '30T'
# number of predictions at 'predict_date'.
# Here, the next 30 minutes and the next hour (30 minutes + 30 minutes).
# If you want to predict the next 3 hours, every 30 minutes, thus set periods=6
periods = 2
train_X, train_Y, test_X, test_Y = prepare_data_for_training(df,
predict_date,
freq=freq,
start=start,
periods=periods,
observation='probability')
train_X.head()
# Observation with a shift of T+30 minutes in comparison to train_X.
# This is the 'future' availability used for training.
train_Y.head()
train_X.iloc[3200:3210]
print(train_X.tail())
print(test_X.head())
"""
Explanation: This is the final dataset. For further prediction, I could add some weather forecast data to these features.
Prediction
Let's select a time window (start, stop) for a single prediction.
Two weeks of data for training
Predict the 'probability' for the next 30 minutes and 1 hour for every station.
Split the dataset into train and test datasets
Compute the RMSE (Root Mean Squared Error) between the observations (i.e. probability) and the predicted values
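The error metric used here is
$$
\mathrm{RMSE} = \sqrt{\frac{1}{N}\sum_{i=1}^{N}\big(\hat{y}_i - y_i\big)^2}
$$
i.e. exactly what np.sqrt(np.mean((pred - test_Y)**2)) computes further down.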
End of explanation
"""
# the 'booster'
bst, train_prg = fit(train_X, train_Y, test_X, test_Y)
train_prg = pd.DataFrame({"train": train_prg["train"]['rmse'], "test": train_prg["test"]['rmse']})
colors = sns.color_palette('Set1', 2)
sns.palplot(colors)
with sns.axes_style("darkgrid", {'xtick.major.size': 8.0}):
fig, ax = plt.subplots(figsize=(10,6))
for k, label, color in zip(train_prg.values.T, range(2), colors):
print(k)
plt.plot(100*k, color=color, label=label)
plt.legend(train_prg.columns)
plt.xlabel('XGBoost iteration')
plt.ylabel("Error (%)")
plt.xticks(np.linspace(0, 25, 6))
plt.yticks(np.linspace(0, 30, 6))
sns.despine()
plt.tight_layout()
plt.savefig("../images/lyon_prediction_training_curves.png")
# compute the prediction from test_*
pred = prediction(bst, test_X, test_Y)
pred[:5]
print("Number of predictions: {}".format(len(pred)))
# Compute the RMSE
rmse = np.sqrt(np.mean((pred - test_Y)**2))
rmse
# must install graphviz
# plot_tree(bst)
"""
Explanation: The fit function creates the data structures XGBoost needs from the train and test DataFrames (i.e. xgb.DMatrix), configures the model and launches the training with the objective 'reg:logistic'. It's a regression, not a classification.
End of explanation
"""
result = test_X.copy()
result['ts_future'] = test_Y.index.shift(1, freq=freq)
result['observation'] = test_Y.copy()
result['prediction'] = pred
result['error'] = pred - test_Y
result['relative_error'] = 100. * np.abs(pred - test_Y) / test_Y
result['quad_error'] = (pred - test_Y)**2
result.to_csv("prediction-freq-{}-{}.csv".format(freq, predict_date))
result.head(10)
"""
Explanation: Store the results
End of explanation
"""
locations = pd.read_csv("../data/lyon-stations.csv")
locations.shape
"""
Explanation: Some maps
CSV file with station coordinates
End of explanation
"""
mask = locations['idstation'].isin(result.station.unique())
mask.sum()
locations = locations[mask]
locations = locations.rename(columns={'idstation': 'station'})
locations.head()
"""
Explanation: Some stations were removed when the data were cleaned up. Remove them from the location data.
End of explanation
"""
locations["nom"] = locations['nom'].str.replace("'", "'")
"""
Explanation: Some station names contain the ' character. Replace it with its HTML entity so that folium renders the popups correctly.
End of explanation
"""
data_to_plot = result.loc[predict_date]
data_to_plot.shape
data_to_plot.head()
yhat = data_to_plot[['station', 'prediction']].merge(locations, on='station')
yhat.head()
y = data_to_plot[['station', 'observation']].merge(locations, on='station')
error = data_to_plot[['station', 'error']].merge(locations, on='station')
colormap = 'RdYlBu'
cmap = plt.get_cmap(colormap)
"""
Explanation: Select the prediction data for a specific timestamp
End of explanation
"""
# show the colormap use to plot the stations, values [0, 1]
gradient = np.linspace(0, 1, 256)
gradient = np.vstack((gradient, gradient))
fig, ax = plt.subplots(1)
fig.subplots_adjust(top=0.95, bottom=0.80, left=0.2, right=0.99)
ax.set_xticks([0., 64, 128, 192, 256])
ax.set_xticklabels([0., 0.25, 0.5, 0.75, 1.])
ax.set_xlabel('Bikes Availability')
ax.imshow(gradient, aspect='auto', cmap=cmap, vmin=0, vmax=1)
plt.title('Colormap used to plot stations')
"""
Explanation: See
https://matplotlib.org/examples/color/colormaps_reference.html
https://matplotlib.org/api/colors_api.html
End of explanation
"""
color = lambda x: mpl.colors.to_hex(cmap(x))
# Lyon (France) Position
position = [45.750000, 4.850000]
mp_pred = folium.Map(location=position, zoom_start=13, tiles='cartodbpositron')
# Map of the predicted values
for _,row in yhat.iterrows():
folium.CircleMarker(
location=[row['lat'], row['lon']],
radius=2,
popup=row['nom'],
color=color(row['prediction']),
fill=True,
fill_opacity=0.3,
fill_color=color(row['prediction'])
).add_to(mp_pred)
mp_pred
# Map for the observation
mp_obs = folium.Map(location=position, zoom_start=13, tiles='cartodbpositron')
# Map of the observations
for _,row in y.iterrows():
folium.CircleMarker(
location=[row['lat'], row['lon']],
radius=2,
popup=row['nom'],
color=color(row['observation']),
fill=True,
fill_opacity=0.3,
fill_color=color(row['observation'])
).add_to(mp_obs)
mp_obs
# Colormap for error (by default, the color map fits for [0, 1] values)
norm = mpl.colors.Normalize(vmin=-1, vmax=1)
color_error = lambda x: mpl.colors.to_hex(cmap(norm(x)))
# Map for the errors
mp_error = folium.Map(location=position, zoom_start=13, tiles='cartodbpositron')
# Map of the errors
for _,row in error.iterrows():
folium.CircleMarker(
location=[row['lat'], row['lon']],
radius=2,
popup=row['nom'],
color=color_error(row['error']),
fill=True,
fill_opacity=0.3,
fill_color=color_error(row['error'])
).add_to(mp_error)
mp_error
"""
Explanation: Red stations if there are 0 or a few available bikes
Blue stations if there are several available bikes
End of explanation
"""
|
zerothi/ts-tbt-sisl-tutorial
|
TB_08/run.ipynb
|
gpl-3.0
|
graphene = sisl.geom.graphene(orthogonal=True)
# Graphene tight-binding parameters
on, nn = 0, -2.7
H_minimal = sisl.Hamiltonian(graphene)
H_minimal.construct([[0.1, 1.44], [on, nn]])
"""
Explanation: In TBtrans and TranSiesta one is capable of performing real space transport calculations by using real space self-energies (see here).
Currently the real space self-energy calculation has to be performed in sisl since it is not implemented in TranSiesta.
A real space self-energy is a $\mathbf k$ averaged self-energy which can emulate any 2D or 3D electrode. I.e. for an STM junction a tip and a surface. In such a system the surface could be modelled using the real space self-energy to remove mirror effects of STM tips. This is important since the distance between periodic images disturbs the calculation due to long range potential effects.
The basic principle for calculating the real space self-energy is the Brillouin zone integral:
\begin{equation}
\mathbf G_{\mathcal R}(E) = \int_{\mathrm{BZ}}\mathbf G_\mathbf k
\end{equation}
In this example we will construct an STM tip probing a graphene flake.
This example is rather complicated and is the reason why basically everything is already done for you. Please try and understand each step.
We start by creating the graphene tight-binding model.
End of explanation
"""
# object = H_minimal
# semi_axes = 0, x-axis uses recursive self-energy calculation
# k_axis = 1, y-axis uses a Brillouin zone integral
# unfold = (10, 10, 1), the full real-space green function is equivalent to the system
# H_minimal.tile(10, 0).tile(10, 1)
RSSE = sisl.RealSpaceSE(H_minimal, 0, 1, (10, 10, 1))
"""
Explanation: Once the minimal graphene unit-cell (here orthogonal) is created we now turn to the calculation of the real space self-energy.
The construction of this object is somewhat complicated and has a set of required input options:
- object: the Hamiltonian
- semi_axes: which axes to use for the recursive self-energy
- k_axis: which axis to integrate in the Brillouin zone
- unfold: how many times the object needs to be unfolded along each lattice vector, this is an integer vector of length 3
End of explanation
"""
H_elec, elec_indices = RSSE.real_space_coupling(ret_indices=True)
H_elec.write('GRAPHENE.nc')
"""
Explanation: Now we can create the real space self-energy.
In TBtrans (and TranSiesta) the electrode atomic indices must be in consecutive order.
This is a little troublesome since the natural order in a device would be an order according to $x$, $y$ or $z$. To create the correct order we extract the real space coupling matrix which is where the real space self-energy would live, the self-energy is calculated using:
\begin{equation}
\boldsymbol\Sigma^{\mathcal R} = E \mathbf S - \mathbf H - \Big[\int_{\mathrm{BZ}} \mathbf G\Big]^{-1}.
\end{equation}
Another way to calculate the self-energy would be to transfer the Green function from the infinite bulk into the region of interest:
\begin{equation}
\boldsymbol\Sigma^{\mathcal R} = \mathbf V_{\mathcal R\infty}\mathbf G_{\infty\setminus\mathcal R}\mathbf V_{\infty\mathcal R}.
\end{equation}
From the 2nd equation it is obvious that the self-energy only lives on the boundary that $\mathbf V_{\infty\mathcal R}$ couples to. Exactly this region is extracted using real_space_coupling as below. Take some time to draw a simple 2D lattice coupling and confirm the area that the real-space self-energy couples to.
In this example we also retrieve the indices for the electrode atoms, those that connect out to the infinite plane.
End of explanation
"""
H = RSSE.real_space_parent()
# Create the true device by re-arranging the atoms
indices = np.arange(len(H))
indices = np.delete(indices, elec_indices)
# first electrodes, then rest of device
indices = np.concatenate([elec_indices, indices])
# Now re-arange matrix
H = H.sub(indices)
"""
Explanation: The above yields the electrode region which contains the self-energies. The full device region is nothing but H_minimal tiled $10\times10$ times with an attached STM tip on top. Here we need to arrange the electrode atoms first, then the rest of the device region. The real_space_parent method returns the Hamiltonian that obeys the unfolded size. In this case $10\times10$ times larger. One should always use this method to get the correct device order of atoms since the order of tiling is determined by the semi_axes and k_axis arguments.
End of explanation
"""
STM = sisl.Geometry([0, 0, 0], atoms=sisl.Atom('Au', R=1.0001), sc=sisl.SuperCell([10, 10, 1], nsc=[1, 1, 3]))
H_STM = sisl.Hamiltonian(STM)
H_STM.construct([(0.1, 1.1), (0, -0.75)])
H_STM.write('STM.nc')
mid_xyz = H.geometry.center()
idx = H.close(mid_xyz, R=1.33)[0]
H_device = H.add(H_STM, offset=H.geometry.xyz[idx] - H_STM.geometry.xyz[0] + [0, 0, 2])
na = len(H)
idx = H_device.close(na, R=(0.1, 2.25))[1][0]
H_device[na, idx] = -0.1
H_device[idx, na] = -0.1
H_device.write('DEVICE.nc')
"""
Explanation: Lastly, we need to add the STM tip. Here we simply add a gold atom and manually add the hoppings. Since this is tight-binding we have full control over the self-energy and potential landscape. Therefore we don't need to extend the electrode region to screen off the tip region. In DFT systems, a properly screened region is required.
End of explanation
"""
# A real space transport calculation ONLY needs the Gamma-point
gamma = sisl.MonkhorstPack(H_elec, [1] * 3)
# Energy contour
dE = 0.04
E = np.arange(-2, 2 + dE / 2, dE)
sisl.io.tableSile("contour.E", 'w').write_data(E, np.zeros(E.size) + dE)
# Now create the file (should take around 3-4 minutes)
eta = 0.001 * 1j
with sisl.io.tbtgfSileTBtrans("GRAPHENE.TBTGF") as f:
f.write_header(gamma, E + eta)
for ispin, new_k, k, e in tqdm(f, unit="rsSE"):
if new_k:
f.write_hamiltonian(H_elec.Hk(format='array', dtype=np.complex128))
SeHSE = RSSE.self_energy(e + eta, bulk=True, coupling=True)
f.write_self_energy(SeHSE)
"""
Explanation: Before we can run calculations we need to create the real space self-energy for the graphene flake in sisl.
Since the algorithm is not implemented in TBtrans (nor TranSiesta) it needs to be done here.
This is somewhat complicated since the file requires a specific ordering. For ease, this tutorial implements it for you.
End of explanation
"""
tbt = sisl.get_sile('siesta.TBT.nc')
"""
Explanation: Exercises
Calculate transport, density of state and bond-currents.
Please search the manual on how to edit the RUN.fdf according to the following:
Force tbtrans to use the generated TBTGF file. This is the same as in TB_07 example, i.e. using out-of-core calculations
Force tbtrans to use an energy grid defined in an external file (contour.E)
Plot the bond-currents and check their symmetry, does the symmetry depend on the injection point?
Is there a particular reason for choosing semi_axes and k_axis as they are chosen? Or could they be swapped?
TIME Redo the calculations using 3 electrodes (left/right/tip) using k-points. Converge transmission and then plot the bond-currents.
Do they look as the real space calculation? If they are the same, why?
End of explanation
"""
|
beyondvalence/biof509_wtl
|
Wk09-dataset-processing/Wk09_Dataset-preprocessing_wl.ipynb
|
mit
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
%matplotlib inline
"""
Explanation: Week 9 - Dataset preprocessing
Before we utilize machine learning algorithms we must first prepare our dataset. This can often take a significant amount of time and can have a large impact on the performance of our models.
We will be looking at four different types of data:
Tabular data
Image data
Text
Tabular data
We will look at three different steps we may need to take when handling tabular data:
Missing data
Normalization
Categorical data
Image data
Image data can present a number of issues that we must address to maximize performance:
Histogram normalization
Windows
Pyramids (for detection at different scales)
Centering
Text
Text can present a number of issues, mainly due to the number of words that can be found in our features. There are a number of ways we can convert from text to usable features:
Bag of words
Parsing
End of explanation
"""
from sklearn import linear_model
x = np.array([[0, 0], [1, 1], [2, 2]])
y = np.array([0, 1, 2])
print(x,y)
clf = linear_model.LinearRegression()
clf.fit(x, y)
print("1. coef", clf.coef_)
x_missing = np.array([[0, 0], [1, np.nan], [2, 2]])
print(x_missing, y)
clf = linear_model.LinearRegression()
clf.fit(x_missing, y)
print(clf.coef_)
import pandas as pd
x = pd.DataFrame([[0,1,2,3,4,5,6],
[2,np.nan,7,4,9,1,3],
[0.1,0.12,0.11,0.15,0.16,0.11,0.14],
[100,120,np.nan,127,130,121,124],
[4,1,7,9,0,2,np.nan]], ).T
x.columns = ['A', 'B', 'C', 'D', 'E']
y = pd.Series([29.0,
31.2,
63.25,
57.27,
66.3,
26.21,
48.24])
print(x,"\n\n", y, sep="")
x.dropna()
x.fillna(value={'A':100,'B':200,'C':400,'D':600,'E':1200})
x.fillna(value=x.mean())
"""
Explanation: Tabular data
Missing data
Normalization
Categorical data
Missing data
There are a number of ways to handle missing data:
Drop all records with a value missing
Substitute all missing values with an average value
Substitute all missing values with some placeholder value, i.e. 0, 1e9, -1e9, etc
Predict missing values based on other attributes
Add additional feature indicating when a value is missing
If the machine learning model will be used with new data it is important to consider the possibility of receiving records with values missing that we have not observed previously in the training dataset.
The simplest approach is to remove any records that have missing data. Unfortunately missing values are often not randomly distributed through a dataset and removing them can introduce bias.
An alternative approach is to substitute the missing values. This can be with the mean of the feature across all the records or the value can be predicted based on the values of the other features in the dataset. Placeholder values can also be used with decision trees but do not work as well for most other algorithms.
Finally, missing values can themselves be useful features. Adding an additional feature indicating when a value is missing is often used to include this information.
End of explanation
"""
x_filled = x.fillna(value=x.mean())
print(x_filled)
x_norm = (x_filled - x_filled.min()) / (x_filled.max() - x_filled.min())
print(x_norm)
from sklearn import preprocessing
scaling = preprocessing.MinMaxScaler().fit(x_filled)
scaling.transform(x_filled)
"""
Explanation: Normalization
Many machine learning algorithms expect features to have similar distributions and scales.
A classic example is gradient descent: if features are on different scales, some weights will update faster than others because the feature values scale the weight updates.
There are two common approaches to normalization:
Z-score standardization
Min-max scaling
Z-score standardization
Z-score standardization rescales values so that they have a mean of zero and a standard deviation of 1. Specifically we perform the following transformation:
$$z = \frac{x - \mu}{\sigma}$$
Min-max scaling
An alternative is min-max scaling that transforms data into the range of 0 to 1. Specifically:
$$x_{norm} = \frac{x - x_{min}}{x_{max} - x_{min}}$$
Min-max scaling is less commonly used but can be useful for image data and in some neural networks.
End of explanation
"""
x = pd.DataFrame([[0,1,2,3,4,5,6],
[2,np.nan,7,4,9,1,3],
[0.1,0.12,0.11,0.15,0.16,0.11,0.14],
[100,120,np.nan,127,130,121,124],
['Green','Red','Blue','Blue','Green','Red','Green']], ).T
x.columns = ['A', 'B', 'C', 'D', 'E']
print(x)
x_cat = x.copy()
for val in x['E'].unique():
x_cat['E_{0}'.format(val)] = x_cat['E'] == val
x_cat
"""
Explanation: Categorical data
Categorical data can take one of a number of possible values. The different categories may be related to each other or be largely independent and unordered.
Continuous variables can be converted to categorical variables by applying a threshold.
End of explanation
"""
# Exercise 1
print(x)
x_ex1 = x.copy()
x_ex1['is_NA'] = x_ex1.isnull().sum(axis=1)
x_ex1 = x_ex1.fillna(value={'B':x_ex1['B'].mean(),'D':x_ex1['D'].mean()})
print(x_ex1)
# Exercise 2
print(x)
x_ex2 = x.copy()
# remove categorical variables
for val in x_ex2['E'].unique():
x_ex2['E_{0}'.format(val)] = x_ex2['E'] == val
x_ex2 = x_ex2.drop('E',1)
# replace missing values
x_ex2 = x_ex2.fillna(value={'B':x_ex2['B'].mean(),'D':x_ex2['D'].mean()})
print(x_ex2)
from sklearn import preprocessing
scale = preprocessing.StandardScaler().fit(x_ex2)
x_ex2_s = scale.transform(x_ex2)
print(x_ex2_s)
x_ex2_s.mean(axis=0) # column means close to zero
x_ex2_s.std(axis=0) # standard deviations at 1
# Exercise 3
x_ex3 = x.copy()
# set threshold for 'C' at 0.125
x_ex3['C'] = x_ex3['C'] > 0.125
print(x_ex3)
"""
Explanation: Exercises
Substitute missing values in x with the column mean and add an additional column to indicate when missing values have been substituted. The isnull method on the pandas dataframe may be useful.
Convert x to the z-scaled values. The StandardScaler method in the preprocessing module can be used or the z-scaled values calculated directly.
Convert x['C'] into a categorical variable using a threshold of 0.125
End of explanation
"""
# http://scikit-image.org/docs/stable/auto_examples/color_exposure/plot_equalize.html#example-color-exposure-plot-equalize-py
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
ax_img.set_adjustable('box-forced')
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig = plt.figure(figsize=(10, 7))
axes = np.zeros((2,4), dtype=np.object)
axes[0,0] = fig.add_subplot(2, 4, 1)
for i in range(1,4):
axes[0,i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])
for i in range(0,4):
axes[1,i] = fig.add_subplot(2, 4, 5+i)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.tight_layout()
plt.show()
from sklearn.feature_extraction import image
img = data.page()
fig, ax = plt.subplots(1,1)
ax.imshow(img, cmap=plt.cm.gray)
ax.set_axis_off()
plt.show()
print(img.shape)
patches = image.extract_patches_2d(img, (20, 20), max_patches=2, random_state=0)
patches.shape
plt.imshow(patches[0], cmap=plt.cm.gray)
plt.show()
from sklearn import datasets
digits = datasets.load_digits()
#print(digits.DESCR)
fig, ax = plt.subplots(1,1, figsize=(1,1))
ax.imshow(digits.data[0].reshape((8,8)), cmap=plt.cm.gray, interpolation='nearest')
"""
Explanation: Image data
Depending on the type of task being performed there are a variety of steps we may want to take in working with images:
Histogram normalization
Windows and pyramids (for detection at different scales)
Centering
Occasionally the camera used to generate an image records only 10 to 14 bits while a 16-bit file format is used. In this situation all the pixel intensities will sit in the lower part of the range. Rescaling to the full range (or to 0-1) can be useful.
Further processing can be done to alter the histogram of the image.
When looking for particular features in an image, a sliding window can be used to check different locations. This can be combined with an image pyramid to detect features at different scales. This is often needed when objects can be at different distances from the camera.
If objects are sparsely distributed in an image, a faster approach than using sliding windows is to identify objects with a simple threshold and then test only the bounding boxes containing objects. Before running these through a model, centering based on intensity can be a useful approach. Small offsets, rotations and skewing can be used to generate additional training data (a small sketch of this follows below).
End of explanation
"""
from sklearn.datasets import fetch_20newsgroups
twenty_train = fetch_20newsgroups(subset='train',
categories=['comp.graphics', 'sci.med'], shuffle=True, random_state=0)
print(twenty_train.target_names)
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(twenty_train.data)
print(X_train_counts.shape)
from sklearn.feature_extraction.text import TfidfTransformer
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
print(X_train_tfidf.shape, X_train_tfidf[:5,:15].toarray())
print(twenty_train.data[0])
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(twenty_train.data[0:1])
print(X_train_counts[0].toarray())
print(count_vect.vocabulary_.keys())
"""
Explanation: Text
When working with text the simplest approach is known as bag of words. In this approach we simply count the number of instances of each word, and then adjust the values based on how commonly the word is used.
The first task is to break a piece of text up into individual tokens. The number of occurrences of each word is then recorded. More rarely used words are likely to be more interesting and so word counts are scaled by the inverse document frequency.
We can extend this to look at not just individual words but also bigrams and trigrams.
End of explanation
"""
# Exercise 1
from sklearn.feature_extraction import image
img = data.page()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig = plt.figure(figsize=(8, 5))
axes = np.zeros((2,4), dtype=np.object)
axes[0,0] = fig.add_subplot(2, 4, 1)
for i in range(1,4):
axes[0,i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])
for i in range(0,4):
axes[1,i] = fig.add_subplot(2, 4, 5+i)
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.tight_layout()
plt.show()
# Exercise 2
from skimage.transform import resize
fig = plt.figure(figsize=(12, 4))
plt.subplot(1,3,1)
patches = image.extract_patches_2d(img, (40, 40), max_patches=2, random_state=0)
# rescale each patch to a common size before displaying
plt.imshow(resize(patches[0], (200, 200)), cmap=plt.cm.gray)
plt.subplot(1,3,2)
patches2 = image.extract_patches_2d(img, (20, 20), max_patches=2, random_state=0)
plt.imshow(resize(patches2[0], (200, 200)), cmap=plt.cm.gray)
plt.subplot(1,3,3)
patches3 = image.extract_patches_2d(img, (10, 10), max_patches=2, random_state=0)
plt.imshow(resize(patches3[0], (200, 200)), cmap=plt.cm.gray)
plt.show()
# Exercise 3
count_vect = CountVectorizer(stop_words=("the", "a"))
X_train_counts = count_vect.fit_transform(twenty_train.data[0:1])
print(X_train_counts[0].toarray())
print(count_vect.vocabulary_.keys())
# Exercise 4
count_vect = CountVectorizer(stop_words=("the", "a"), ngram_range=(1,2))
X_train_counts = count_vect.fit_transform(twenty_train.data[0:1])
print(X_train_counts[0].toarray())
print(count_vect.vocabulary_.keys())
"""
Explanation: Exercises
Choose one of the histogram processing methods and apply it to the page example.
Take patches for the page example used above at different scales (10, 20 and 40 pixels). The resulting patches should be rescaled to have the same size.
Change the vectorization approach to ignore very common words such as 'the' and 'a'. These are known as stop words. Reading the documentation should help.
Change the vectorization approach to consider both single words and sequences of 2 words. Reading the documentation should help.
End of explanation
"""
|
phoebe-project/phoebe2-docs
|
2.3/tutorials/constraints_builtin.ipynb
|
gpl-3.0
|
#!pip install -I "phoebe>=2.3,<2.4"
"""
Explanation: Advanced: Built-In Constraints
Setup
Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
"""
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new Bundle.
End of explanation
"""
b.filter(qualifier='asini', context='constraint')
b.get_parameter(qualifier='asini', component='binary', context='constraint')
"""
Explanation: Built-in Constraints
There are a number of built-in constraints that can be applied to our system. Those added by default are listed below as well as in the API docs for b.add_constraint:
asini
These constraints handle computing the projected semi-major axis (either for an orbit or a star) along the line of sight and can be automatically inverted to solve for 'asini', 'sma', or 'incl'.
End of explanation
"""
b.get_parameter(qualifier='esinw', context='constraint')
b.get_parameter(qualifier='ecosw', context='constraint')
"""
Explanation: esinw, ecosw
These constraints handle computing the projected eccentricity which can be helpful in that they are better representations of the geometry of a light curve and result in symmetric posteriors for near-circular orbits.
Both can be inverted to also automatically solve for 'ecc' or 'per0'.
End of explanation
"""
b.get_parameter(qualifier='t0_perpass', context='constraint')
"""
Explanation: t0
This constraint handles converting between different t0 conventions - namely providing a reference time at periastron passage (t0_perpass) and at superior conjunction (t0_supconj).
Currently, this constraint only supports inverting to solve for 't0_supconj' (i.e. you cannot automatically invert this constraint to constrain phshift or per0).
End of explanation
"""
b.filter(qualifier='freq', context='constraint')
b.get_parameter(qualifier='freq', component='binary', context='constraint')
b.get_parameter(qualifier='freq', component='primary', context='constraint')
"""
Explanation: freq
This constraint handles the simple conversion to frequency from period - whether that be rotational or orbital - and does support inversion to solve for 'period'.
End of explanation
"""
b.filter(qualifier='mass', context='constraint')
b.get_parameter(qualifier='mass', component='primary', context='constraint')
"""
Explanation: mass
This constraint handles solving for the mass of a component by obeying Kepler's third law within the parent orbit.
It can be inverted to solve for 'sma', 'q', or 'period' (in addition to 'mass').
End of explanation
"""
b.filter(qualifier='sma', context='constraint')
b.get_parameter(qualifier='sma', component='primary', context='constraint')
"""
Explanation: component sma
This constraint handles computing the semi-major axis of a component about the center of mass of its parent orbit. Note that this is not the same as the semi-major axis of the parent orbit.
This currently can be inverted to solve for 'sma' of the parent orbit, but not 'q'.
End of explanation
"""
b.filter(qualifier='asini', context='constraint')
b.get_parameter(qualifier='asini', component='primary', context='constraint')
"""
Explanation: component asini
This constraint handles computing the projected semi-major axis of a component about the center of mass of its parent orbit. Note that this is not the same as the asini of the parent orbit.
This currently can be inverted to solve for 'sma' of the parent orbit, but not 'q' or 'incl'.
End of explanation
"""
b.filter(qualifier='requiv_max', context='constraint')
b.get_parameter(qualifier='requiv_max', component='primary', context='constraint')
"""
Explanation: requiv_max
This constraint handles solving for the maximum equivalent radius (for a detached system).
For a semi-detached system, the radius itself is constrained to be exactly this value.
End of explanation
"""
b.filter(qualifier='period', context='constraint')
b.get_parameter(qualifier='period', component='primary', context='constraint')
"""
Explanation: rotation period
This constraint handles computing the rotation period of a star given its synchronicity parameter (syncpar).
It can be inverted to solve for any of the three involved parameters: the rotational 'period', the orbital 'period', or 'syncpar'.
End of explanation
"""
b.filter(qualifier='incl', context='constraint')
b.get_parameter(qualifier='incl', component='primary', context='constraint')
b.filter(qualifier='long_an', context='constraint')
b.get_parameter(qualifier='long_an', component='primary', context='constraint')
"""
Explanation: pitch/yaw (incl/long_an)
pitch constrains the relation between the orbital and rotational inclination whereas yaw constrains the relation between the orbital and rotational long_an. When pitch and yaw are set to 0, the system is aligned.
End of explanation
"""
|
tkurfurst/deep-learning
|
language-translation/dlnd_language_translation.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
"""
Explanation: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
End of explanation
"""
view_sentence_range = (5025, 5036)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
# len({word: None for word in source_text.split()}))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
"""
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
"""
# TODO: Implement Function
source_id_text = [[source_vocab_to_int[word] for word in sentence.split()] for sentence in source_text.split('\n')]
target_id_text = [[target_vocab_to_int[word] for word in sentence.split()] + [target_vocab_to_int['<EOS>']] for sentence in target_text.split('\n')]
return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
"""
Explanation: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into numbers so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
End of explanation
"""
def model_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
"""
# TODO: Implement Function
inputs = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None])
learn_rate = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, learn_rate, keep_prob
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoding_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Return the placeholders in the following tuple: (Input, Targets, Learning Rate, Keep Probability)
End of explanation
"""
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
"""
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
"""
# TODO: Implement Function
td_end_removed = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
td_start_added = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), td_end_removed], 1)
return td_start_added
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_decoding_input(process_decoding_input)
"""
Explanation: Process Decoding Input
Implement process_decoding_input using TensorFlow to remove the last word id from each batch in target_data and concat the GO ID to the beginning of each batch.
End of explanation
"""
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
"""
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
"""
# TODO: Implement Function
# Encoder embedding
# source_vocab_size = len(source_letter_to_int)
# enc_embed_input = tf.contrib.layers.embed_sequence(rnn_inputs, 1000, rnn_size)
# Encoder
# enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
enc_LSTM = tf.contrib.rnn.BasicLSTMCell(rnn_size)
enc_LSTM = tf.contrib.rnn.DropoutWrapper(enc_LSTM, output_keep_prob=keep_prob)
enc_LSTM = tf.contrib.rnn.MultiRNNCell([enc_LSTM] * num_layers)
enc_RNN_out, enc_RNN_state = tf.nn.dynamic_rnn(enc_LSTM, rnn_inputs, dtype=tf.float32)
return enc_RNN_state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
"""
Explanation: Encoding
Implement encoding_layer() to create a Encoder RNN layer using tf.nn.dynamic_rnn().
End of explanation
"""
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
"""
Create a decoding layer for training
:param encoder_state: Encoder State *
:param dec_cell: Decoder RNN Cell *
:param dec_embed_input: Decoder embedded input *
:param sequence_length: Sequence Length *
:param decoding_scope: TenorFlow Variable Scope for decoding *
:param output_fn: Function to apply the output layer *
:param keep_prob: Dropout keep probability
:return: Train Logits
"""
# TODO: Implement Function
train_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state, name=None)
train_pred, fin_state, fin_cntxt_state = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell,\
train_decoder_fn,inputs=dec_embed_input,sequence_length=sequence_length,\
parallel_iterations=None, swap_memory=False,time_major=False, scope=decoding_scope, name=None)
train_logits = output_fn(train_pred)
return train_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
"""
Explanation: Decoding - Training
Create training logits using tf.contrib.seq2seq.simple_decoder_fn_train() and tf.contrib.seq2seq.dynamic_rnn_decoder(). Apply the output_fn to the tf.contrib.seq2seq.dynamic_rnn_decoder() outputs.
End of explanation
"""
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
"""
Create a decoding layer for inference
:param encoder_state: Encoder state *
:param dec_cell: Decoder RNN Cell *
:param dec_embeddings: Decoder embeddings *
:param start_of_sequence_id: GO ID *
:param end_of_sequence_id: EOS Id *
:param maximum_length: The maximum allowed time steps to decode *
:param vocab_size: Size of vocabulary *
:param decoding_scope: TensorFlow Variable Scope for decoding *
:param output_fn: Function to apply the output layer *
:param keep_prob: Dropout keep probability
:return: Inference Logits
"""
# TODO: Implement Function
    infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(output_fn, encoder_state, dec_embeddings,\
        start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, dtype=tf.int32, name=None)
infer_logits, fin_state, fin_cntxt_state = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell,\
infer_decoder_fn, inputs=None, sequence_length=maximum_length,\
parallel_iterations=None, swap_memory=False,time_major=False, scope=decoding_scope, name=None)
# infer_logits = output_fn(infer_pred)
return infer_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
"""
Explanation: Decoding - Inference
Create inference logits using tf.contrib.seq2seq.simple_decoder_fn_inference() and tf.contrib.seq2seq.dynamic_rnn_decoder().
End of explanation
"""
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
"""
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length *
:param rnn_size: RNN Size *
:param num_layers: Number of layers *
:param target_vocab_to_int: Dictionary to go from the target words to an id *
:param keep_prob: Dropout keep probability *
:return: Tuple of (Training Logits, Inference Logits)
"""
# TODO: Implement Function
# Decoder RNNs
dec_LSTM = tf.contrib.rnn.BasicLSTMCell(rnn_size)
dec_LSTM = tf.contrib.rnn.DropoutWrapper(dec_LSTM, output_keep_prob=keep_prob)
dec_LSTM = tf.contrib.rnn.MultiRNNCell([dec_LSTM] * num_layers)
# Create Output Function
with tf.variable_scope("decoding") as decoding_scope:
# Output Layer
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
# Train Logits
train_logits = decoding_layer_train(encoder_state, dec_LSTM,\
dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
# Infer Logits
infer_logits = decoding_layer_infer(encoder_state, dec_LSTM,\
dec_embeddings, target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>'], sequence_length, vocab_size, decoding_scope, output_fn, keep_prob)
return train_logits, infer_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
"""
Explanation: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Create RNN cell for decoding using rnn_size and num_layers.
Create the output function using a lambda to transform its input, logits, to class logits.
Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
End of explanation
"""
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
"""
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder **
:param target_data: Target placeholder **
:param keep_prob: Dropout keep probability placeholder **
:param batch_size: Batch Size **
:param sequence_length: Sequence Length **
:param source_vocab_size: Source vocabulary size **
:param target_vocab_size: Target vocabulary size **
:param enc_embedding_size: Decoder embedding size **
:param dec_embedding_size: Encoder embedding size **
:param rnn_size: RNN Size **
:param num_layers: Number of layers **
:param target_vocab_to_int: Dictionary to go from the target words to an id **
:return: Tuple of (Training Logits, Inference Logits)
"""
# TODO: Implement Function
# Apply embedding to the input data for the encoder
enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
# Encode the input
encoder_state = encoding_layer(enc_embed_input, rnn_size, num_layers, keep_prob)
# Process target data
p_target_data = process_decoding_input(target_data, target_vocab_to_int, batch_size)
# Apply embedding to the target data for the decoder
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, p_target_data)
# Decode the encoded input
train_logits, infer_logits = decoding_layer(dec_embed_input, dec_embeddings, encoder_state,\
target_vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, infer_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
Apply embedding to the input data for the encoder.
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.
Apply embedding to the target data for the decoder.
Decode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).
End of explanation
"""
# Number of Epochs
epochs = 10
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 128
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 128
decoding_embedding_size = 128
# Learning Rate
learning_rate = 0.005
# Dropout Keep Probability
keep_probability = 0.8
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import time
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""
Explanation: Save Parameters
Save the batch_size and save_path parameters for inference.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def sentence_to_seq(sentence, vocab_to_int):
"""
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
"""
# TODO: Implement Function
sentence = sentence.lower()
sequence = [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in sentence.split()]
return sequence
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
"""
Explanation: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary, to the <UNK> word id.
End of explanation
"""
translate_sentence = 'She dislikes lions, but loves grapes in Paris in the winter.'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
"""
Explanation: Translate
This will translate translate_sentence from English to French.
End of explanation
"""
|
tensorflow/docs-l10n
|
site/ja/probability/examples/Fitting_DPMM_Using_pSGLD.ipynb
|
apache-2.0
|
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Probability Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
plt.style.use('ggplot')
tfd = tfp.distributions
def session_options(enable_gpu_ram_resizing=True):
"""Convenience function which sets common `tf.Session` options."""
config = tf.ConfigProto()
config.log_device_placement = True
if enable_gpu_ram_resizing:
# `allow_growth=True` makes it possible to connect multiple colabs to your
# GPU. Otherwise the colab malloc's all GPU ram.
config.gpu_options.allow_growth = True
return config
def reset_sess(config=None):
"""Convenience function to create the TF graph and session, or reset them."""
if config is None:
config = session_options()
tf.reset_default_graph()
global sess
try:
sess.close()
except:
pass
sess = tf.InteractiveSession(config=config)
# For reproducibility
rng = np.random.RandomState(seed=45)
tf.set_random_seed(76)
# Precision
dtype = np.float64
# Number of training samples
num_samples = 50000
# Ground truth loc values which we will infer later on. The scale is 1.
true_loc = np.array([[-4, -4],
[0, 0],
[4, 4]], dtype)
true_components_num, dims = true_loc.shape
# Generate training samples from ground truth loc
true_hidden_component = rng.randint(0, true_components_num, num_samples)
observations = (true_loc[true_hidden_component]
+ rng.randn(num_samples, dims).astype(dtype))
# Visualize samples
plt.scatter(observations[:, 0], observations[:, 1], 1)
plt.axis([-10, 10, -10, 10])
plt.show()
"""
Explanation: Fitting a Dirichlet Process Mixture Model Using Preconditioned Stochastic Gradient Langevin Dynamics
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/probability/examples/Fitting_DPMM_Using_pSGLD"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">View on TensorFlow.org</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/Fitting_DPMM_Using_pSGLD.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/Fitting_DPMM_Using_pSGLD.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/probability/examples/Fitting_DPMM_Using_pSGLD.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">Download notebook</a></td>
</table>
In this notebook, we demonstrate how to fit a Dirichlet process mixture of Gaussian distributions, clustering a large number of samples and inferring the number of clusters at the same time. We use preconditioned Stochastic Gradient Langevin Dynamics (pSGLD) for inference.
Table of contents
Samples
Model
Optimization
Visualize the results
4.1. Clustered results
4.2. Visualize uncertainty
4.3. Mean and scale of the selected mixture components
4.4. Mixture weights of each mixture component
4.5. Convergence of $\alpha$
4.6. Number of inferred clusters over iterations
4.7. Fit the model using RMSProp
Conclusion
1. Samples
First, we set up a toy dataset. We generate 50,000 random samples from three bivariate Gaussian distributions.
End of explanation
"""
reset_sess()
# Upperbound on K
max_cluster_num = 30
# Define trainable variables.
mix_probs = tf.nn.softmax(
tf.Variable(
name='mix_probs',
initial_value=np.ones([max_cluster_num], dtype) / max_cluster_num))
loc = tf.Variable(
name='loc',
initial_value=np.random.uniform(
low=-9, #set around minimum value of sample value
high=9, #set around maximum value of sample value
size=[max_cluster_num, dims]))
precision = tf.nn.softplus(tf.Variable(
name='precision',
initial_value=
np.ones([max_cluster_num, dims], dtype=dtype)))
alpha = tf.nn.softplus(tf.Variable(
name='alpha',
initial_value=
np.ones([1], dtype=dtype)))
training_vals = [mix_probs, alpha, loc, precision]
# Prior distributions of the training variables
#Use symmetric Dirichlet prior as finite approximation of Dirichlet process.
rv_symmetric_dirichlet_process = tfd.Dirichlet(
concentration=np.ones(max_cluster_num, dtype) * alpha / max_cluster_num,
name='rv_sdp')
rv_loc = tfd.Independent(
tfd.Normal(
loc=tf.zeros([max_cluster_num, dims], dtype=dtype),
scale=tf.ones([max_cluster_num, dims], dtype=dtype)),
reinterpreted_batch_ndims=1,
name='rv_loc')
rv_precision = tfd.Independent(
tfd.InverseGamma(
concentration=np.ones([max_cluster_num, dims], dtype),
rate=np.ones([max_cluster_num, dims], dtype)),
reinterpreted_batch_ndims=1,
name='rv_precision')
rv_alpha = tfd.InverseGamma(
concentration=np.ones([1], dtype=dtype),
rate=np.ones([1]),
name='rv_alpha')
# Define mixture model
rv_observations = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=mix_probs),
components_distribution=tfd.MultivariateNormalDiag(
loc=loc,
scale_diag=precision))
"""
Explanation: 2. Model
Here, we define a Dirichlet process mixture of Gaussian distributions with a symmetric Dirichlet prior. Throughout this notebook, vector quantities are written in bold. Over $i\in\{1,\ldots,N\}$ samples, the model with a mixture of $j \in\{1,\ldots,K\}$ Gaussian distributions is formulated as follows:
$$\begin{align} p(\boldsymbol{x}_1,\cdots, \boldsymbol{x}_N) &=\prod_{i=1}^N \text{GMM}(x_i), \\ &\,\quad \text{with}\;\text{GMM}(x_i)=\sum_{j=1}^K\pi_j\,\text{Normal}(x_i\,|\,\text{loc}=\boldsymbol{\mu_{j}},\,\text{scale}=\boldsymbol{\sigma_{j}}) \end{align}$$ where
$$\begin{align} x_i&\sim \text{Normal}(\text{loc}=\boldsymbol{\mu}_{z_i},\,\text{scale}=\boldsymbol{\sigma}_{z_i}) \\ z_i &\sim \text{Categorical}(\text{prob}=\boldsymbol{\pi}),\quad \text{with}\;\boldsymbol{\pi}=\{\pi_1,\cdots,\pi_K\} \\ \boldsymbol{\pi}&\sim\text{Dirichlet}(\text{concentration}=\{\tfrac{\alpha}{K},\cdots,\tfrac{\alpha}{K}\}) \\ \alpha&\sim \text{InverseGamma}(\text{concentration}=1,\,\text{rate}=1) \\ \boldsymbol{\mu_j} &\sim \text{Normal}(\text{loc}=\boldsymbol{0},\,\text{scale}=\boldsymbol{1}) \\ \boldsymbol{\sigma_j} &\sim \text{InverseGamma}(\text{concentration}=\boldsymbol{1},\,\text{rate}=\boldsymbol{1}) \end{align}$$
Our goal is to assign each $x_i$ to the $j$th cluster through $z_i$, which represents the inferred index of a cluster.
For an ideal Dirichlet process mixture model, $K$ should be set to $\infty$, but it is known that a sufficiently large $K$ approximates the Dirichlet process mixture well. Although we set an arbitrary initial value for $K$, the optimal number of clusters is also inferred through the optimization, unlike in a simple Gaussian mixture model.
In this notebook, we use bivariate Gaussian distributions as mixture components and set $K$ to 30.
End of explanation
"""
# Learning rates and decay
starter_learning_rate = 1e-6
end_learning_rate = 1e-10
decay_steps = 1e4
# Number of training steps
training_steps = 10000
# Mini-batch size
batch_size = 20
# Sample size for parameter posteriors
sample_size = 100
"""
Explanation: 3. Optimization
We optimize the model with preconditioned Stochastic Gradient Langevin Dynamics (pSGLD), which allows us to optimize the model over a large number of samples using mini-batch gradient descent.
To update the parameters $\boldsymbol{\theta}\equiv\{\boldsymbol{\pi},\,\alpha,\,\boldsymbol{\mu_j},\,\boldsymbol{\sigma_j}\}$ in the $t\,$th iteration with mini-batch size $M$, the update is sampled as:
$$\begin{align*} \Delta \boldsymbol{\theta}_{t} & \sim \frac{\epsilon_{t}}{2} \bigl[ G\left(\boldsymbol{\theta}_{t}\right) \bigl( \nabla_{\boldsymbol{\theta}} \log p\left(\boldsymbol{\theta}_{t}\right) + \frac{N}{M} \sum_{k=1}^{M} \nabla_{\boldsymbol{\theta}} \log \text{GMM}(x_{t_k})\bigr) + \sum_{\boldsymbol{\theta}}\nabla_{\boldsymbol{\theta}} G\left(\boldsymbol{\theta}_{t}\right) \bigr] \\ &+ G^{\frac{1}{2}}\left(\boldsymbol{\theta}_{t}\right) \text{Normal}\left(\text{loc}=\boldsymbol{0},\,\text{scale}=\epsilon_{t}\boldsymbol{1}\right) \end{align*}$$
In the above equation, $\epsilon_{t}$ is the learning rate at the $t\,$th iteration and $\log p(\theta_t)$ is the sum of the log prior distributions of $\theta$. $G(\boldsymbol{\theta}_{t})$ is the preconditioner, which adjusts the scale of the gradient of each parameter.
End of explanation
"""
# Placeholder for mini-batch
observations_tensor = tf.compat.v1.placeholder(dtype, shape=[batch_size, dims])
# Define joint log probabilities
# Notice that each prior probability should be divided by num_samples and
# likelihood is divided by batch_size for pSGLD optimization.
log_prob_parts = [
rv_loc.log_prob(loc) / num_samples,
rv_precision.log_prob(precision) / num_samples,
rv_alpha.log_prob(alpha) / num_samples,
rv_symmetric_dirichlet_process.log_prob(mix_probs)[..., tf.newaxis]
/ num_samples,
rv_observations.log_prob(observations_tensor) / batch_size
]
joint_log_prob = tf.reduce_sum(tf.concat(log_prob_parts, axis=-1), axis=-1)
# Make mini-batch generator
dx = tf.compat.v1.data.Dataset.from_tensor_slices(observations)\
.shuffle(500).repeat().batch(batch_size)
iterator = tf.compat.v1.data.make_one_shot_iterator(dx)
next_batch = iterator.get_next()
# Define learning rate scheduling
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.polynomial_decay(
starter_learning_rate,
global_step, decay_steps,
end_learning_rate, power=1.)
# Set up the optimizer. Don't forget to set data_size=num_samples.
optimizer_kernel = tfp.optimizer.StochasticGradientLangevinDynamics(
learning_rate=learning_rate,
preconditioner_decay_rate=0.99,
burnin=1500,
data_size=num_samples)
train_op = optimizer_kernel.minimize(-joint_log_prob)
# Arrays to store samples
mean_mix_probs_mtx = np.zeros([training_steps, max_cluster_num])
mean_alpha_mtx = np.zeros([training_steps, 1])
mean_loc_mtx = np.zeros([training_steps, max_cluster_num, dims])
mean_precision_mtx = np.zeros([training_steps, max_cluster_num, dims])
init = tf.global_variables_initializer()
sess.run(init)
start = time.time()
for it in range(training_steps):
[
mean_mix_probs_mtx[it, :],
mean_alpha_mtx[it, 0],
mean_loc_mtx[it, :, :],
mean_precision_mtx[it, :, :],
_
] = sess.run([
*training_vals,
train_op
], feed_dict={
observations_tensor: sess.run(next_batch)})
elapsed_time_psgld = time.time() - start
print("Elapsed time: {} seconds".format(elapsed_time_psgld))
# Take mean over the last sample_size iterations
mean_mix_probs_ = mean_mix_probs_mtx[-sample_size:, :].mean(axis=0)
mean_alpha_ = mean_alpha_mtx[-sample_size:, :].mean(axis=0)
mean_loc_ = mean_loc_mtx[-sample_size:, :].mean(axis=0)
mean_precision_ = mean_precision_mtx[-sample_size:, :].mean(axis=0)
"""
Explanation: We use the joint log probability of the likelihood $\text{GMM}(x_{t_k})$ and the prior probabilities $p(\theta_t)$ as the loss function for pSGLD.
Note that, as described in the pSGLD API, the sum of the prior probabilities has to be divided by the sample size $N$.
End of explanation
"""
loc_for_posterior = tf.compat.v1.placeholder(
dtype, [None, max_cluster_num, dims], name='loc_for_posterior')
precision_for_posterior = tf.compat.v1.placeholder(
dtype, [None, max_cluster_num, dims], name='precision_for_posterior')
mix_probs_for_posterior = tf.compat.v1.placeholder(
dtype, [None, max_cluster_num], name='mix_probs_for_posterior')
# Posterior of z (unnormalized)
unnomarlized_posterior = tfd.MultivariateNormalDiag(
loc=loc_for_posterior, scale_diag=precision_for_posterior)\
.log_prob(tf.expand_dims(tf.expand_dims(observations, axis=1), axis=1))\
+ tf.log(mix_probs_for_posterior[tf.newaxis, ...])
# Posterior of z (normalized over latent states)
posterior = unnomarlized_posterior\
- tf.reduce_logsumexp(unnomarlized_posterior, axis=-1)[..., tf.newaxis]
cluster_asgmt = sess.run(tf.argmax(
tf.reduce_mean(posterior, axis=1), axis=1), feed_dict={
loc_for_posterior: mean_loc_mtx[-sample_size:, :],
precision_for_posterior: mean_precision_mtx[-sample_size:, :],
mix_probs_for_posterior: mean_mix_probs_mtx[-sample_size:, :]})
idxs, count = np.unique(cluster_asgmt, return_counts=True)
print('Number of inferred clusters = {}\n'.format(len(count)))
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print('Number of elements in each cluster = {}\n'.format(count))
def convert_int_elements_to_consecutive_numbers_in(array):
unique_int_elements = np.unique(array)
for consecutive_number, unique_int_element in enumerate(unique_int_elements):
array[array == unique_int_element] = consecutive_number
return array
cmap = plt.get_cmap('tab10')
plt.scatter(
observations[:, 0], observations[:, 1],
1,
c=cmap(convert_int_elements_to_consecutive_numbers_in(cluster_asgmt)))
plt.axis([-10, 10, -10, 10])
plt.show()
"""
Explanation: 4. Visualizing the results
4.1. Clustering result
First, let us visualize the clustering result.
To assign each sample $x_i$ to a cluster $j$, we compute the posterior of $z_i$ as:
$$\begin{align} j = \underset{z_i}{\arg\max}\,p(z_i\,|\,x_i,\,\boldsymbol{\theta}) \end{align}$$
End of explanation
"""
# Calculate entropy
posterior_in_exponential = tf.exp(posterior)
uncertainty_in_entropy = tf.reduce_mean(-tf.reduce_sum(
posterior_in_exponential
* posterior,
axis=1), axis=1)
uncertainty_in_entropy_ = sess.run(uncertainty_in_entropy, feed_dict={
loc_for_posterior: mean_loc_mtx[-sample_size:, :],
precision_for_posterior: mean_precision_mtx[-sample_size:, :],
mix_probs_for_posterior: mean_mix_probs_mtx[-sample_size:, :]
})
plt.title('Entropy')
sc = plt.scatter(observations[:, 0],
observations[:, 1],
1,
c=uncertainty_in_entropy_,
cmap=plt.cm.viridis_r)
cbar = plt.colorbar(sc,
fraction=0.046,
pad=0.04,
ticks=[uncertainty_in_entropy_.min(),
uncertainty_in_entropy_.max()])
cbar.ax.set_yticklabels(['low', 'high'])
cbar.set_label('Uncertainty', rotation=270)
plt.show()
"""
Explanation: We can see that nearly equal numbers of samples were assigned to the appropriate clusters and that the model inferred the correct number of clusters.
4.2. Visualizing the uncertainty
Next, we visualize the uncertainty of the clustering result for each sample.
The uncertainty is computed using entropy as follows:
$$\begin{align} \text{Uncertainty}_\text{entropy} = -\frac{1}{K}\sum^{K}_{z_i=1}\sum^{O}_{l=1}p(z_i\,|\,x_i,\,\boldsymbol{\theta}_l)\log p(z_i\,|\,x_i,\,\boldsymbol{\theta}_l) \end{align}$$
In pSGLD, the values of the training parameters at each iteration are treated as samples from their posterior distribution. Hence, for each parameter we compute the entropy over the values from $O$ iterations. The final entropy value is obtained by averaging the entropies over all cluster assignments.
End of explanation
"""
for idx, number_of_samples in zip(idxs, count):
  print(
      'Component id = {}, Number of elements = {}'
      .format(idx, number_of_samples))
print(
'Mean loc = {}, Mean scale = {}\n'
.format(mean_loc_[idx, :], mean_precision_[idx, :]))
"""
Explanation: In the graph above, lower brightness indicates higher uncertainty. We can see that the uncertainty is especially high for samples near the cluster boundaries, which intuitively tells us that those samples are hard to cluster.
4.3. Mean and scale of the selected mixture components
Next, let us look at $\mu_j$ and $\sigma_j$ of the selected clusters.
End of explanation
"""
plt.ylabel('Mean posterior of mixture weight')
plt.xlabel('Component')
plt.bar(range(0, max_cluster_num), mean_mix_probs_)
plt.show()
"""
Explanation: Once again, $\boldsymbol{\mu_j}$ and $\boldsymbol{\sigma_j}$ are close to the ground truth.
4.4 Mixture weights of each mixture component
Let us also look at the inferred mixture weights.
End of explanation
"""
print('Value of inferred alpha = {0:.3f}\n'.format(mean_alpha_[0]))
plt.ylabel('Sample value of alpha')
plt.xlabel('Iteration')
plt.plot(mean_alpha_mtx)
plt.show()
"""
Explanation: We can see that only a few (three) mixture components have large weights and the rest have values close to zero. This also shows that the model inferred the correct number of mixture components that make up the distribution of the samples.
4.5. Convergence of $\alpha$
Let us check the convergence of the concentration parameter $\alpha$ of the Dirichlet distribution.
End of explanation
"""
step = sample_size
num_of_iterations = 50
estimated_num_of_clusters = []
interval = (training_steps - step) // (num_of_iterations - 1)
iterations = np.asarray(range(step, training_steps+1, interval))
for iteration in iterations:
start_position = iteration-step
end_position = iteration
result = sess.run(tf.argmax(
tf.reduce_mean(posterior, axis=1), axis=1), feed_dict={
loc_for_posterior:
mean_loc_mtx[start_position:end_position, :],
precision_for_posterior:
mean_precision_mtx[start_position:end_position, :],
mix_probs_for_posterior:
mean_mix_probs_mtx[start_position:end_position, :]})
idxs, count = np.unique(result, return_counts=True)
estimated_num_of_clusters.append(len(count))
plt.ylabel('Number of inferred clusters')
plt.xlabel('Iteration')
plt.yticks(np.arange(1, max(estimated_num_of_clusters) + 1, 1))
plt.plot(iterations - 1, estimated_num_of_clusters)
plt.show()
"""
Explanation: Considering that in a Dirichlet mixture model a smaller $\alpha$ means a smaller expected number of clusters, the model appears to be learning the optimal number of clusters over the iterations.
4.6. Number of inferred clusters over iterations
We visualize how the number of inferred clusters changes over the iterations.
To do so, we infer the number of clusters at each iteration.
End of explanation
"""
# Learning rates and decay
starter_learning_rate_rmsprop = 1e-2
end_learning_rate_rmsprop = 1e-4
decay_steps_rmsprop = 1e4
# Number of training steps
training_steps_rmsprop = 50000
# Mini-batch size
batch_size_rmsprop = 20
# Define trainable variables.
mix_probs_rmsprop = tf.nn.softmax(
tf.Variable(
name='mix_probs_rmsprop',
initial_value=np.ones([max_cluster_num], dtype) / max_cluster_num))
loc_rmsprop = tf.Variable(
name='loc_rmsprop',
initial_value=np.zeros([max_cluster_num, dims], dtype)
+ np.random.uniform(
low=-9, #set around minimum value of sample value
high=9, #set around maximum value of sample value
size=[max_cluster_num, dims]))
precision_rmsprop = tf.nn.softplus(tf.Variable(
name='precision_rmsprop',
initial_value=
np.ones([max_cluster_num, dims], dtype=dtype)))
alpha_rmsprop = tf.nn.softplus(tf.Variable(
name='alpha_rmsprop',
initial_value=
np.ones([1], dtype=dtype)))
training_vals_rmsprop =\
[mix_probs_rmsprop, alpha_rmsprop, loc_rmsprop, precision_rmsprop]
# Prior distributions of the training variables
#Use symmetric Dirichlet prior as finite approximation of Dirichlet process.
rv_symmetric_dirichlet_process_rmsprop = tfd.Dirichlet(
concentration=np.ones(max_cluster_num, dtype)
* alpha_rmsprop / max_cluster_num,
name='rv_sdp_rmsprop')
rv_loc_rmsprop = tfd.Independent(
tfd.Normal(
loc=tf.zeros([max_cluster_num, dims], dtype=dtype),
scale=tf.ones([max_cluster_num, dims], dtype=dtype)),
reinterpreted_batch_ndims=1,
name='rv_loc_rmsprop')
rv_precision_rmsprop = tfd.Independent(
tfd.InverseGamma(
concentration=np.ones([max_cluster_num, dims], dtype),
rate=np.ones([max_cluster_num, dims], dtype)),
reinterpreted_batch_ndims=1,
name='rv_precision_rmsprop')
rv_alpha_rmsprop = tfd.InverseGamma(
concentration=np.ones([1], dtype=dtype),
rate=np.ones([1]),
name='rv_alpha_rmsprop')
# Define mixture model
rv_observations_rmsprop = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=mix_probs_rmsprop),
components_distribution=tfd.MultivariateNormalDiag(
loc=loc_rmsprop,
scale_diag=precision_rmsprop))
log_prob_parts_rmsprop = [
rv_loc_rmsprop.log_prob(loc_rmsprop),
rv_precision_rmsprop.log_prob(precision_rmsprop),
rv_alpha_rmsprop.log_prob(alpha_rmsprop),
rv_symmetric_dirichlet_process_rmsprop
.log_prob(mix_probs_rmsprop)[..., tf.newaxis],
rv_observations_rmsprop.log_prob(observations_tensor)
* num_samples / batch_size
]
joint_log_prob_rmsprop = tf.reduce_sum(
tf.concat(log_prob_parts_rmsprop, axis=-1), axis=-1)
# Define learning rate scheduling
global_step_rmsprop = tf.Variable(0, trainable=False)
learning_rate = tf.train.polynomial_decay(
starter_learning_rate_rmsprop,
global_step_rmsprop, decay_steps_rmsprop,
end_learning_rate_rmsprop, power=1.)
# Set up the optimizer. Don't forget to set data_size=num_samples.
optimizer_kernel_rmsprop = tf.train.RMSPropOptimizer(
learning_rate=learning_rate,
decay=0.99)
train_op_rmsprop = optimizer_kernel_rmsprop.minimize(-joint_log_prob_rmsprop)
init_rmsprop = tf.global_variables_initializer()
sess.run(init_rmsprop)
start = time.time()
for it in range(training_steps_rmsprop):
[
_
] = sess.run([
train_op_rmsprop
], feed_dict={
observations_tensor: sess.run(next_batch)})
elapsed_time_rmsprop = time.time() - start
print("RMSProp elapsed_time: {} seconds ({} iterations)"
.format(elapsed_time_rmsprop, training_steps_rmsprop))
print("pSGLD elapsed_time: {} seconds ({} iterations)"
.format(elapsed_time_psgld, training_steps))
mix_probs_rmsprop_, alpha_rmsprop_, loc_rmsprop_, precision_rmsprop_ =\
sess.run(training_vals_rmsprop)
"""
Explanation: As the iterations proceed, the number of clusters approaches 3. Since $\alpha$ converges to a smaller value over the iterations, we can see that the model is correctly learning the parameters so as to infer the optimal number of clusters.
Interestingly, unlike $\alpha$, which converged much later, the inference had already converged to the appropriate number of clusters in the early iterations.
4.7. Fitting the model with RMSProp
In this section, to check the effectiveness of the Monte Carlo sampling scheme of pSGLD, we fit the model with RMSProp. We chose RMSProp for the comparison because it has no sampling scheme and pSGLD is based on RMSProp.
End of explanation
"""
cluster_asgmt_rmsprop = sess.run(tf.argmax(
tf.reduce_mean(posterior, axis=1), axis=1), feed_dict={
loc_for_posterior: loc_rmsprop_[tf.newaxis, :],
precision_for_posterior: precision_rmsprop_[tf.newaxis, :],
mix_probs_for_posterior: mix_probs_rmsprop_[tf.newaxis, :]})
idxs, count = np.unique(cluster_asgmt_rmsprop, return_counts=True)
print('Number of inferred clusters = {}\n'.format(len(count)))
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print('Number of elements in each cluster = {}\n'.format(count))
cmap = plt.get_cmap('tab10')
plt.scatter(
observations[:, 0], observations[:, 1],
1,
c=cmap(convert_int_elements_to_consecutive_numbers_in(
cluster_asgmt_rmsprop)))
plt.axis([-10, 10, -10, 10])
plt.show()
"""
Explanation: Even though RMSProp ran for more iterations, its optimization was much faster than that of pSGLD.
Next, let us look at the clustering result.
End of explanation
"""
plt.ylabel('MAP inferece of mixture weight')
plt.xlabel('Component')
plt.bar(range(0, max_cluster_num), mix_probs_rmsprop_)
plt.show()
"""
Explanation: In this experiment, RMSProp failed to infer the correct number of clusters. Let us also look at the mixture weights.
End of explanation
"""
|
prasants/pyds
|
12.Introduction_to_Pandas.ipynb
|
mit
|
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: Table of Contents
<p><div class="lev1 toc-item"><a href="#Pandas:-Introduction" data-toc-modified-id="Pandas:-Introduction-1"><span class="toc-item-num">1 </span>Pandas: Introduction</a></div><div class="lev2 toc-item"><a href="#Importing-Libraries" data-toc-modified-id="Importing-Libraries-11"><span class="toc-item-num">1.1 </span>Importing Libraries</a></div><div class="lev1 toc-item"><a href="#Data-Structures" data-toc-modified-id="Data-Structures-2"><span class="toc-item-num">2 </span>Data Structures</a></div><div class="lev2 toc-item"><a href="#Series" data-toc-modified-id="Series-21"><span class="toc-item-num">2.1 </span>Series</a></div><div class="lev3 toc-item"><a href="#Mini-Project" data-toc-modified-id="Mini-Project-211"><span class="toc-item-num">2.1.1 </span>Mini-Project</a></div><div class="lev2 toc-item"><a href="#DataFrames" data-toc-modified-id="DataFrames-22"><span class="toc-item-num">2.2 </span>DataFrames</a></div><div class="lev1 toc-item"><a href="#Indexing-and-Selection" data-toc-modified-id="Indexing-and-Selection-3"><span class="toc-item-num">3 </span>Indexing and Selection</a></div><div class="lev2 toc-item"><a href="#Selecting-Columns" data-toc-modified-id="Selecting-Columns-31"><span class="toc-item-num">3.1 </span>Selecting Columns</a></div><div class="lev2 toc-item"><a href="#Using-loc-and-iloc" data-toc-modified-id="Using-loc-and-iloc-32"><span class="toc-item-num">3.2 </span>Using <code>loc</code> and <code>iloc</code></a></div><div class="lev2 toc-item"><a href="#Conditional-Selection" data-toc-modified-id="Conditional-Selection-33"><span class="toc-item-num">3.3 </span>Conditional Selection</a></div><div class="lev2 toc-item"><a href="#Creating-New-Columns" data-toc-modified-id="Creating-New-Columns-34"><span class="toc-item-num">3.4 </span>Creating New Columns</a></div><div class="lev2 toc-item"><a href="#Removing-Columns" data-toc-modified-id="Removing-Columns-35"><span class="toc-item-num">3.5 </span>Removing Columns</a></div><div class="lev2 toc-item"><a href="#Dataframe-from-a-Dictionry" data-toc-modified-id="Dataframe-from-a-Dictionry-36"><span class="toc-item-num">3.6 </span>Dataframe from a Dictionry</a></div><div class="lev2 toc-item"><a href="#Exercise" data-toc-modified-id="Exercise-37"><span class="toc-item-num">3.7 </span>Exercise</a></div><div class="lev1 toc-item"><a href="#Handling-Missing-Data" data-toc-modified-id="Handling-Missing-Data-4"><span class="toc-item-num">4 </span>Handling Missing Data</a></div><div class="lev3 toc-item"><a href="#What-is-Missing-Data?" 
data-toc-modified-id="What-is-Missing-Data?-401"><span class="toc-item-num">4.0.1 </span>What is Missing Data?</a></div><div class="lev2 toc-item"><a href="#Imputation" data-toc-modified-id="Imputation-41"><span class="toc-item-num">4.1 </span>Imputation</a></div><div class="lev2 toc-item"><a href="#Interpolation" data-toc-modified-id="Interpolation-42"><span class="toc-item-num">4.2 </span>Interpolation</a></div><div class="lev2 toc-item"><a href="#A-Quick-Detour-into-some-Data-Viz" data-toc-modified-id="A-Quick-Detour-into-some-Data-Viz-43"><span class="toc-item-num">4.3 </span>A Quick Detour into some Data Viz</a></div><div class="lev1 toc-item"><a href="#Merge,-Join,-Concatenate" data-toc-modified-id="Merge,-Join,-Concatenate-5"><span class="toc-item-num">5 </span>Merge, Join, Concatenate</a></div><div class="lev2 toc-item"><a href="#Merge" data-toc-modified-id="Merge-51"><span class="toc-item-num">5.1 </span>Merge</a></div><div class="lev2 toc-item"><a href="#Join" data-toc-modified-id="Join-52"><span class="toc-item-num">5.2 </span>Join</a></div><div class="lev2 toc-item"><a href="#Concatenate" data-toc-modified-id="Concatenate-53"><span class="toc-item-num">5.3 </span>Concatenate</a></div><div class="lev1 toc-item"><a href="#Grouping,-a.k.a.-split-apply-combine" data-toc-modified-id="Grouping,-a.k.a.-split-apply-combine-6"><span class="toc-item-num">6 </span>Grouping, a.k.a. split-apply-combine</a></div><div class="lev2 toc-item"><a href="#Apply" data-toc-modified-id="Apply-61"><span class="toc-item-num">6.1 </span>Apply</a></div><div class="lev2 toc-item"><a href="#Map" data-toc-modified-id="Map-62"><span class="toc-item-num">6.2 </span>Map</a></div><div class="lev2 toc-item"><a href="#ApplyMap" data-toc-modified-id="ApplyMap-63"><span class="toc-item-num">6.3 </span>ApplyMap</a></div><div class="lev1 toc-item"><a href="#Pivot-Tables" data-toc-modified-id="Pivot-Tables-7"><span class="toc-item-num">7 </span>Pivot Tables</a></div><div class="lev2 toc-item"><a href="#Sales-Reports" data-toc-modified-id="Sales-Reports-71"><span class="toc-item-num">7.1 </span>Sales Reports</a></div><div class="lev2 toc-item"><a href="#Tips" data-toc-modified-id="Tips-72"><span class="toc-item-num">7.2 </span>Tips</a></div><div class="lev2 toc-item"><a href="#Bada-Bing!" data-toc-modified-id="Bada-Bing!-73"><span class="toc-item-num">7.3 </span>Bada Bing!</a></div><div class="lev1 toc-item"><a href="#Basic-Statistical-Operations/Explorations" data-toc-modified-id="Basic-Statistical-Operations/Explorations-8"><span class="toc-item-num">8 </span>Basic Statistical Operations/Explorations</a></div>
# Pandas: Introduction
**Pandas** is Python's library for dealing with structured or tabular data. Its main contributor, Wes McKinney, was inspired by R's `data.frame` and implemented it for Python. <br>
It combines the speed of NumPy with the ease of SQL, as per Wes, and I completely agree with that.
If you have used R, and the dplyr package, you know how easy it is to manipulate data with it.
We will be learning about various methods to deal with data, and occasionally we will make things a little challenging so as to replicate/mimic real world conditions. And while at it, we will throw in visualisations using Matplotlib too! The best way to learn is to write code yourself, but don't worry if you don't understand all of it in the first go. And of course, feel free to take a step back and revisit [Lesson 10](10.Visualise_This-Tutorial01.ipynb).
By the end of it, we will have worked through a few case studies, which should be an excellent start for your portfolio.
We will cover at the very least the following topics:
* Indexing and Selection
* Creating new columns
* Renaming
* Grouping
* Handling missing values
* Merge, join
* map(), apply(), applymap()
* Pivot Tables
* Basic statistics
* Plots (throughout the exercise)
I say "at the very least" because in my opinion, this is the bare minimum you should know to handle data science problems 'in the wild', as in, problems that aren't toy problems, and the kind that data scientists deal with every day.
## Importing Libraries
As usual, we begin by importing our libraries. Just as with NumPy, where we import it as `np`, we will import the pandas library as `pd`. It's just convention, and you're free to import it as `chuck_norris`, `really_long_name_for_reason_in_particular` or just plain and simple, `pd`.
End of explanation
"""
import pandas as pd
# From Scalar Values
series_1 = pd.Series([1,2,3,4,5])
series_1
"""
Explanation: Data Structures
There are three fundamental data structures supported by Pandas:<br>
* Series: a one-dimensional labeled array capable of holding any data type (integers, strings, floating point numbers, Python objects, etc.). For those coming from an R background, Series is much like a Vector.
* DataFrame: a 2-dimensional labeled data structure with columns of potentially different types.
* Panel: also called longitudinal data or cross-sectional time series data, is data where multiple cases (people, firms, countries etc) were observed at two or more time periods. This is rarely used though, and I personally haven't come across this except for some Econometrics courses I had taken in my undergraduate years.
Series
The basic format to creat a series is:<br>
series_a = pd.Series(data, index = index_name)
The default index is 0, 1, 2, 3, ... and so on, and does not need to be specified, except in the case of scalars.
End of explanation
"""
series_1 = pd.Series([1,2,3,4,5], index = ['Mon','Tue','Wed','Thu','Fri'])
series_1
series_2 = pd.Series(1.0, index = ['a','b','c','d','e'])
series_2
import pandas as pd
import numpy as np
# From an array
# Just copy this for now, we'll cover the 'seed' in DataFrames
np.random.seed(42)
series_3 = pd.Series(np.random.randn(5))
series_3
np.random.seed(42)
series_3 = pd.Series(np.random.randn(5), index = ['a','b','c','d','e'])
series_3
np.random.seed(42)
ind_1 = ['a','b','c','d','e']
series_3 = pd.Series(np.random.randn(5), index = ind_1)
series_3
series_4 = pd.Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
series_4
"""
Explanation: Notice the 0,1,2,3... on the left side? That's called the Index. It starts from 0, but you can rename it.
End of explanation
"""
series_4['a'] == series_4[0]
series_4[series_4>3]
series_4[series_4%2==0]
series_5 = pd.Series([1,2,3,4,5], index = ['HP', 'GS', 'IBM', 'AA', 'FB'])
series_5
series_5['IBM']
tech_pf1 = series_5[['HP', 'IBM', 'FB']]
tech_pf1
# From a Dictionary
dict_01 = {'Gavin' : 50, 'Russ' : 100, 'Erlich' : 150}
series_6 = pd.Series(dict_01)
series_6
# Reordering the previous series
index = ['Gavin', 'Russ', 'Erlich', 'Peter']
series_7 = pd.Series(dict_01, index=index)
series_7
"""
Explanation: We can subset and get values from the series.
End of explanation
"""
pd.isnull(series_7)
"""
Explanation: Notice the NaN, which stands for Not a Number. We will be dealing with it extensively when working with DataFrames. It is an indicator for missing or corrupted data. Here's how we test for it.
End of explanation
"""
# Pandas is very smart, and aligns the series for mathematical operations
series_6 + series_7
# Renaming an Index
series_7.index.name = "Names"
series_7
# Naming a Series
series_7.name = "SV"
series_7
"""
Explanation: And here's a nice discussion on the topic from our friends at StackOverflow.
End of explanation
"""
goals = pd.Series([20,19,21,24,1], index = ["Messi", "Neymar", "Zlatan", "Ronaldo", "N’Gog"])
goals
# Who scored less than 20 goals?
goals[goals<20]
# What is the average number of goals scored?
goals.mean()
# What is the median number of goals scored?
goals.median()
# What is the range of goals scored? (Range = Max - Min)
goals_range = goals.max() - goals.min()
print(goals_range)
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,7)
# Plot the goals in a bar chart
goals.plot(kind = "bar")
# Let's beautify that a little
goals.plot(kind = "barh", title = "Goal Scorers")
"""
Explanation: Mini-Project
End of explanation
"""
import pandas as pd
import numpy as np
# Let's start with a standard array
arr1 = np.array([[40,40,75,95],[80,85,120,130],
[155,160,165,170],[200,245,250,260]])
print(arr1.shape)
print(arr1.size)
print(arr1)
# It is quite common to assign a dataframe the name 'df', although you can
# use a relevant name, such as baseball_stats or book_sales
# It's always good to use context driven names - you should code expecting
# someone else to read it a few months down the line
df = pd.DataFrame(arr1, index = "Peter,Clarke,Bruce,Tony".split(","),
columns = "Jan,Feb,Mar,Apr".split(","))
df
"""
Explanation: Read more about these here.
DataFrames
DataFrames are, in many respects, the real Pandas. Usually, if you're using Pandas, it will be to use DataFrames.<br>
We will begin with creating DataFrames, and the usual indexing and selection mechanisms. In reality, you will probably never have to 'create' a DataFrame, but practice these skills here to get comfortable with hierarchies, indices and selections. Then we will move on to reading data from multiple formats, including spreadsheets, JSON files and API endpoints.
By the way, during these examples, we will always set the seed first when generating random numbers. If you're coming from R, this is the same as set.seed(). In Python, we use the random.seed statement from numpy, which you can read about here. You can set it to any number you like, and I usually set it to 42 just out of habit, but that's not to say you can't set it to an arbitrary number like 27 or 2012. Use the same numbers as this notebook though to replicate the results. Also note that we need to mention it in every cell in which we want the results replicated.
You will see later how this is good practice, especially when sharing your work with other members of the team - they will be able to reproduce your work on their machines due to the pseudo-random number that is generated algorithmically.
End of explanation
"""
df = pd.DataFrame(arr1, index = "Peter,Clarke,Bruce,Tony".split(","),
columns = "Jan,Feb,Mar,Apr".split(","))
df
# Selecting columns
df[['Jan']]
df[['Jan','Feb']]
df[['Mar','Jan']]
"""
Explanation: Indexing and Selection
Selecting Columns
End of explanation
"""
df['Jan']
print('Series:', type(df['Jan']))
print('DataFrame:',type(df[['Jan']]))
"""
Explanation: It's interesting to note that the official Pandas documentation refers to DataFrames as:
Can be thought of as a dict-like container for Series objects.
You can access it as a Series as below:
End of explanation
"""
df = pd.DataFrame(arr1, index = "Peter,Clarke,Bruce,Tony".split(","),
columns = "Jan,Feb,Mar,Apr".split(","))
df
# For selecting by Label
df.loc[['Tony']]
df.loc[['Peter','Bruce']]
df.loc[['Peter','Bruce'],['Jan','Feb']]
# All of Peter's data
df.loc[["Peter"]][:]
df.loc["Peter"][:]
df
# Integer-location based indexing for selection by position
# Note how this returns a Dataframe
df.iloc[[0]]
# and this returns a Series
df.iloc[0]
# Narrowing down further
df.iloc[[0],[1]]
# Replicating the results from our use of the loc statement
df.iloc[[0,2]]
# Compare to df.loc[['Peter','Bruce'],['A','D']]
df.iloc[[0,2],[0,3]]
"""
Explanation: Using loc and iloc
End of explanation
"""
df.ix[0:3]
df.iloc[0:3]
"""
Explanation: There's another function named ix. I have rarely used it, and both loc and iloc take care of all my selection needs. You can read about it here.
Also, check out the similarity of outputs below:
End of explanation
"""
df
df[df%2 == 0]
df%2 == 0
df < 100
df[df<100]
df
df[df['Jan']>100][['Apr']]
df[df['Jan']<100][['Feb','Apr']]
# Using multiple conditions
df[(df['Jan'] >= 80) & (df['Mar']>100)]
"""
Explanation: Conditional Selection
While exploring data sets, one often has to use conditional selection. Or this could be true for creating subsets to work.
End of explanation
"""
df = pd.DataFrame(arr1, index = "Peter,Clarke,Bruce,Tony".split(","), columns = "Jan,Feb,Mar,Apr".split(","))
df
df["Dec"] = df["Jan"] + df["Mar"]
df
"""
Explanation: Did you notice that we used & instead of and? When using Pandas, we have to use the symbol, not the word. Here's a StackOverflow discussion on this.
Creating New Columns
End of explanation
"""
df
df.drop('Dec', axis = 1)
"""
Explanation: Removing Columns
While fundamentally adding and removing columns ought to be similar operations, there are a few differences. Let's see if you can figure it out.
End of explanation
"""
df
"""
Explanation: First, we had to mention the axis. 0 is for rows, 1 is for columns.
End of explanation
"""
df.drop('Dec', axis = 1, inplace = True)
df
"""
Explanation: Why is 'Dec' still there? Here lies the difference - while removing columns, we have to specify that the operation should be inplace. Read about it in the official documentation.
End of explanation
"""
df.drop('Tony', axis = 0)
# Renaming Columns
df.rename(columns={'Jan': 'January'}, inplace=True)
df
df.rename(columns={'Feb': 'February', 'Mar': 'March', 'Apr': 'April'}, inplace=True)
df
"""
Explanation: And just for the sake of completion, let's temporarily kick out Tony from the table. Temporary, since it's not inplace.
End of explanation
"""
dict1 = {'first_name': ['Erlich', 'Richard', "Dinesh", 'Gilfoyle', 'Nelson'],
'second_name': ['Bachman', 'Hendricks', np.nan, np.nan, 'Bighetti'],
'occupation': ['Investor', 'Entrepreneur', 'Coder', 'Coder', 'Bench Warmer'],
'age': [40, 30, 28, 29, 28]}
df = pd.DataFrame(dict1, columns = ['first_name', 'second_name','occupation', 'age'])
df
# Who is under 30 years of age?
df[df["age"]<30]
# Who are the coders?
df[df["occupation"] == "Coder"]
# Multiple Conditions : Coders, below 30
# Not that conditions are Booleans, as shown below
coders = df["occupation"] == "Coder"
und_30 = df["age"]<30
df[coders & und_30]
df[df["second_name"].notnull()]
"""
Explanation: Dataframe from a Dictionary
Let's create a new dataframe from a dictionary, and then apply some of the selection techniques we just learnt.
End of explanation
"""
np.random.seed(42)
np.random.randn(4,4)
np.random.seed(42)
df = pd.DataFrame(np.random.randn(4,4), index = "Peter,Clarke,Bruce,Tony".split(","), columns = "Jan,Feb,Mar,Apr".split(","))
df
# Who scored greater than 0 in Apr?
df[df>0][["Apr"]]
# Who scored below 0 in March?
# In which month/months did Clarke score above 0?
# Find the highest scores for each month
# Hint: .max()
# Find the lowest scores for each month
# Plot the higest score for each month in a bar graph
"""
Explanation: Exercise
End of explanation
"""
df = pd.DataFrame({'NYC':[3,np.nan,7,9,6],
'SF':[4,3,8,7,15],
'CHI':[4,np.nan,np.nan,14,6],
'MIA':[3, 9,12,8,9]}, index = ['Mon','Tue','Wed','Thu','Fri'])
df
"""
Explanation: Handling Missing Data
Pay special attention to this section. If needed, spend some extra time to cover all the relevant techniques. <br>
Never in my experience have I come across a 100% clean data set "in the wild". What that means is that most data sets you train with will, of course, be complete, but real-world data is messy and incomplete.
Even high-quality financial data from exchanges will often have missing data points. The less said about unstructured data like text, the better.
TL/DR: If you're going to fight Mike Tyson, don't train to fight Mr Bean.
<img src="images/bean_box.jpg">
What is Missing Data?
Data can be missing because:
* It was never captured
* The data does not exist
* It was captured but got corrupted
In Pandas, missing data will be represented as None or NaN.
End of explanation
"""
df.dropna()
df.dropna(axis = 0)
df.dropna(axis = 1)
"""
Explanation: First thing we can do is drop rows with missing values with the dropna() function. By default, rows are dropped, but you can change this to columns as well.
End of explanation
"""
df2 = df.copy()
df2
df2.mean()
# Are these really the means though?
df
mean = df2['SF'].mean()
mean
"""
Explanation: While this can be helpful in some ways, if your dataset is small, you are losing a significant portion of your data.
For example, if 100 rows out of 1 million rows have missing data, that's negligible, and can potentially be thrown away. What if you have 10 out of 85 rows with incorrect, unusable or missing data?
End of explanation
"""
df = pd.DataFrame({'NYC':[3,np.nan,7,9,6],
'SF':[4,3,8,7,15],
'CHI':[4,np.nan,np.nan,14,6],
'MIA':[3, 9,12,8,9]}, index = ['Mon','Tue','Wed','Thu','Fri'])
df
df.mean()
df.fillna(value = df.mean(), inplace = True)
df
df = pd.DataFrame({'NYC':[3,np.nan,7,9,6],
'SF':[4,3,8,7,15],
'CHI':[4,np.nan,np.nan,14,6],
'MIA':[3, 9,12,8,9]}, index = ['Mon','Tue','Wed','Thu','Fri'])
df
df3 = df.copy()
df3
median = df3['SF'].median()
median
df3.fillna(value = median, inplace = True)
df3
df3.mode()
"""
Explanation: Imputation
Using the fillna function, we can replace missing values.
End of explanation
"""
baby_names = {
'id': ['101', '102', '103', '104', '105'],
'first_name': ['Emma', 'Madison', 'Hannah', 'Grace', 'Emily']
}
df_baby = pd.DataFrame(baby_names, columns = ['id', 'first_name'])
df_baby
df_baby.columns
df_baby["gender"] = "F"
df_baby
df_baby['gender'] = 0
df_baby
"""
Explanation: But sometimes, the data isn't part of the table. Consider the scenario below. We know that the table below contains names of female babies, but that information is missing from our dataset.
End of explanation
"""
df = pd.read_csv("data/cafe_sales2015.csv")
df
df["Date"].head()
df["Date"] = pd.to_datetime(df["Date"])
df.set_index(["Date"], inplace = True)
df.head()
df.tail()
df.head(3)
df.describe()
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (20,5)
df.plot(kind="line")
df["Water"].plot(kind="line")
df.interpolate(method = "linear", inplace = True)
df.head(5)
df.interpolate().count()
df[["Latte", "Water"]].plot(kind="line")
"""
Explanation: Interpolation
Read up more on the interpolate function here and here
End of explanation
"""
import vincent
vincent.core.initialize_notebook()
line = vincent.Line(df)
line.axis_titles(x='Date', y='Amount')
line = vincent.Line(df[["Latte", "Water"]])
line.axis_titles(x='Date', y='Amount')
stacked = vincent.StackedArea(df)
stacked.axis_titles(x='Date', y='Amount')
stacked.legend(title='Cafe Sales')
stacked.colors(brew='Spectral')
"""
Explanation: Keep in mind though, that these are at best approximations.
A Quick Detour into some Data Viz
Install Vincent by running the following line in your command line:
Python 2.x: pip install vincent <br>
Python 3.x: pip3 install vincent
End of explanation
"""
customers = {
'customer_id': ['101', '102', '103', '104', '105'],
'first_name': ['Tony', 'Silvio', 'Paulie', 'Corrado', 'Christopher'],
'last_name': ['Soprano', 'Dante', 'Gualtieri', 'Soprano', 'Moltisanti']}
df_1 = pd.DataFrame(customers, columns = ['customer_id', 'first_name', 'last_name'])
df_1
orders = {
'customer_id': ['101', '104', '105', '108', '111'],
'order_date': ['2015-01-01', '2015-01-08', '2015-01-19', '2015-02-10', '2015-02-11'],
'order_value': ['10000', '25000', '1100', '5000', '4400']}
df_2 = pd.DataFrame(orders, columns = ['customer_id', 'order_date', 'order_value'])
df_2
pd.merge(df_1, df_2, how = 'inner', on = 'customer_id')
pd.merge(df_1, df_2, how = 'left', on = 'customer_id')
pd.merge(df_1, df_2, how = 'right', on = 'customer_id')
pd.merge(df_1, df_2, how = 'outer', on = 'customer_id')
"""
Explanation: Read about using the Vincent package here.
The latest update to Matplotlib, v2.0.0, has really improved the quality of the graphics, but it's still not quite production ready; on the positive side, it is stable and has a large community of people who use it. Niche packages like Vincent can produce some amazing graphics right out of the box with minimal tweaking, but they may not be very mature. Nevertheless, as Data Scientists, it's good to learn about new packages, especially those that help you communicate your results to a non-technical audience. If people don't understand what you do, they won't think what you do is important!
Merge, Join, Concatenate
<img src="images/sql-joins.png">
Image Source: http://www.datapine.com/blog/sql-joins-and-data-analysis-using-sql/
Merge
End of explanation
"""
customers = {
'customer_id': ['101', '102', '103', '104', '105'],
'first_name': ['Tony', 'Silvio', 'Paulie', 'Corrado', 'Christopher'],
'last_name': ['Soprano', 'Dante', 'Gualtieri', 'Soprano', 'Moltisanti']}
customers
orders = {
'customer_id': ['101', '104', '105', '108', '111'],
'order_date': ['2015-01-01', '2015-01-08', '2015-01-19', '2015-02-10', '2015-02-11'],
'order_value': ['10000', '25000', '1100', '5000', '4400']}
orders
df1_new = pd.DataFrame.from_dict(customers, orient='columns', dtype=None)
df1_new
df1_new = df1_new.set_index('customer_id')
df1_new
df2_new = pd.DataFrame.from_dict(orders, orient='columns', dtype=None)
df2_new
df2_new = df2_new.set_index('customer_id')
df2_new
df1_new.join(df2_new,how = "inner")
df1_new.join(df2_new,how = "outer")
df1_new.join(df2_new,how = "left")
df1_new.join(df2_new,how = "right")
# Alternate Way : I don't recommend this
df_1.join(df_2, on = "customer_id", lsuffix='_l', rsuffix='_r')
"""
Explanation: Join
End of explanation
"""
customers = {
'customer_id': ['101', '102', '103', '104', '105'],
'first_name': ['Tony', 'Silvio', 'Paulie', 'Corrado', 'Christopher'],
'last_name': ['Soprano', 'Dante', 'Gualtieri', 'Soprano', 'Moltisanti']}
df_1 = pd.DataFrame(customers, columns = ['customer_id', 'first_name', 'last_name'])
df_1
orders = {
'customer_id': ['101', '104', '105', '108', '111'],
'order_date': ['2015-01-01', '2015-01-08', '2015-01-19', '2015-02-10', '2015-02-11'],
'order_value': ['10000', '25000', '1100', '5000', '4400']}
df_2 = pd.DataFrame(orders, columns = ['customer_id', 'order_date', 'order_value'])
df_2
pd.concat([df_1,df_2])
pd.concat([df_1,df_2],axis=0)
pd.concat([df_1,df_2],axis=1)
"""
Explanation: Concatenate
End of explanation
"""
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams["figure.figsize"] = (15,7)
paintball = {'Team': ['Super Ducks','Super Ducks', 'Super Ducks', 'Super Ducks', 'Super Ducks', 'Bobcats', 'Bobcats', 'Bobcats', 'Bobcats', 'Tigers', 'Tigers', 'Tigers', 'Tigers','Tigers','Tigers'],
'Name': ['Tony', 'Antonio', 'Felipe', 'Ryan', 'Mario', 'Sergio', 'Tanaka', 'Anderson', 'Joe', 'Floyd', 'Manny', 'Chris', 'Junior', 'George','Brock'],
'Kills': ['1', '1', '1', '4', '3', '2', '2', '2','5', '1', '1', '7', '4','8','5'],
'Shots Fired Before': [17, 19, 22, 8, 13, 85, 64, 49, 74, 14, 20, 24,13,31,37],
'Shots Fired After': [41, 73, 57, 30, 74, 37, 28, 40, 43, 18, 19, 21,13,32,39]}
df = pd.DataFrame(paintball, columns = ['Team', 'Name', 'Shots Fired Before', 'Shots Fired After','Kills'])
df
df.groupby('Team').mean()
byteam = df.groupby('Team')
byteam.count()
byteam.describe()
byteam.describe().transpose()['Bobcats']
Team_Before = df[['Shots Fired Before']].groupby(df['Team']).mean()
Team_After = df[['Shots Fired After']].groupby(df['Team']).mean()
Team_Before
Team_After
Team_Before.join(Team_After)
plt.style.use('ggplot')
plt.rcParams["figure.figsize"] = (15,7)
Team_Before.join(Team_After).plot(kind="Bar")
"""
Explanation: One final resource on why you would want to perform these operations in Pandas - and evidence on how fast it really is! http://wesmckinney.com/blog/high-performance-database-joins-with-pandas-dataframe-more-benchmarks/
Grouping, a.k.a. split-apply-combine
While analysing data, a Data Scientist has to very often perform aggregations, perform transformation ops like standardising data, and filter through the dataset to look at only relevant samples.
This is what the groupby function is primarily used for.
Read more here.
End of explanation
"""
plt.style.available
"""
Explanation: Cool graph, but can we improve it, visually speaking? Yes of course we can! Let's look at some of the styles available within Matplotlib.
End of explanation
"""
plt.style.use('ggplot')
plt.rcParams["figure.figsize"] = (15,7)
Team_Before.join(Team_After).plot(kind="Bar")
"""
Explanation: Personally I am quite partial to ggplot and seaborn, but not so much to fivethirtyeight. Let's try these.
End of explanation
"""
plt.style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (15,7)
Team_Before.join(Team_After).plot(kind="Bar")
"""
Explanation: What about fivethirtyeight?
End of explanation
"""
plt.style.use('seaborn')
plt.rcParams["figure.figsize"] = (15,7)
Team_Before.join(Team_After).plot(kind="Bar")
plt.rcParams.update(plt.rcParamsDefault)
plt.style.use('seaborn-poster')
plt.rcParams["figure.figsize"] = (15,7)
Team_Before.join(Team_After).plot(kind="Bar")
pd.crosstab(df["Team"], df["Kills"], margins = True)
plt.rcParams.update(plt.rcParamsDefault)
%matplotlib inline
plt.rcParams["figure.figsize"] = (15,7)
plt.style.use('seaborn-deep')
df.groupby('Kills').mean().plot(kind="bar")
"""
Explanation: And seaborn. Note that seaborn is a visualisation library that works with Matplotlib. You can mimic the style without actually using it.
End of explanation
"""
import pandas as pd
import numpy as np
df = pd.read_csv("data/cafe_sales2015.csv")
df.head()
df["Date"] = pd.to_datetime(df["Date"])
df.set_index(["Date"], inplace = True)
df.interpolate(method = "linear", inplace = True)
df.head()
#print(df.apply(np.cumsum))
df.apply(np.average)
df.apply(lambda x: x.max() - x.min())
# What columns have missing values?
df.apply(lambda x: sum(x.isnull()),axis=0)
# Using Apply to find missing values
# Obviously don't do this for datasets with thousands or millions of rows!
empty = df.apply(lambda col: pd.isnull(col))
empty
"""
Explanation: Apply
We can use the apply function to perform an operation over an axis in a dataframe.
End of explanation
"""
import pandas as pd
import numpy as np
df = pd.read_csv("data/cafe_sales2015.csv")
df.head()
df["Latte"] = df["Latte"].map(lambda x: x+2)
df.head()
df.interpolate(method = "linear", inplace = True)
df["Water"] = df["Water"].map(lambda x: x-1 if (x>0) else 0)
df.head()
"""
Explanation: Map
The map function iterates over each element of a series.
End of explanation
"""
import pandas as pd
import numpy as np
df = pd.read_csv("data/cafe_sales2015.csv")
df.head()
def to_int(x):
    if type(x) is float:
        return int(x)
    return x
df.interpolate(method = "linear", inplace = True)
df.applymap(to_int).head()
"""
Explanation: ApplyMap
The applymap function applies a function element-wise to every value of a DataFrame.
End of explanation
"""
import pandas as pd
import numpy as np
# The 'xlrd' module gets imported automatically, if not, install it with 'pip install xlrd'
df = pd.read_excel("Data/bev-sales.xlsx")
df.head()
df.tail()
df.describe()
help(pd.pivot_table)
df.head()
pd.pivot_table(df,index=["Sales Exec"],values=["Revenue"],aggfunc="sum")
%matplotlib inline
import matplotlib.pyplot as plt
pd.pivot_table(df, index=["Sales Exec"],values=["Revenue"],aggfunc="sum").plot(kind="bar")
pd.pivot_table(df,index=["Sales Exec"],values=["Revenue"],aggfunc="mean")
pd.pivot_table(df, index=["Sales Exec", "Item"], values=["Revenue"], aggfunc="sum")
pd.pivot_table(df,index=["Sales Exec"],values=["Revenue"],aggfunc=[np.sum])
pd.pivot_table(df,index=["Sales Exec"],values=["Units sold", "Revenue"],aggfunc=[np.sum])
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn')
plt.rcParams["figure.figsize"] = (15,7)
pd.pivot_table(df,index=["Sales Exec", "Item"],values=["Revenue"],aggfunc=[np.sum]).plot(kind="bar")
plt.title('January Sales Report')
pd.pivot_table(df,index=["Sales Exec", "Item"],values=["Units sold", "Revenue"],
columns=["Price per Unit"], aggfunc="sum", margins = True)
"""
Explanation: Further Reading<br>
Wes McKinney's amazing book covers this issue. Refer to Page 132.
Pivot Tables
Pivot tables are summarisation tables that help the user sort, count, total or average the data available in a dataset. If you have used Excel, you will be very familiar with them. If not, let's look at it from a fresh Pandas perspective.
Typically, there are four parameters, but you don't always have to specify every one of them, as we will see in the examples below.
index: An array of the dataset that will be used as indices to our new reshaped and aggregated DataFrame
columns: An array of the dataset that will provide columns to the new DataFrame
values: These are the values we wish to aggregate in each cell.
aggfunc: The function we will use to perform the aggregation
Sales Reports
End of explanation
"""
df = pd.read_csv("Data/tips.csv")
df.head()
df["tip_pc"] = df["tip"] / df["total_bill"]
df.head()
pd.pivot_table(df,index=["sex"], values = ["tip_pc"], aggfunc="mean")
pd.pivot_table(df, index = ["smoker", "sex"], values = ["tip_pc"], aggfunc = "mean")
pd.pivot_table(df,index=["sex"], values = ["total_bill","tip"], aggfunc="sum")
"""
Explanation: Tips
End of explanation
"""
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
df = pd.read_excel("Data/Sopranos/sopranos-killings.xlsx")
df.head()
pd.pivot_table(df,index=["Cause of Death"],values = ["Season"], aggfunc="first")
pd.pivot_table(df,index=["Cause of Death"],values = ["Season"], aggfunc="count", margins=True)
whacked = pd.pivot_table(df,index=["Cause of Death"],values = ["Season"], aggfunc="count")
whacked
plt.style.available
plt.rcParams.update(plt.rcParamsDefault)
%matplotlib inline
plt.style.use('seaborn-deep')
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,7)
whacked.plot(kind = "bar", legend=None)
plt.title('How People Died on The Sopranos')
with plt.style.context('ggplot', after_reset=True):
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (15,7)
whacked.plot(kind = "bar", legend=None)
plt.title('How People Died on The Sopranos')
killer = pd.pivot_table(df,index=["Killer"],values = ["Season"], aggfunc="count")
killer = killer.sort_values(by=["Season"], ascending = False)
killer
plt.rcParams.update(plt.rcParamsDefault)
plt.style.use('ggplot')
plt.rcParams["figure.figsize"] = (15,7)
killer[:10].plot(kind = "bar", legend=None)
plt.title('Top 10 Killers')
"""
Explanation: Bada Bing!
End of explanation
"""
import pandas as pd
import numpy as np
df = pd.read_csv("data/cafe_sales2015.csv")
df["Date"] = pd.to_datetime(df["Date"])
df.set_index(["Date"], inplace = True)
df.interpolate(method = "linear", inplace = True)
df.head()
df.tail()
df.describe()
print("Mean\n", df.mean())
print("\n\nMedian\n", df.median())
print("\n\nMode\n", df.mode())
print("The Maximum value is:\n",df.max())
print("\n\nThe Minimum value is:\n",df.min())
print("\n\nKurtosis:\n",df.kurtosis())
"""
Explanation: Basic Statistical Operations/Explorations
End of explanation
"""
|
jluttine/bayespy
|
doc/source/examples/multinomial.ipynb
|
mit
|
n_colors = 5 # number of possible colors
n_bags = 3 # number of bags
n_trials = 20 # number of draws from each bag
"""
Explanation: Multinomial distribution: bags of marbles
Written by: Deebul Nair (2016)
Edited by: Jaakko Luttinen (2016)
Inspired by https://probmods.org/hierarchical-models.html
Using multinomial distribution
There are several bags of coloured marbles, each bag containing different amounts of each color. Marbles are drawn at random with replacement from the bags. The goal is to predict the distribution of the marbles in each bag.
Data generation
Let us create a dataset. First, decide the number of bags, colors and trials (i.e., draws):
End of explanation
"""
from bayespy import nodes
import numpy as np
p_colors = nodes.Dirichlet(n_colors * [0.5], plates=(n_bags,)).random()
"""
Explanation: Generate randomly a color distribution for each bag:
End of explanation
"""
import bayespy.plot as bpplt
bpplt.hinton(p_colors)
bpplt.pyplot.title("Original probability distributions of colors in the bags");
"""
Explanation: The concentration parameter $\begin{bmatrix}0.5 & \ldots & 0.5\end{bmatrix}$ makes the distributions very non-uniform within each bag, that is, the amount of each color can be very different. We can visualize the probability distribution of the colors in each bag:
End of explanation
"""
marbles = nodes.Multinomial(n_trials, p_colors).random()
print(marbles)
"""
Explanation: As one can see, the color distributions aren't very uniform in any of the bags because of the small concentration parameter. Next, make the ball draws:
End of explanation
"""
%%tikz -f svg
\usetikzlibrary{bayesnet}
\node [latent] (theta) {$\theta$};
\node [below=of theta, obs] (y) {$y$};
\edge {theta} {y};
\plate {trials} {(y)} {trials};
\plate {bags} {(theta)(y)(trials)} {bags};
"""
Explanation: Model
We will use the same generative model for estimating the color distributions in the bags as we did for generating the data:
$$
\theta_i \sim \mathrm{Dirichlet}\left(\begin{bmatrix} 0.5 & \ldots & 0.5 \end{bmatrix}\right)
$$
$$
y_i | \theta_i \sim \mathrm{Multinomial}(\theta_i)
$$
The simple graphical model can be drawn as below:
End of explanation
"""
theta = nodes.Dirichlet(n_colors * [0.5], plates=(n_bags,))
y = nodes.Multinomial(n_trials, theta)
"""
Explanation: The model is constructed equivalently to the generative model (except we don't use the nodes to draw random samples):
End of explanation
"""
y.observe(marbles)
"""
Explanation: Data is provided by using the observe method:
End of explanation
"""
from bayespy.inference import VB
Q = VB(y, theta)
Q.update(repeat=1000)
import bayespy.plot as bpplt
bpplt.hinton(theta)
bpplt.pyplot.title("Learned distribution of colors")
bpplt.pyplot.show()
"""
Explanation: Performing Inference
End of explanation
"""
from bayespy import nodes
import numpy as np
#The marbles drawn based on the distribution for 10 trials
# Using same p_color distribution as in the above example
draw_marbles = nodes.Categorical(p_colors,
plates=(n_trials, n_bags)).random()
"""
Explanation: Using the Categorical Distribution
The same problem can be solved with the categorical distribution. The categorical distribution is similar to the multinomial distribution except for the output it produces.
Both Multinomial and Categorical infer the number of colors from the size of the probability vector (p_theta).
Categorical data is in a form where each value tells the index of the color that was picked in a trial. So if n_colors=5, categorical data could be [4, 4, 0, 1, 1, 2, 4] if the number of trials was 7.
Multinomial data is a vector where each element tells how many times that color was picked, for instance, [3, 0, 4] if you have 7 trials.
So there is a significant difference between multinomial and categorical data, and the choice of distribution has to be made depending on the data you have.
Now we can see an example of a hierarchical model using a categorical data generator and model.
End of explanation
"""
from bayespy import nodes
import numpy as np
p_theta = nodes.Dirichlet(np.ones(n_colors),
plates=(n_bags,),
name='p_theta')
bag_model = nodes.Categorical(p_theta,
plates=(n_trials, n_bags),
name='bag_model')
"""
Explanation: Model
End of explanation
"""
bag_model.observe(draw_marbles)
from bayespy.inference import VB
Q = VB(bag_model, p_theta)
Q.update(repeat=1000)
%matplotlib inline
import bayespy.plot as bpplt
bpplt.hinton(p_theta)
bpplt.pyplot.tight_layout()
bpplt.pyplot.title("Learned Distribution of colors using Categorical Distribution")
bpplt.pyplot.show()
"""
Explanation: Inference
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub
|
notebooks/ipsl/cmip6/models/sandbox-2/land.ipynb
|
gpl-3.0
|
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-2', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: IPSL
Source ID: SANDBOX-2
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-20 15:02:45
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dynamic vegetation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies of the snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river soil hydrology in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, describe the dependencies of the snow albedo calculations*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintenance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupling with rivers, which quantities are exchanged between the lakes and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
landmanbester/fundamentals_of_interferometry
|
2_Mathematical_Groundwork/fft_implementation_assignment.ipynb
|
gpl-2.0
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
import cmath
"""
Explanation: Implementation of a Radix-2 Fast Fourier Transform
Import standard modules:
End of explanation
"""
def loop_DFT(x):
"""
Implementing the DFT in a double loop
Input: x = the vector we want to find the DFT of
"""
#Get the length of the vector (will only work for 1D arrays)
N = x.size
#Create vector to store result in
X = np.zeros(N, dtype=complex)
for k in range(N):
for n in range(N):
X[k] += np.exp(-1j * 2.0* np.pi* k * n / N) * x[n]
return X
def matrix_DFT(x):
"""
Implementing the DFT in vectorised form
Input: x = the vector we want to find the DFT of
"""
#Get the length of the vector (will only work for 1D arrays)
N = x.size
#Create vector to store result in
n = np.arange(N)
k = n.reshape((N,1))
K = np.exp(-1j * 2.0 * np.pi * k * n / N)
return K.dot(x)
"""
Explanation: This assignment is to implement a python-based Fast Fourier Transform (FFT). Building on $\S$ 2.8 ➞ we will implement a 1-D radix-2 Cooley-Tukey-based FFT using both decimation in time (DIT) and decimation in frequency (DIF) for an $N = 2^n$ input function.
From $\S$ 2.8.2 ➞ the discrete Fourier transform (DFT) is defined as:
$$ \mathscr{F}_{\rm D}\{y\}_k = Y_k = \sum_{n\,=\,0}^{N-1} y_n\,e^{-\imath 2\pi \frac{nk}{N}}, $$
That is, the $k^{th}$ element of the Fourier-transformed spectrum $Y$ is a sum over all $n$ elements of the function $y$, each multiplied by a complex twiddle factor $e^{-\imath 2\pi \frac{nk}{N}}$. In $\S$ 2.8.5 ➞ two methods for computing the DFT of a size $N = 2^n$ discrete function were presented: a double loop to compute all elements of the Fourier-transformed spectrum, and a matrix multiplication by generating the Fourier kernel $K$. The compute time to perform the DFT is $\mathcal{O}(N^2)$, that is, it takes $cN^2$ operations where $c > 1$ is a constant factor. Though, as noted in $\S$ 2.8.5 ➞, the matrix implementation is much faster than the loop because it takes advantage of fast vector math libraries.
The DFT code is replicated here as it will be used to compare our implementation of the FFT:
End of explanation
"""
def one_layer_FFT(x):
    """An implementation of the 1D Cooley-Tukey FFT using one layer"""
    N = x.size
    if N % 2 > 0:
        print("Warning: length of x is not a power of two, returning DFT")
        return matrix_DFT(x)
    else:
        X_even = matrix_DFT(x[::2])
        X_odd = matrix_DFT(x[1::2])
        factor = np.exp(-2j * np.pi * np.arange(N) / N)
        return np.concatenate([X_even + factor[:N // 2] * X_odd, X_even + factor[N // 2:] * X_odd])
"""
Explanation: In $\S$ 2.8.6 ➞ the fast Fourier transform was introduced as using recursion to implement a Fourier transform in $\mathcal{O}(N\log_2N)$ computations, significantly reducing the computational cost of computing the Fourier transform, especially for large $N$. A 'one layer' fast Fourier transform was presented which split the input function into two, and applied the twiddle factor to all values in the layer before calling the matrix-based DFT. This code is replicated below.
End of explanation
"""
x = np.random.random(256) # create random vector to take the DFT of
print(np.allclose(loop_DFT(x), matrix_DFT(x)))  # returns True if all values are equal (within numerical error)
print(np.allclose(matrix_DFT(x), one_layer_FFT(x)))  # returns True if all values are equal (within numerical error)
"""
Explanation: We can easily show that each of these functions produces the same result by introducing a discrete test function $x$ and showing that the same values are reported by each function call:
End of explanation
"""
print('Double Loop DFT:')
%timeit loop_DFT(x)
print('\nMatrix DFT:')
%timeit matrix_DFT(x)
print('\nOne Layer FFT + Matrix DFT:')
%timeit one_layer_FFT(x)
"""
Explanation: We can also time each function to report the amount of time it takes to return a finished spectrum.
End of explanation
"""
print(np.allclose(one_layer_FFT(x), np.fft.fft(x)))
print('numpy FFT:')
%timeit np.fft.fft(x)
"""
Explanation: As we can see, the matrix DFT is significantly faster than the double-loop DFT; this is because of the fast vectorization functions in numpy. The 'one-layer' FFT is about twice as fast as the matrix DFT because of the FFT architecture. We can go one step further and use the built-in numpy FFT:
End of explanation
"""
ES-DOC/esdoc-jupyterhub
notebooks/fio-ronm/cmip6/models/sandbox-1/land.ipynb
gpl-3.0
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'fio-ronm', 'sandbox-1', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: FIO-RONM
Source ID: SANDBOX-1
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:01
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dynamic vegetation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Do the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies of the snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of the soil hydrology scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe in general how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies of the snow albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled with rivers, which quantities are exchanged between the lakes and rivers?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are basins not flowing to the ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
tpin3694/tpin3694.github.io
|
sql/select_rows_based_on_text_string.ipynb
|
mit
|
# Ignore
%load_ext sql
%sql sqlite://
%config SqlMagic.feedback = False
"""
Explanation: Title: Select Rows Based On Text String
Slug: select_rows_based_on_text_string
Summary: Select Rows Based On Text String in SQL.
Date: 2017-01-16 12:00
Category: SQL
Tags: Basics
Authors: Chris Albon
Note: This tutorial was written using Catherine Devlin's SQL in Jupyter Notebooks library. If you are not using a Jupyter Notebook, you can ignore the two lines of code below and any line containing %%sql. Furthermore, this tutorial uses SQLite's flavor of SQL; your version might have some differences in syntax.
For more, check out Learning SQL by Alan Beaulieu.
End of explanation
"""
%%sql
-- Create a table of criminals
CREATE TABLE criminals (pid, name, age, sex, city, minor);
INSERT INTO criminals VALUES (412, 'James Smith', 15, 'M', 'Santa Rosa', 1);
INSERT INTO criminals VALUES (234, 'Bill James', 22, 'M', 'Santa Rosa', 0);
INSERT INTO criminals VALUES (632, 'Stacy Miller', 23, 'F', 'Santa Rosa', 0);
INSERT INTO criminals VALUES (621, 'Betty Bob', NULL, 'F', 'Petaluma', 1);
INSERT INTO criminals VALUES (162, 'Jaden Ado', 49, 'M', NULL, 0);
INSERT INTO criminals VALUES (901, 'Gordon Ado', 32, 'F', 'Santa Rosa', 0);
INSERT INTO criminals VALUES (512, 'Bill Byson', 21, 'M', 'Santa Rosa', 0);
INSERT INTO criminals VALUES (411, 'Bob Iton', NULL, 'M', 'San Francisco', 0);
"""
Explanation: Create Data
End of explanation
"""
%%sql
-- Select all
SELECT *
-- From the criminals table
FROM criminals
"""
Explanation: View Table
End of explanation
"""
%%sql
-- Select all
SELECT *
-- From the criminals table
FROM criminals
-- If name starts with G
WHERE name LIKE 'G%'
"""
Explanation: Select Rows With Names Starting With G
End of explanation
"""
%%sql
-- Select all
SELECT *
-- From the criminals table
FROM criminals
-- If name ends with o
WHERE name LIKE '%o'
"""
Explanation: Select Rows With Names Ending With o
End of explanation
"""
%%sql
-- Select all
SELECT *
-- From the criminals table
FROM criminals
-- If name starts with any single character, then continues with 'ordon'
WHERE name LIKE '_ordon%'
"""
Explanation: Select Rows With Names Starting With Any Character, Then ordon
End of explanation
"""
|
weikang9009/giddy
|
notebooks/Sequence.ipynb
|
bsd-3-clause
|
import numpy as np
import pandas as pd
import libpysal
import mapclassify as mc
f = libpysal.io.open(libpysal.examples.get_path("usjoin.csv"))
pci = np.array([f.by_col[str(y)] for y in range(1929,2010)])
q5 = np.array([mc.Quantiles(y,k=5).yb for y in pci]).transpose()
q5
q5.shape
"""
Explanation: Alignment-based sequence methods
This notebook introduces alignment-based sequence methods (operationalized by the Optimal Matching (OM) algorithm), which were originally developed for matching protein and DNA sequences in biology, have been used extensively for analyzing strings in computer science, and have recently been widely applied to explore neighborhood change.
They generally work by finding the minimum cost for aligning one sequence to match another using a combination of operations including substitution, insertion, deletion and transposition. The cost of each operation can be parameterized differently and may be theory-driven or data-driven. The minimum cost is considered the distance between the two sequences.
The sequence module in giddy provides a suite of alignment-based sequence methods.
Author: Wei Kang weikang9009@gmail.com
End of explanation
"""
from giddy.sequence import Sequence
"""
Explanation: Import Sequence class from giddy.sequence:
End of explanation
"""
seq_hamming = Sequence(q5, dist_type="hamming")
seq_hamming
seq_hamming.seq_dis_mat #pairwise sequence distance matrix
"""
Explanation: "hamming"
substitution cost = 1
insertion/deletion cost = $\infty$
End of explanation
"""
seq_interval = Sequence(q5, dist_type="interval")
seq_interval
seq_interval.seq_dis_mat
"""
Explanation: "interval"
Assuming there are $k$ states in the sequences and they are ordinal/continuous.
substitution cost = differences between states
insertion/deletion cost = $k-1$
End of explanation
"""
seq_arbitrary = Sequence(q5, dist_type="arbitrary")
seq_arbitrary
seq_arbitrary.seq_dis_mat
"""
Explanation: "arbitrary"
substitution cost = 0.5
insertion/deletion cost = 1
End of explanation
"""
seq_markov = Sequence(q5, dist_type="markov")
seq_markov
seq_markov.seq_dis_mat
"""
Explanation: "markov"
substitution cost = $1-\frac{p_{ij}+p_{ji}}{2}$ where $p_{ij}$ is the empirical rate of transitioning from state $i$ to $j$
insertion/deletion cost = 1
End of explanation
"""
seq_tran = Sequence(q5, dist_type="tran")
seq_tran
seq_tran.seq_dis_mat
"""
Explanation: "tran"
Biemann, T. (2011). A Transition-Oriented Approach to Optimal Matching. Sociological Methodology, 41(1), 195–221. https://doi.org/10.1111/j.1467-9531.2011.01235.x
End of explanation
"""
|
MikeLing/shogun
|
doc/ipython-notebooks/evaluation/xval_modelselection.ipynb
|
gpl-3.0
|
%pylab inline
%matplotlib inline
# include all Shogun classes
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from shogun import *
# generate some ultra easy training data
gray()
n=20
title('Toy data for binary classification')
X=hstack((randn(2,n), randn(2,n)+1))
Y=hstack((-ones(n), ones(n)))
_=scatter(X[0], X[1], c=Y , s=100)
p1 = Rectangle((0, 0), 1, 1, fc="w")
p2 = Rectangle((0, 0), 1, 1, fc="k")
legend((p1, p2), ["Class 1", "Class 2"], loc=2)
# training data in Shogun representation
features=RealFeatures(X)
labels=BinaryLabels(Y)
"""
Explanation: Evaluation, Cross-Validation, and Model Selection
By Heiko Strathmann - heiko.strathmann@gmail.com - http://github.com/karlnapf - http://herrstrathmann.de.
Based on the model selection framework of his Google Summer of Code 2011 project | Saurabh Mahindre - github.com/Saurabh7, as part of a Google Summer of Code 2014 project mentored by Heiko Strathmann
This notebook illustrates the evaluation of prediction algorithms in Shogun using <a href="http://en.wikipedia.org/wiki/Cross-validation_(statistics)">cross-validation</a>, and selecting their parameters using <a href="http://en.wikipedia.org/wiki/Hyperparameter_optimization">grid-search</a>. We demonstrate this for a toy example on <a href="http://en.wikipedia.org/wiki/Binary_classification">Binary Classification</a> using <a href="http://en.wikipedia.org/wiki/Support_vector_machine">Support Vector Machines</a> and also a regression problem on a real world dataset.
General Idea
Splitting Strategies
K-fold cross-validation
Stratified cross-validation
Example: Binary classification
Example: Regression
Model Selection: Grid Search
General Idea
Cross-validation aims to estimate an algorithm's performance on unseen data. For example, one might be interested in the average classification accuracy of a Support Vector Machine when applied to new data that it was not trained on. This is important in order to compare the performance of different algorithms on the same target. Most crucial is the point that the data that was used for running/training the algorithm is not used for testing. Different algorithms here can also mean different parameters of the same algorithm. Thus, cross-validation can be used to tune the parameters of learning algorithms, as well as to compare different families of algorithms against each other. Cross-validation estimates are related to the marginal likelihood in Bayesian statistics in the sense that using them for selecting models avoids overfitting.
Evaluating an algorithm's performance on training data should be avoided since the learner may adjust to very specific random features of the training data which are not very important to the general relation. This is called overfitting. Maximising performance on the training examples usually results in algorithms explaining the noise in data (rather than actual patterns), which leads to bad performance on unseen data. This is one of the reasons behind splitting the data and using different splits for training and testing, which can be done using cross-validation.
Let us generate some toy data for binary classification to try cross validation on.
End of explanation
"""
k=5
normal_split=CrossValidationSplitting(labels, k)
"""
Explanation: Types of splitting strategies
As said earlier, cross-validation is based upon splitting the data into multiple partitions. Shogun has various strategies for this. The base class for them is CSplittingStrategy.
K-fold cross-validation
Formally, this is achieved via partitioning a dataset $X$ of size $|X|=n$ into $k \leq n$ disjoint partitions $X_i\subseteq X$ such that $X_1 \cup X_2 \cup \dots \cup X_k = X$ and $X_i\cap X_j=\emptyset$ for all $i\neq j$. Then, the algorithm is executed on all $k$ possibilities of merging $k-1$ partitions and subsequently tested on the remaining partition. This results in $k$ performances which are evaluated with some metric of choice (Shogun supports multiple ones). The procedure can be repeated (on different splits) in order to obtain less variance in the estimate. See [1] for a nice review on cross-validation using different performance measures.
End of explanation
"""
stratified_split=StratifiedCrossValidationSplitting(labels, k)
"""
Explanation: Stratified cross-validation
On classification data, the best choice is stratified cross-validation. This divides the data in such a way that the fraction of labels in each partition is roughly the same, which reduces the variance of the performance estimate quite a bit, in particular for data with more than two classes. In Shogun this is implemented by the CStratifiedCrossValidationSplitting class.
End of explanation
"""
split_strategies=[stratified_split, normal_split]
#code to visualize splitting
def get_folds(split, num):
split.build_subsets()
x=[]
y=[]
lab=[]
for j in range(num):
indices=split.generate_subset_indices(j)
x_=[]
y_=[]
lab_=[]
for i in range(len(indices)):
x_.append(X[0][indices[i]])
y_.append(X[1][indices[i]])
lab_.append(Y[indices[i]])
x.append(x_)
y.append(y_)
lab.append(lab_)
return x, y, lab
def plot_folds(split_strategies, num):
for i in range(len(split_strategies)):
x, y, lab=get_folds(split_strategies[i], num)
figure(figsize=(18,4))
gray()
suptitle(split_strategies[i].get_name(), fontsize=12)
for j in range(0, num):
subplot(1, num, (j+1), title='Fold %s' %(j+1))
scatter(x[j], y[j], c=lab[j], s=100)
_=plot_folds(split_strategies, 4)
"""
Explanation: Leave One Out cross-validation
Leave One Out cross-validation holds out one sample as the validation set. It is thus a special case of K-fold cross-validation with $k=n$, where $n$ is the number of samples. It is implemented in the LOOCrossValidationSplitting class.
Let us visualize the generated folds on the toy data.
End of explanation
"""
# define SVM with a small rbf kernel (always normalise the kernel!)
C=1
kernel=GaussianKernel(2, 0.001)
kernel.init(features, features)
kernel.set_normalizer(SqrtDiagKernelNormalizer())
classifier=LibSVM(C, kernel, labels)
# train
_=classifier.train()
"""
Explanation: Stratified splitting takes care that each fold has almost the same number of samples from each class. This is not the case with normal splitting which usually leads to imbalanced folds.
Toy example: Binary Support Vector Classification
Following the example from above, we will tune the performance of a SVM on the binary classification problem. We will
demonstrate how to evaluate a loss function or metric on a given algorithm
then learn how to estimate this metric for the algorithm performing on unseen data
and finally use those techniques to tune the parameters to obtain the best possible results.
The involved methods are
LibSVM as the binary classification algorithms
the area under the ROC curve (AUC) as performance metric
three different kernels to compare
End of explanation
"""
# instantiate a number of Shogun performance measures
metrics=[ROCEvaluation(), AccuracyMeasure(), ErrorRateMeasure(), F1Measure(), PrecisionMeasure(), RecallMeasure(), SpecificityMeasure()]
for metric in metrics:
print metric.get_name(), metric.evaluate(classifier.apply(features), labels)
"""
Explanation: OK, we have now performed classification on the training data. How well did it work? We can easily evaluate this with many different performance measures.
End of explanation
"""
metric=AccuracyMeasure()
cross=CrossValidation(classifier, features, labels, stratified_split, metric)
# perform the cross-validation, note that this call involved a lot of computation
result=cross.evaluate()
# the result needs to be casted to CrossValidationResult
result=CrossValidationResult.obtain_from_generic(result)
# this class contains a field "mean" which contain the mean performance metric
print "Testing", metric.get_name(), result.mean
"""
Explanation: Note how, for example, the error rate is 1 - accuracy. All of these numbers represent the training error, i.e. the ability of the classifier to explain the given data.
Now, the training error is zero. This seems good at first. But is this setting of the parameters a good idea? No! Good performance on the training data alone does not mean anything. A simple look-up table is able to produce zero error on training data. What we want is for our method to generalise from the input data so that it performs well on unseen data. We will now use cross-validation to estimate the performance on such data.
We will use CStratifiedCrossValidationSplitting, which accepts a reference to the labels and the number of partitions as parameters. This instance is then passed to the class CCrossValidation, which does the estimation using the desired splitting strategy. The latter class can take all algorithms that are implemented against the CMachine interface.
End of explanation
"""
print "Testing", metric.get_name(), [CrossValidationResult.obtain_from_generic(cross.evaluate()).mean for _ in range(10)]
"""
Explanation: Now this is incredibly bad compared to the training error. In fact, it is very close to random performance (0.5). The lesson: Never judge your algorithms based on the performance on training data!
Note that for small data sizes, the cross-validation estimates are quite noisy. If we run it multiple times, we get different results.
End of explanation
"""
# 25 runs and 95% confidence intervals
cross.set_num_runs(25)
# perform x-validation (now even more expensive)
cross.evaluate()
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
print "Testing cross-validation mean %.2f " \
% (result.mean)
"""
Explanation: It is better to average a number of different runs of cross-validation in this case. A nice side effect of this is that the results can be used to estimate error intervals for a given confidence rate.
End of explanation
"""
widths=2**linspace(-5,25,10)
results=zeros(len(widths))
for i in range(len(results)):
kernel.set_width(widths[i])
result=CrossValidationResult.obtain_from_generic(cross.evaluate())
results[i]=result.mean
plot(log2(widths), results, 'blue')
xlabel("log2 Kernel width")
ylabel(metric.get_name())
_=title("Accuracy for different kernel widths")
print "Best Gaussian kernel width %.2f" % widths[results.argmax()], "gives", results.max()
# compare this with a linear kernel
classifier.set_kernel(LinearKernel())
lin_k=CrossValidationResult.obtain_from_generic(cross.evaluate())
plot([log2(widths[0]), log2(widths[len(widths)-1])], [lin_k.mean,lin_k.mean], 'r')
# please excuse this horrible code :)
print "Linear kernel gives", lin_k.mean
_=legend(["Gaussian", "Linear"], loc="lower center")
"""
Explanation: Using this machinery, it is very easy to compare multiple kernel parameters against each other to find the best one. It is even possible to compare different kernels.
End of explanation
"""
feats=RealFeatures(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'uci/housing/fm_housing.dat')))
labels=RegressionLabels(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'uci/housing/housing_label.dat')))
preproc=RescaleFeatures()
preproc.init(feats)
feats.add_preprocessor(preproc)
feats.apply_preprocessor(True)
#Regression models
ls=LeastSquaresRegression(feats, labels)
tau=1
rr=LinearRidgeRegression(tau, feats, labels)
width=1
tau=1
kernel=GaussianKernel(feats, feats, width)
kernel.set_normalizer(SqrtDiagKernelNormalizer())
krr=KernelRidgeRegression(tau, kernel, labels)
regression_models=[ls, rr, krr]
"""
Explanation: This gives a brute-force way to select parameters of any algorithm implemented under the CMachine interface. The cool thing is that it is also possible to compare different model families against each other. Below, we compare a number of regression models in Shogun on the Boston Housing dataset.
Regression problem and cross-validation
Various regression models in Shogun are now used to predict house prices using the Boston Housing dataset. Cross-validation is used to find the best parameters and also to test the performance of the models.
End of explanation
"""
n=30
taus = logspace(-4, 1, n)
#5-fold cross-validation
k=5
split=CrossValidationSplitting(labels, k)
metric=MeanSquaredError()
cross=CrossValidation(rr, feats, labels, split, metric)
cross.set_num_runs(50)
errors=[]
for tau in taus:
#set necessary parameter
rr.set_tau(tau)
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
#Enlist mean error for all runs
errors.append(result.mean)
figure(figsize=(20,6))
suptitle("Finding best (tau) parameter using cross-validation", fontsize=12)
p=subplot(121)
title("Ridge Regression")
plot(taus, errors, linewidth=3)
p.set_xscale('log')
p.set_ylim([0, 80])
xlabel("Taus")
ylabel("Mean Squared Error")
cross=CrossValidation(krr, feats, labels, split, metric)
cross.set_num_runs(50)
errors=[]
for tau in taus:
krr.set_tau(tau)
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
#print tau, "error", result.mean
errors.append(result.mean)
p2=subplot(122)
title("Kernel Ridge regression")
plot(taus, errors, linewidth=3)
p2.set_xscale('log')
xlabel("Taus")
_=ylabel("Mean Squared Error")
"""
Explanation: Let us use cross-validation to compare various values of the tau parameter for ridge regression (Regression notebook). We will use MeanSquaredError as the performance metric. Note that normal splitting is used, since it might be impossible to generate "good" splits using stratified splitting in the case of regression, where the labels take continuous values.
End of explanation
"""
n=50
widths=logspace(-2, 3, n)
krr.set_tau(0.1)
metric=MeanSquaredError()
k=5
split=CrossValidationSplitting(labels, k)
cross=CrossValidation(krr, feats, labels, split, metric)
cross.set_num_runs(10)
errors=[]
for width in widths:
kernel.set_width(width)
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
#print width, "error", result.mean
errors.append(result.mean)
figure(figsize=(15,5))
p=subplot(121)
title("Finding best width using cross-validation")
plot(widths, errors, linewidth=3)
p.set_xscale('log')
xlabel("Widths")
_=ylabel("Mean Squared Error")
"""
Explanation: A low value of the error indicates a good pick for the tau parameter, which should be easy to conclude from the plots. In the case of Ridge Regression the value of tau, i.e. the amount of regularization, doesn't seem to matter, but it does in the case of Kernel Ridge Regression. One interpretation of this could be the lack of overfitting in the feature space for ridge regression and the occurrence of overfitting in the new kernel space in which Kernel Ridge Regression operates. <br/> Next we will compare a range of values for the width of the Gaussian kernel used in Kernel Ridge Regression
End of explanation
"""
n=40
taus = logspace(-3, 0, n)
widths=logspace(-1, 4, n)
cross=CrossValidation(krr, feats, labels, split, metric)
cross.set_num_runs(1)
x, y=meshgrid(taus, widths)
grid=array((ravel(x), ravel(y)))
print grid.shape
errors=[]
for i in range(0, n*n):
krr.set_tau(grid[:,i][0])
kernel.set_width(grid[:,i][1])
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
errors.append(result.mean)
errors=array(errors).reshape((n, n))
from mpl_toolkits.mplot3d import Axes3D
#taus = logspace(0.5, 1, n)
jet()
fig=figure(figsize=(15,7))
ax=subplot(121)
c=pcolor(x, y, errors)
_=contour(x, y, errors, linewidths=1, colors='black')
_=colorbar(c)
xlabel('Taus')
ylabel('Widths')
ax.set_xscale('log')
ax.set_yscale('log')
ax1=fig.add_subplot(122, projection='3d')
ax1.plot_wireframe(log10(y),log10(x), errors, linewidths=2, alpha=0.6)
ax1.view_init(30,-40)
xlabel('Taus')
ylabel('Widths')
_=ax1.set_zlabel('Error')
"""
Explanation: The values for the kernel parameter and tau may not be independent of each other, so the values we have may not be optimal. A brute-force way to handle this would be to try all pairs of these values, but that is only feasible for a low number of parameters.
End of explanation
"""
#use the best parameters
rr.set_tau(1)
krr.set_tau(0.05)
kernel.set_width(2)
title_='Performance on Boston Housing dataset'
print "%50s" %title_
for machine in regression_models:
metric=MeanSquaredError()
cross=CrossValidation(machine, feats, labels, split, metric)
cross.set_num_runs(25)
result=cross.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
print "-"*80
print "|", "%30s" % machine.get_name(),"|", "%20s" %metric.get_name(),"|","%20s" %result.mean ,"|"
print "-"*80
"""
Explanation: Let us pick approximately good parameters using the plots. Now that we have the best parameters, let us compare the various regression models on the data set.
End of explanation
"""
#Root
param_tree_root=ModelSelectionParameters()
#Parameter tau
tau=ModelSelectionParameters("tau")
param_tree_root.append_child(tau)
# also R_LINEAR/R_LOG is available as type
min_value=0.01
max_value=1
type_=R_LINEAR
step=0.05
base=2
tau.build_values(min_value, max_value, type_, step, base)
"""
Explanation: Model selection using Grid Search
A standard way of selecting the best parameters of a learning algorithm is by grid search. This is done by an exhaustive search of a specified parameter space. CModelSelectionParameters is used to select various parameters and their ranges to be used for model selection. A tree-like structure is used, where the nodes can be CSGObjects or the parameters of the object. The range of values to be searched for the parameters is set using the build_values() method.
End of explanation
"""
#kernel object
param_gaussian_kernel=ModelSelectionParameters("kernel", kernel)
gaussian_kernel_width=ModelSelectionParameters("log_width")
gaussian_kernel_width.build_values(0.1, 6.0, R_LINEAR, 0.5, 2.0)
#kernel parameter
param_gaussian_kernel.append_child(gaussian_kernel_width)
param_tree_root.append_child(param_gaussian_kernel)
# cross validation instance used
cross_validation=CrossValidation(krr, feats, labels, split, metric)
cross_validation.set_num_runs(1)
# model selection instance
model_selection=GridSearchModelSelection(cross_validation, param_tree_root)
print_state=False
# TODO: enable it once crossval has been fixed
#best_parameters=model_selection.select_model(print_state)
#best_parameters.apply_to_machine(krr)
#best_parameters.print_tree()
result=cross_validation.evaluate()
result=CrossValidationResult.obtain_from_generic(result)
print 'Error with Best parameters:', result.mean
"""
Explanation: Next we will create a CModelSelectionParameters instance with a kernel object, which has to be appended to the root node. The kernel object itself is appended with a kernel width parameter, which is the parameter we wish to search over.
End of explanation
"""
|
VlachosGroup/VlachosGroupAdditivity
|
docs/source/WorkshopJupyterNotebooks/pgradd_demo/pgradd_demo.ipynb
|
mit
|
import pgradd
print(pgradd.__file__)
from pgradd.GroupAdd import GroupLibrary
import pgradd.ThermoChem
lib = GroupLibrary.Load('GRWSurface2018')
"""
Explanation: Theory, Applications, and Tools for Multiscale Kinetic Modeling (July 2020)
pGrAdd Demonstration
1. Introduction
<img src="images/pGrAdd_RGB_github.png" width=400>
A Python package and database, developed by the Vlachos Research Group at the University of Delaware, that implements the First-Principles Semi-Empirical (FPSE) Group Additivity (GA) method for estimating thermodynamic properties of molecules. First introduced by Benson et al. for gas molecules, GA was later extended by Kua et al. to species adsorbed on catalytic surfaces. GA relies on graph theory, defining each molecule as a collection of groups and their frequencies of occurrence. The values of the GA groups are determined from DFT-calculated thermodynamic properties of a (training) set of molecules by linear regression, minimizing the difference between the thermodynamic properties predicted by GA and those estimated via DFT. This package implements four group additivity schemes in six databases (see below) and will convert a molecule entered as a Simplified Molecular-Input Line-Entry System (SMILES) string, providing the constituent groups, their frequency of occurrence, and the estimated thermodynamic properties for that molecule. pgradd also provides a general GA framework for implementing a custom group additivity scheme from your own ab initio data and regressing it to groups.
2. Available databases
Benson's gas molecule group additivity (BensonGA)
Salciccioli et al. (2012) adsorbate on Pt(111) group additivity scheme (SalciccioliGA2012)
Gu et al. (2017) solvated adsorbate on Pt(111) group additivity scheme (GuSolventGA2017Aq, GuSolventGA2017Vac)
Wittreich (2018) adsorbate on Pt(111). Subset of Gu et al. including only surface species, group values regressed with OLS/GLS (Maximum Likelihood) and DFT data processed with pMuTT (GRWSurface2018)
Wittreich (2018) solvated adsorbate on Pt(111). Subset of Gu et al. including only surface species, group values regressed with OLS/GLS (Maximum Likelihood) and DFT data processed with pMuTT (GRWAqueous2018)
3. Installation
pip install pgradd
or upgrade from a previous version
pip install --upgrade pgradd
4. Demonstration
Load the pGrAdd libraries
End of explanation
"""
groups = lib.GetDescriptors('C(CC([Pt])([Pt])[Pt])([Pt])([Pt])[Pt]')
print('Group Frequency')
print('----- ---------')
for x in groups:
print('{0:20s} {1:g}'.format(x, groups[x]))
"""
Explanation: Find the groups in a molecule
Molecule: CCH<sub>2</sub>C(S)
SMILES: C(CC([Pt])([Pt])[Pt])([Pt])([Pt])[Pt])
<img src="images/CCH2C(S).png" width=400>
End of explanation
"""
thermochem = lib.Estimate(groups, 'thermochem')
S = thermochem.get_S(T=298.15, units='cal/mol/K')
H = thermochem.get_H(T=298.15, units='kcal/mol')
G = thermochem.get_G(T=298.15, units='kJ/mol')
HoRT = thermochem.get_HoRT(298.15)
print('Entropy(298 K) = {0:5.2f} [cal/mol/K]'.format(S))
print('Enthalpy(298 K) = {0:5.2f} [kcal/mol]'.format(H))
print('Gibbs(298 K) = {0:5.2f} [kJ/mol]'.format(G))
print('Dimensionless Enthalpy(298 K) = {0:5.2f}'.format(HoRT))
import numpy as np
from pmutt import plot_1D
from matplotlib import pyplot as plt
T = np.linspace(300, 1500)
fig1, ax1 = plot_1D(thermochem, x_name='T', x_values=T,
methods=('get_H', 'get_S', 'get_G'),
get_H_kwargs={'units':'kcal/mol'},
get_S_kwargs={'units':'cal/mol/K'},
get_G_kwargs={'units': 'kJ/mol'})
fig1.set_figheight(10)
ax1[0].set_ylabel('H (kcal/mol)')
ax1[1].set_ylabel('S (cal/mol/K)')
ax1[2].set_ylabel('G (kJ/mol)')
ax1[0].set_xlabel('Temperature [K]')
ax1[1].set_xlabel('Temperature [K]')
ax1[2].set_xlabel('Temperature [K]')
plt.tight_layout()
plt.show()
from pmutt.empirical.nasa import Nasa
from pmutt.io.thermdat import write_thermdat
N = []
N.append(Nasa.from_model(thermochem, name='CCH2C(S)', T_low=300, T_high=1500, phase='S', elements={'C': 3, 'H': 2}))
print(write_thermdat(N))
"""
Explanation: Calculate thermodynamic properties of the molecule
End of explanation
"""
groups = lib.GetDescriptors('C([Pt])(O)C')
print('Group Frequency')
print('----- ---------')
for x in groups:
print('{0:20s} {1:g}'.format(x, groups[x]))
"""
Explanation: Find the groups in a molecule
Molecule: CH<sub>3</sub>CHOH(S)
SMILES: C([Pt])(O)C
<img src="images/CH3CHOH(S).png" width=400>
End of explanation
"""
thermochem = lib.Estimate(groups, 'thermochem')
S = thermochem.get_S(T=298.15, units='cal/mol/K')
H = thermochem.get_H(T=298.15, units='kcal/mol')
G = thermochem.get_G(T=298.15, units='kJ/mol')
HoRT = thermochem.get_HoRT(298.15)
print('Entropy(298 K) = {0:5.2f} [cal/mol/K]'.format(S))
print('Enthalpy(298 K) = {0:5.2f} [kcal/mol]'.format(H))
print('Gibbs(298 K) = {0:5.2f} [kJ/mol]'.format(G))
print('Dimensionless Enthalpy(298 K) = {0:5.2f}'.format(HoRT))
import numpy as np
from pmutt import plot_1D
from matplotlib import pyplot as plt
T = np.linspace(300, 1500)
fig1, ax1 = plot_1D(thermochem, x_name='T', x_values=T,
methods=('get_H', 'get_S', 'get_G'),
get_H_kwargs={'units':'kcal/mol'},
get_S_kwargs={'units':'cal/mol/K'},
get_G_kwargs={'units': 'kJ/mol'})
fig1.set_figheight(10)
ax1[0].set_ylabel('H (kcal/mol)')
ax1[1].set_ylabel('S (cal/mol/K)')
ax1[2].set_ylabel('G (kJ/mol)')
ax1[0].set_xlabel('Temperature [K]')
ax1[1].set_xlabel('Temperature [K]')
ax1[2].set_xlabel('Temperature [K]')
plt.tight_layout()
plt.show()
from pmutt.empirical.nasa import Nasa
from pmutt.io.thermdat import write_thermdat
N = []
N.append(Nasa.from_model(thermochem, name='CH3CHOH(S)', T_low=300, T_high=1500, phase='S', elements={'C': 2, 'H': 5, 'O':1}))
print(write_thermdat(N))
"""
Explanation: Calculate thermodynamic properties of the molecule
End of explanation
"""
|
ajs3g11/training-public
|
FEEG6016 Simulation and Modelling/02-Monte-Carlo_Lab-2.ipynb
|
mit
|
from IPython.core.display import HTML
css_file = 'https://raw.githubusercontent.com/ngcm/training-public/master/ipython_notebook_styles/ngcmstyle.css'
HTML(url=css_file)
"""
Explanation: Monte Carlo Methods: Lab 2
End of explanation
"""
p_JZG_T2 = [0.1776, 0.329, 0.489, 0.7, 1.071, 1.75, 3.028, 5.285, 9.12]
"""
Explanation: Point to note: an electronic copy of the Frenkel and Smit book is available through the library. This lab is based on case study 1 in chapter 3.4 of that book.
Lennard-Jones fluids
When computing the interactions between lots of bodies (atoms, molecules, planets, etc) we can either use the true potential or force between them, or we can approximate it with some potential (or force) that is easier (and usually cheaper) to calculate. The parameters of the potential can then be set to approximate the "real" features we're interested in.
In computational chemistry, one such approximation is the Lennard-Jones potential. Given two bodies separated by a distance $r$, the potential generated by those two bodies is
\begin{equation}
U(r) = 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{12} - \left( \frac{\sigma}{r} \right)^{6} \right].
\end{equation}
Here $\varepsilon$ and $\sigma$ are parameters. When there are more than two bodies the total potential is the sum over all pairwise potentials.
In principle this generates a potential between particles that are separated by huge distances. Instead it is typical to truncate the potential: to pick a cut-off distance so that any particles separated by more than that distance do not contribute, and to correct for those small contributions.
Here we use a Lennard-Jones potential inside a box size $[0,L]^3$ with a cut-off $r_c = L/2$, with parameters set so that
\begin{equation}
U = \begin{cases} 4 \left[ \frac{1}{r^{12}} - \frac{1}{r^6} \right] & r < r_c \\ 0 & r > r_c. \end{cases}
\end{equation}
Include tail corrections (that is, additional energy and pressure terms resulting from the particles outside the cutoff radius) as
\begin{align}
U^{\text{tail}} & = \frac{8 \pi \rho}{3} \left[ \frac{1}{3} \frac{1}{r_c^9} - \frac{1}{r_c^3} \right] \\
p^{\text{tail}} & = \frac{16 \pi \rho^2}{3} \left[ \frac{2}{3} \frac{1}{r_c^9} - \frac{1}{r_c^3} \right].
\end{align}
For each configuration we need to compute the pressure using
$$
\begin{equation}
p = \frac{\rho}{\beta} + \frac{\text{Virial}}{V}
\end{equation}
$$
where
$$
\begin{equation}
\text{Virial} = \sum_i \sum_{j > i} \vec{f}( \vec{r}_{ij} ) \cdot \vec{r}_{ij}
\end{equation}
$$
where, as usual, $\vec{r}_{ij}$ is the separation between the atoms, $\vec{r}_{ij} = \vec{r}_i - \vec{r}_j$, and the intermolecular force $\vec{f}$ is given by
$$
\begin{align}
\vec{f}(\vec{r}_{ij}) &= - \nabla U \\
& = \begin{cases} 24 \left[ 2 \frac{1}{r^{14}} - \frac{1}{r^8} \right] \vec{r}_{ij} & r < r_c \\ \vec{0} & r > r_c \end{cases}
\end{align}
$$
Note that in the reduced coordinates $\beta = T^{-1}$.
Monte Carlo code
We will be using an $NTV$ approach, keeping the number of particles fixed ($N = 100$), the temperature fixed ($T=2$) and the volume fixed (indirectly, via the density $\rho = N / V = N L^{-3}$; use $\rho = a/10$ for $a = 1, \dots, 9$, but start by just considering the $a=1, 2$ cases). You will need to take at least $10,000$ steps for the larger values of $a$; $20,000$ is better, but in all cases you should test with a smaller number of particles and steps ($1,000$ may be sufficient for small values of $a$).
For reference, we note the solutions for the pressures at $T=2$, taken from Johnson, Zollweg and Gubbins:
End of explanation
"""
%matplotlib inline
import numpy
from scipy import constants
from matplotlib import pyplot
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 16
rcParams['figure.figsize'] = (12,6)
"""
Explanation: Efficiency
Note that the sum over all particles scales as $n^2$ where $n$ is the number of particles. As the number of steps the algorithm will need to take will also scale as $n$, this makes the number of calculations at least as bad as $n^3$. This is expensive; if you try the naive approach then you'll have difficulty using more than 50 particles in a moderate time.
Instead we can note that, at each stage, the algorithm will move only one particle. Therefore, if we store not just the locations of the particles but also their pairwise separations, at each step we will only have to modify a small number of the separations. So we can store $r^2_{ij} = \vec{r}_{ij} \cdot \vec{r}_{ij}$ only, for $j > i$, and when perturbing particle $k$ we only need to update the separations $r^2_{ik}$ for $i<k$ and $r^2_{kj}$ for $k<j$.
This should significantly reduce the number of calculations done in each step.
In addition, note that for reasonable behaviour the acceptance rate should be $\sim 40\%$. This depends on the fractional perturbation distance $\Delta$; values $\sim 0.4$ are reasonable when $\rho \sim 0.1$, but values $\sim 0.02$ are reasonable when $\rho \sim 0.9$.
Results
Check that the energy has converged to a "constant" state.
Plot a histogram of the energies to show that they follow the Boltzmann distribution.
End of explanation
"""
|
fotis007/python_intermediate
|
Python_2_2.ipynb
|
gpl-3.0
|
# example
a = [1, 2, 3,]
my_iterator = iter(a)
my_iterator.__next__()
my_iterator.__next__()
"""
Explanation:
## Python for Advanced Users 2
### Functional Programming I
#### Types of programming languages:
<ul>
<li>Procedural<br/>
A program consists of a list of statements that are executed sequentially. Most programming languages are procedural, e.g. C.</li>
<li>Declarative<br/>
The program only specifies which problem is to be solved; the interpreter then turns this into statements, e.g. SQL.</li>
<li>Object-oriented<br/>
Programs create and use objects and manipulate these objects. Objects have internal states that are set through methods, e.g. Java, C++.</li>
<li>Functional<br/>
Decomposes a problem into a series of functions (comparable to mathematical functions, e.g. f(x) = y). The functions have a defined input and output, but no internal state that influences the output for a given input, e.g. Lisp or Haskell.</li>
</ul>
#### Further characteristics of functional programming:
<ul>
<li>Functions can be treated like data, i.e. a function can be passed to another function as a parameter, and the output of a function can itself be a function.</li>
<li>Recursion is the primary form of control flow, e.g. for building loops.</li>
<li>The manipulation of lists is central.</li>
<li>'Pure' functional programming languages avoid side effects, e.g. first assigning one value to a variable and then another in order to track the internal state of the program. Some functions, however, are called only for their 'side effects', e.g. print() or time.sleep(), and not for their return values.</li>
<li>Functional programming languages avoid assignments and work with expressions instead, i.e. with functions that have parameters and an output. Ideally the whole program consists of a chain of functions, where the output of one function becomes the parameter of the next, and so on, e.g.:<br/>
a = 3<br/>
func3(func2(func1(a)))<br/></li>
<li>Functional programming languages mainly use functions that operate on other functions, which in turn operate on other functions.</li>
</ul>
#### Advantages of functional programming:
<ul>
<li>Formal provability (mostly of academic interest)</li>
<li>Modularity<br/>
Functional programming forces you to write very small functions, which are easier to reuse and to combine in a modular way.</li>
<li>Ease of debugging and testing<br/>
Since input and output are always clearly defined, debugging and writing unit tests are easier.</li>
</ul>
As always, the same holds in Python: Python makes the functional paradigm possible but does not enforce it through restrictions, as pure functional programming languages do. Typically, procedural, object-oriented and functional techniques are used together in Python; for example, object-oriented and functional programming can be combined by defining functions that take objects as input and return objects as output.
In Python, functional programming is realised by, among others, the following components:
<ul>
<li>Iterators</li>
<li>List comprehensions, generator expressions</li>
<li>The functions map(), filter()</li>
<li>The itertools module</li>
</ul>
### Iterators
The iter() method tries to return an iterator for an arbitrary object. On every call the iterator returns one object of the list and advances the list pointer by one. Objects are iterable if they support the iter() method, e.g. lists, dictionaries, file handles, etc.
End of explanation
"""
for i in a:
print(str(i))
"""
Explanation: In certain contexts Python expects an iterable object, e.g. in a for loop:
End of explanation
"""
for i in iter(a):
print(str(i))
"""
Explanation: This is equivalent to
End of explanation
"""
# example
a = [1, 2, 3,]
my_iterator = iter(a)
list(my_iterator)
my_iterator = iter(a)
tuple(my_iterator)
"""
Explanation: You can get the complete output of an iterator by passing it as a parameter to the list() or tuple() function.
End of explanation
"""
# a traditional for loop:
squared = []
for x in range(10):
squared.append(x**2)
squared
"""
Explanation: Question: Why did I create the iterator again in the last example? Could that be left out?
<h3>List Comprehension</h3>
<p>List comprehensions are one element (of many) of functional programming in Python. Their most important advantage is the avoidance of side effects. What does that mean? Instead of changing the state of a data structure (e.g. an object), functional expressions are built like mathematical functions, consisting only of a clear input and an equally clearly defined output.</p>
<p>General syntax: <br/>
<code>[<expression> for <variable> in <iterable> <<if <condition> >>]</code>
<p>In the following example the goal is to square the numbers from 0 to 9. First the traditional solution with a for loop, in whose body a new data structure is built up.</p>
End of explanation
"""
[x**2 for x in range(10)]
#a + bx
#2 + 0.5x
#x = 5 to x = 10
[x*0.5 + 2 for x in range(5, 11)]
"""
Explanation: And here is the version using a list comprehension:
End of explanation
"""
squared = [x**2 for x in range(10)]
squared
"""
Explanation: Of course, the return value of a list comprehension can also be stored in a variable.
End of explanation
"""
# Task: compare two lists of numbers and output all combinations of numbers that are not equal
# First the traditional solution with nested loops:
combs = []
for x in [1,2,3 ]:
for y in [3,1,4]:
if x != y:
combs.append((x, y))
combs
"""
Explanation: Nested loops
Several nested for loops can also be used within a list comprehension:
End of explanation
"""
[(x,y) for x in [1,2,3] for y in [3,1,4] if x != y]
"""
Explanation: And now as a list comprehension:
End of explanation
"""
a = ["ein Haus", "eine Tasse", "ein Kind"]
list(map(len, a))
"""
Explanation: <h4>Exercise 1</h4>
<p>Replace a series of words with a series of numbers giving the number of vowels in each word, e.g.: "Dies ist ein Satz" -> "2 1 2 1". </p>
The functions map(), filter()
map()
map(FunktionX, list)<br/>
The function FunktionX is applied to every element of the list. The output is an iterator over a new list containing the results
End of explanation
"""
for i in a:
print(len(i))
"""
Explanation: procedural version:
End of explanation
"""
#returns True if x is an even number
def is_even(x):
return (x % 2) == 0
b = [2,3,4,5,6]
list(filter(is_even, b))
"""
Explanation: Exercise 2
Use map() to output each word in a list of words in upper case. Discuss any problems with a neighbour.
Exercise 3 (optional)
Solve Exercise 1 using map()
filter()
filter(FunktionX, list)<br/>
The function FunktionX is applied to every element of the list. This constructs a new iterator containing those elements of the list for which FunktionX returns True.
<br/>Example:
End of explanation
"""
import itertools
#don't try this at home:
#list(itertools.cycle([1,2,3,4,5]))
"""
Explanation: Exercise 4
Use filter to create, from the following text, a word list that contains no pronouns, no articles and none of the words "dass", "ist", "nicht", "auch", "und": <br/>
"Ich denke auch, dass ist nicht schlimm. Er hat es nicht gemerkt und das ist gut. Und überhaupt: es ist auch seine Schuld. Ehrlich, das ist wahr."
The itertools module
The functions of the itertools module can be divided into functions that:
<ul>
<li>create a new iterator on the basis of an existing iterator,</li>
<li>select parts of the output of an iterator,</li>
<li>group the output of an iterator,</li>
<li>combine iterators.</li>
</ul>
Creating a new iterator
These functions create a new iterator on the basis of an existing one: <br/>
itertools.count(), itertools.cycle(), itertools.repeat(), itertools.chain(), itertools.islice(), itertools.tee()
itertools.cycle(iterator) returns the elements of iterator in an infinite loop
End of explanation
"""
import itertools
list(itertools.repeat([1,2,3,4], 3))
"""
Explanation: itertools.repeat(obj, [n]) returns obj n times; note that the whole list [1, 2, 3, 4] is repeated here, not its individual elements.
End of explanation
"""
a = [1, 2, 3]
b = [4, 5, 6]
c = [7, 8, 9]
list(itertools.chain(a, b, c))
"""
Explanation: itertools.chain(iterator_1, iterator_2, ...) creates a new iterator in which the elements of iterator_1, iterator_2, etc. are chained together.
End of explanation
"""
tuple(itertools.combinations([1, 2, 3, 4], 2))
"""
Explanation: Exercise 5
Chain the contents of three files into a single iterator
Selecting parts of the output of an iterator
itertools.filterfalse(predicate, iterator) is the counterpart of filter(). The output contains all elements for which the predicate is false.
itertools.takewhile(predicate, iterator) yields elements as long as the predicate is true
itertools.dropwhile(predicate, iterator) drops elements as long as the predicate is true, then yields the rest.
itertools.compress(data, selectors) takes two iterators and returns only those elements of the first (data) for which the corresponding element of the second (selectors) is true. It stops when either iterator is exhausted.
Combining iterators
itertools.combinations(iterator, r) returns all r-tuple combinations of the elements of the iterator. Example:
End of explanation
"""
tuple(itertools.permutations([1, 2, 3, 4], 2))
"""
Explanation: itertools.permutations(iterator, r) returns all r-element permutations of the elements, regardless of their order in the iterator:
End of explanation
"""
a = [2, -3, 8, 12, -22, -1]
list(map(abs, a))
"""
Explanation: Exercise 7
How many 2-permutations are possible with the even numbers between 1 and 101?
The operator module
Mathematical operations: add(), sub(), mul(), floordiv(), abs(), ... <br/>
Logical operations: not_(), truth()<br/>
Bitwise operations: and_(), or_(), invert()<br/>
Comparisons: eq(), ne(), lt(), le(), gt(), and ge()<br/>
Object identity: is_(), is_not()<br/>
End of explanation
"""
def calc(n):
return (n * 13) / 100
a = [1, 2, 5, 7]
list(map(calc, a))
"""
Explanation: Lambda functions
lambda makes it possible to define small anonymous functions. Suppose we want to divide every number in a list of numbers by 100 and multiply it by 13. Then we could do it like this:
End of explanation
"""
list(map(lambda x: (x * 13)/100, a))
"""
Explanation: With lambda we can now plug this function in directly:
End of explanation
"""
# counts the vowels of a string
def cv(word):
return sum([1 for a in word if a in "aeiouAEIOUÄÖÜäöü"])
a = "Dies ist eine Lüge, oder nicht?"
[cv(w) for w in a.split()]
"""
Explanation: Allerdings gibt es sehr unterschiedliche Meinungen darüber, ob auf diese Weise guter Code entsteht. Ich finde diesen Ratschlag anz gut:
<ul>
<li>Write a lambda function.</li>
<li>Write a comment explaining what the heck that lambda does. </li>
<li>Study the comment for a while, and think of a name that captures the essence of the comment. </li>
<li>Convert the lambda to a def statement, using that name. </li>
<li>Remove the comment. </li>
</ul>
Homework
1) Print all Unicode characters between 34 and 250 and print all of those that are neither letters nor digits.
2) How could you print the contents of all files with the extension *.txt in a subdirectory one after another?
3) Have a look at the functions sort and itemgetter in the Python documentation. How can you combine them to sort a dictionary by its values? (no stackoverflow :-)
<br/><br/><br/><br/><br/><br/><br/><br/>
Solutions
Exercise 1
End of explanation
"""
# upper-cases the string word
def upper(word):
return word.upper()
a = ["dies", "ist", "Ein", "satz"]
list(map(upper, a))
"""
Explanation: <br/>
<br/><br/><br/><br/><br/>
Exercise 2
End of explanation
"""
def cv(word):
return sum([1 for a in word if a in "aeiouAEIOUÄÖÜäöü"])
a = "Dies ist eine Lüge, oder nicht?"
list(map(cv, a.split()))
"""
Explanation: <br/><br/><br/><br/><br/><br/><br/><br/>
Exercise 3
End of explanation
"""
import re
# returns True if word is NOT a function word (i.e. the word is kept by the filter)
def is_no_function_word(word):
f_words = ["der", "die", "das", "ich", "du", "er", "sie", "es", "wir", "ihr", "dass", "ist", "hat", "auch", "und", "nicht"]
if word.lower() in f_words:
return False
else:
return True
text = """Ich denke auch, dass ist nicht schlimm. Er hat es nicht gemerkt und das ist gut.
Und überhaupt: es ist auch seine Schuld. Ehrlich, das ist wahr."""
list(filter(is_no_function_word, re.findall("\w+", text)))
"""
Explanation: <br/><br/><br/><br/><br/><br/><br/><br/>
Exercise 4
End of explanation
"""
|
csc-training/python-introduction
|
notebooks/exercises/4 - Functions and exceptions.ipynb
|
mit
|
def celsius_to_kelvin(c):
# implementation here
pass
celsius_to_kelvin(0)
"""
Explanation: Functions and exceptions
Functions
Write a function that converts from Celsius to Kelvin.
To convert from Celsius to Kelvin you add 273.15 to the value.
Try your solution for a few values.
End of explanation
"""
def fahrenheit_to_celsius(f):
pass
fahrenheit_to_celsius(0)
"""
Explanation: Now write another function to convert from Fahrenheit to Celsius.
The formula for doing so is
C = 5/9*(F-32)
Again, verify that your function does what is expected.
End of explanation
"""
def fahrenheit_to_kelvin(f):
pass
fahrenheit_to_kelvin(0)
"""
Explanation: Now make a function to convert from Fahrenheit to Kelvin.
Before you start coding, stop to think for a second. You can actually re-use the two other functions you have made. Fahrenheit to Kelvin can be represented as Fahrenheit to Celsius followed by Celsius to Kelvin.
End of explanation
"""
var = float(input("give a number: "))
divided = 1/var
"""
Explanation: Finally, implement a more general conversion function that takes as arguments also the input and output scales, e.g. from_scale and to_scale. Provide default values for from_scale and to_scale, and call the function with different number of arguments. Try to call the function using both positional and keyword arguments. Which approach is more readable for you?
Exceptions
Ok, here's some code that fails. Find out at least 2 errors it raises by giving different inputs.
Then construct a try-except clause around the lines of code.
End of explanation
"""
file_handle = open("i_dont_exist", "r")
"""
Explanation: The open function is used to open files for reading or writing. We'll get to that but first let's try to open a file that doesn't exist.
Filesystem related errors are very common. A file might not exist or for some reason the user might not have the rights to open the file. Go ahead and make a try-except clause to catch this error.
End of explanation
"""
def celsius_to_fahrenheit(c):
pass
def kelvin_to_celsius(k):
pass
def kelvin_to_fahrenheit(k):
pass
def temperature_converter():
from_scale = input("Give scale to convert from: ")
to_scale = input("Give scale to convert to: ")
value = float(input("Give temperature: "))
if from_scale == "K" and to_scale == "F":
return kelvin_to_fahrenheit(value)
elif from_scale == "F" and to_scale == "K":
        return fahrenheit_to_kelvin(value)
elif from_scale == "C" or to_scale == "C":
raise NotImplementedError("Conversion to Celsius not implemented!")
return
temperature_converter()
"""
Explanation: Compound
Implement the three remaining functions so you can convert freely between Fahrenheit and Kelvin.
Now look at the temperature_converter function. Try to figure out what errors malformed user input can cause. You can either wrap the function call in a try-except or you can wrap parts of the function.
If you have time you can increase the complexity of the function to cover centigrade conversions as well, but this is not required. Hint: if you first convert the input value to centigrade (when it is not already) and then convert from centigrade to the desired output scale (when that is not centigrade), you can simplify the code.
End of explanation
"""
|
cathalmccabe/PYNQ
|
boards/Pynq-Z1/base/notebooks/pmod/pmod_grove_buzzer.ipynb
|
bsd-3-clause
|
from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
"""
Explanation: Grove Buzzer v1.2
This example shows how to use the Grove Buzzer v1.2.
A Grove Buzzer and a PYNQ Grove Adapter are required.
To set up the board for this notebook, the PYNQ Grove Adapter is connected to PMODB and the Grove Buzzer is connected to G1 on the PYNQ Grove Adapter.
End of explanation
"""
from pynq.lib.pmod import Grove_Buzzer
from pynq.lib.pmod import PMOD_GROVE_G1
grove_buzzer = Grove_Buzzer(base.PMODB,PMOD_GROVE_G1)
"""
Explanation: 1. Illustrate playing a pre-defined melody
End of explanation
"""
grove_buzzer.play_melody()
"""
Explanation: 2. Play a piece of music
End of explanation
"""
# Play a tone
tone_period = 1200
num_cycles = 500
grove_buzzer.play_tone(tone_period,num_cycles)
"""
Explanation: 3. Generate a tone of desired period and for a desired number of times
The tone_period is in microseconds and the 50% duty cycle will be generated for the given tone_period
End of explanation
"""
from pynq.lib.arduino import Arduino_Analog
from pynq.lib.arduino import ARDUINO_GROVE_A1
analog1 = Arduino_Analog(base.ARDUINO,ARDUINO_GROVE_A1)
rounds = 200
for i in range(rounds):
tone_period = int(analog1.read_raw()[0]/5)
num_cycles = 500
grove_buzzer.play_tone(tone_period,50)
"""
Explanation: 4. Controlling the tone
This example will use a grove potentiometer to control the tone of the sound. Plug the potentiometer into A1 group on the shield.
End of explanation
"""
|
warrierr/cs109
|
hw0/hw0.ipynb
|
mit
|
import sys
print sys.version
"""
Explanation: Homework 0
Survey due 4th September, 2015
Submission due 10th September, 2015
Welcome to CS109 / STAT121 / AC209 / E-109 (http://cs109.org/). In this class, we will be using a variety of tools that will require some initial configuration. To ensure everything goes smoothly moving forward, we will setup the majority of those tools in this homework. It is very important that you do this setup as soon as possible. While some of this will likely be dull, doing it now will enable us to do more exciting work in the weeks that follow without getting bogged down in further software configuration. You will also be filling out a mandatory class survey and creating a github and AWS account, which are mandatory as well.
Please note that the survey is due on September 4th. The reason is that we need your github account name to set you up for the homework submission system. If you do not submit the survey on time you might not be able to submit the homework in time.
This homework will not be graded, however, you must submit it. Submission instructions, along with the github flow for homework, are at the end of this notebook. The practice you will get submitting this homework will be essential for the submission of the forthcoming homework notebooks and your project.
Table of Contents
Homework 0
Survey due 4th September, 2015
Submission due 10th September, 2015
First Things
1. Create your github account
2. Class Survey
3. Piazza
4. Programming expectations
5. If you do not have a .edu email address
Getting and installing Python
Installing Anaconda
Mac/Linux users
Windows Users
Troubleshooting
Setting up your git environment
1. Installing git
Windows specific notes
Mac specific notes
2. Optional: Creating ssh keys on your machine
3. Optional: Uploading ssh keys and Authentication
4. Setting global config for git
5. Github tutorial
Sign up for AWS
1. Get an AWS account
2. Sign up for AWS educate
Hello, Python
Python Libraries
Installing additional libraries
Testing latest libraries
Kicking the tires
Hello World
Hello matplotlib
Hello Numpy
The Monty Hall Problem
The workflow for homeworks and labs
getting and working on labs
getting and submitting homework
First Things
I can't stress this enough: Do this setup now!
These first things are incredibly important. You must absolutely fill these out to get into the swing of things...
1. Create your github account
If you do not have a github account as yet, create it at:
https://github.com
This step is mandatory. We will need your github username. We are using github for all aspects of this course, including
doing and submitting homework
collaborating on your project
creating your web site
To sign up for an account, just go to github and pick a unique username, an email address, and a password. Once you've done that, your github page will be at https://github.com/your-username.
Github also provides a student developer package. This is something that might be nice to have, but it is not necessary for the course. Github may take some time to approve your application for the package. Please note that this is optional and you do not have to have the package approved to fill out the survey.
2. Class Survey
Next, you must complete the mandatory course survey located here. It should only take a few moments of your time. Once you fill in the survey we will use the github username you provided to sign you up into the cs109-students organization on github. (see https://help.github.com/articles/how-do-i-access-my-organization-account/) It is imperative that you fill out the survey on time as we use the provided information to sign you in: your access to the homework depends on being in this organization.
3. Piazza
Go to Piazza and sign up for the class using your Harvard e-mail address. If you do not have a Harvard email address write an email to staff@cs109.org and one of the TFs will sign you up.
You will use Piazza as a forum for discussion, to find team members, to arrange appointments, and to ask questions. Piazza should be your primary form of communication with the staff. Use the staff e-mail (staff@cs109.org) only for individual requests, e.g., to excuse yourself from mandatory sections. All announcements, homework, and project descriptions will be posted on Piazza first.
Introduction
Once you are signed up to the Piazza course forum, introduce yourself to your classmates and course staff with a follow-up post in the introduction thread. Include your name/nickname, your affiliation, why you are taking this course, and tell us something interesting about yourself (e.g., an industry job, an unusual hobby, past travels, or a cool project you did, etc.). Also tell us whether you have experience with data science.
4. Programming expectations
All the assignments and labs for this class will use Python and, for the most part, the browser-based IPython notebook format you are currently viewing. Knowledge of Python is not a prerequisite for this course, provided you are comfortable learning on your own as needed. While we have strived to make the programming component of this course straightforward, we will not devote much time to teaching programming or Python syntax.
How to look up Python syntax on Google and StackOverflow.
Basic programming concepts like functions, loops, arrays, dictionaries, strings, and if statements.
How to learn new libraries by reading documentation.
Asking questions on StackOverflow or Piazza.
There are many online tutorials to introduce you to scientific python programming. Here is a course that is very nice. Lectures 1-4 of this course are most relevant to this class. While we will cover some python programming in labs 1 and 2, we expect you to pick it up on the fly.
5. If you do not have a .edu email address
Please get one, as you will need it to sign up for AWS educate, and if you want to sign up for the student developer github package you will need it as well. As a DCE student you are eligible for a FAS account and you can sign up here.
Getting and installing Python
You will be using Python throughout the course, including many popular 3rd party Python libraries for scientific computing. Anaconda is an easy-to-install bundle of Python and most of these libraries. We strongly recommend that you use Anaconda for this course. If you insist on using your own Python setup instead of Anaconda, we will not provide any installation support, and are not responsible for you losing points on homework assignments in case of inconsistencies.
For this course we are using Python 2, not Python 3.
Also see: http://docs.continuum.io/anaconda/install
The IPython or Jupyter notebook runs in the browser, and works best in Google Chrome or Safari for me. You probably want to use one of these for assignments in this course.
Installing Anaconda
The Anaconda Python distribution is an easily-installable bundle of Python and many of the libraries used throughout this class. Unless you have a good reason not to, we recommend that you use Anaconda.
Mac/Linux users
Download the appropriate version of Anaconda
Follow the instructions on that page to run the installer
Test out the IPython notebook: open a Terminal window, and type ipython notebook. Or use the Anaconda Launcher which might have been deposited on your desktop. A new browser window should pop up.
Click New Notebook to create a new notebook file. Trick: give this notebook a unique name, like my-little-rose. Use Spotlight (upper right corner of the mac desktop, looks like a magnifier) to search for this name. In this way, you will know which folder your notebook opens in by default.
Windows Users
Download the appropriate version of Anaconda
Follow the instructions on that page to run the installer. This will typically create a directory at C:\Anaconda
Test it out: start the Anaconda launcher, which you can find in C:\Anaconda or, in the Start menu. Start the IPython notebook. A new browser window should open.
Click New Notebook, which should open a new page. Trick: give this notebook a unique name, like my-little-rose. Use Explorer (usually start menu on windows desktops) to search for this name. In this way, you will know which folder your notebook opens in by default.
If you did not add Anaconda to your path, be sure to use the full path to the python and ipython executables, such as /anaconda/bin/python.
If you already have installed Anaconda at some point in the past, you can easily update to the latest Anaconda version by updating conda, then Anaconda as follows:
conda update conda
conda update anaconda
Troubleshooting
You must be careful to make sure you are running the Anaconda version of python, since those operating systems come preinstalled with their own versions of python.
End of explanation
"""
x = [10, 20, 30, 40, 50]
for item in x:
print "Item is ", item
"""
Explanation: Problem
When you start python, you don't see a line like Python 2.7.5 |Anaconda 1.6.1 (x86_64)|. You are using a Mac or Linux computer
Reason
You are most likely running a different version of Python, and need to modify your Path (the list of directories your computer looks through to find programs).
Solution
Find a file like .bash_profile, .bashrc, or .profile. Open the file in a text editor, and add this line at the end: export PATH="$HOME/anaconda/bin:$PATH". Close the file, open a new terminal window, type source ~/.profile (or whatever file you just edited). Type which python -- you should see a path that points to the anaconda directory. If so, running python should load the proper version
If this doesn't work (typing which python doesn't point to anaconda), you might be using a different shell. Type echo $SHELL. If this isn't bash, you need to edit a different startup file (for example, if echo $SHELL gives csh, you need to edit your .cshrc file). The syntax for this file is slightly different: set PATH = ($HOME/anaconda/bin $PATH)
Problem
You are running the right version of python (see above item), but are unable to import numpy.
Reason
You are probably loading a different copy of numpy that is incompatible with Anaconda
Solution
See the above item to find your .bash_profile, .profile, or .bashrc file. Open it, and add the line unset PYTHONPATH at the end. Close the file, open a new terminal window, type source ~/.profile (or whatever file you just edited), and try again.
Problem
Under Windows, you receive an error message similar to the following: "'pip' is not recognized as an internal or external command, operable program or batch file."
Reason
The correct Anaconda paths might not be present in your PATH variable, or Anaconda might not have installed correctly.
Solution
Ensure the Anaconda directories to your path environment variable ("\Anaconda" and "\Anaconda\Scripts"). See this page for details.
If this does not correct the problem, reinstall Anaconda.
IF YOU ARE STILL HAVING ISSUES ON THE INSTALL, POST TO PIAZZA. WE'LL HELP YOU THERE. OR ASK IN YOUR SECTION
Setting up your git environment
1. Installing git
We will be using the command line version of git.
On linux, install git using your system package manager (yum, apt-get, etc)
On the Mac, if you ever installed Xcode, you should have git installed. Or you might have installed it using homebrew. Either of these are fine as long as the git version is greater than 2.0
Otherwise, on Mac and Windows, go to http://git-scm.com. Accept all defaults in the installation process. On Windows, installing git will also install for you a minimal unix environment with a "bash" shell and terminal window. Voila, your windows computer is transformed into a unixy form.
Windows specific notes
There will be an installer .exe file you need to click. Accept all the defaults.
Here is a screenshot from one of the defaults. It makes sure you will have the "bash" tool talked about earlier.
Choose the default line-encoding conversion:
Use the terminal emulator they provide, its better than the one shipped with windows.
Towards the end, you might see a message like this. It looks scary, but all you need to do is click "Continue"
At this point you will be installed. You can bring up "git bash" either from your start menu, or from the right click menu on any folder background. When you do so, a terminal window will open. This terminal is where you will issue further git setup commands, and git commands in general.
Get familiar with the terminal. It opens in your home folder, and maps \\ paths on windows to more web/unix like paths with '/'. Try issuing the commands ls, pwd, and cd folder where folder is one of the folders you see when you do a ls. You can do a cd .. to come back up.
You can also use the terminal which comes with the ipython notebook. More about that later.
Mac specific notes
As mentioned earlier, if you ever installed Xcode or the "Command Line Developer tools", you may already have git.
Make sure its version 2.0 or higher. (git --version)
Or if you use Homebrew, you can install it from there. The current version on homebrew is 2.4.3
You don't need to do anything more in this section.
First click on the .mpkg file that comes when you open the downloaded .dmg file.
When I tried to install git on my mac, I got a warning saying my security preferences wouldn't allow it to be installed. So I opened my system preferences and went to "Security".
Here you must click "Open Anyway", and the installer will run.
The installer puts git at /usr/local/git/bin/git. That's not a particularly useful spot. Open up Terminal.app. It's usually in /Applications/Utilities. Once the terminal opens up, issue
sudo ln -s /usr/local/git/bin/git /usr/local/bin/git.
Keep the Terminal application handy in your dock. (You could also download and use iTerm.app, which is a nicer terminal, if you are into terminal geekery). We'll be using the terminal extensively for git. You can also use the terminal which comes with the ipython notebook. More about that later.
Try issuing the commands ls, pwd, and cd folder where folder is one of the folders you see when you do a ls. You can do a cd .. to come back up.
2. Optional: Creating ssh keys on your machine
This ia an optional step. But it makes things much easier.
There are two ways git talks to github: https, which is a web based protocol
or over ssh
Which one you use is your choice. I recommend ssh, and the github urls in this homework and in labs will be ssh urls.
Every time you contact your upstream repository (hosted on github), you need to prove you're you. You can do this with passwords over HTTPS, but it gets old quickly. By providing an ssh public key to github, your ssh-agent will handle all of that for you, and you won't have to put in any passwords.
At your terminal, issue the command (skip this if you are a seasoned ssh user and already have keys):
ssh-keygen -t rsa
It will look like this:
Accept the defaults. When it asks for a passphrase for your keys, put in none. (you can put in one if you know how to set up a ssh-agent).
This will create two files for you, in your home folder if you accepted the defaults.
id_rsa is your PRIVATE key. NEVER NEVER NEVER give that to anyone. id_rsa.pub is your public key. You must supply this to github.
3. Optional: Uploading ssh keys and Authentication
To upload an ssh key, log in to github and click on the gear icon in the top right corner (settings). Once you're there, click on "SSH keys" on the left. This page will contain all your ssh keys once you upload any.
Click on "add ssh key" in the top right. You should see this box:
<img src="github_ssh.png" alt="github ssh" style="width: 500px;"/>
The title field should be the name of your computer or some other way to identify this particular ssh key.
In the key field, you'll need to copy and paste your public key. Do not paste your private ssh key here.
When you hit "Add key", you should see the key name and some hexadecimal characters show up in the list. You're set.
Now, whenever you clone a repository using this form:
$ git clone git@github.com:rdadolf/ac297r-git-demo.git,
you'll be connecting over ssh, and will not be asked for your github password
You will need to repeat steps 2 and 3 of the setup for each computer you wish to use with github.
4. Setting global config for git
Again, from the terminal, issue the command
git config --global user.name "YOUR NAME"
This sets up a name for you. Then do
git config --global user.email "YOUR EMAIL ADDRESS"
Use the SAME email address you used in setting up your github account.
These commands set up your global configuration. On my Mac, these are stored in the text file .gitconfig in my home folder.
5. Github tutorial
Read our git and github tutorial from Lab 1. Then come back here.
If you have any issues or questions: Ask us! On Piazza or in Sections!
Sign up for AWS
For the course you need to sign up for Amazon Web Services (AWS).
The sign up process has two steps:
Get an AWS account
Sign up for AWS educate
The AWS account will enable you to access Amazon's webservices. The AWS educate sign up will provide you with $100 worth of free credits.
1. Get an AWS account
Note: You can skip this step if you already have an account.
Go to this webpage
Click on the yellow box in the upper right corner saying "Create an AWS account"
Follow the normal instructions and fill in all necessary information to create your account.
Once you have an account you need your account ID. The account ID is a 12 digit number.
Please follow this description to find your ID in the Support menu of your AWS console.
2. Sign up for AWS educate
Note: You will need your 12 digit AWS account ID for this step.
Go to this webpage
Click on the right on the button saying "Apply for AWS Educate for Students"
Confirm that you are a student
Fill out the form
Note that that you provide should come from your institution, which means it should end in .edu
It might take a few days for your request to be approved.
Once again, ping us if you need help!
Hello, Python
The IPython/Jupyter notebook is an application to build interactive computational notebooks. You'll be using them to complete labs and homework. Once you've set up Python, please download this page, and open it with IPython by typing
ipython notebook <name_of_downloaded_file>
You can also open the notebook in any folder by cding to the folder in the terminal, and typing
ipython notebook .
in that folder.
The anaconda install also probably dropped a launcher on your desktop. You can use the launcher, and select "ipython notebook" or "jupyter notebook" from there. In this case you will need to find out which folder you are running in.
It looks like this for me:
Notice that you can use the user interface to create new folders and text files, and even open new terminals, all of which might come useful to you. To create a new notebook, you can use "Python 2" under notebooks. You may not have the other choices available (I have julia for example, which is another language that uses the same notebook interface).
For the rest of the assignment, use your local copy of this page, running on IPython.
Notebooks are composed of many "cells", which can contain text (like this one), or code (like the one below). Double click on the cell below, and evaluate it by clicking the "play" button above, or by hitting shift + enter
End of explanation
"""
#IPython is what you are using now to run the notebook
import IPython
print "IPython version: %6.6s (need at least 3.0.0)" % IPython.__version__
# Numpy is a library for working with Arrays
import numpy as np
print "Numpy version: %6.6s (need at least 1.9.1)" % np.__version__
# SciPy implements many different numerical algorithms
import scipy as sp
print "SciPy version: %6.6s (need at least 0.15.1)" % sp.__version__
# Pandas makes working with data tables easier
import pandas as pd
print "Pandas version: %6.6s (need at least 0.16.2)" % pd.__version__
# Module for plotting
import matplotlib
print "Mapltolib version: %6.6s (need at least 1.4.1)" % matplotlib.__version__
# SciKit Learn implements several Machine Learning algorithms
import sklearn
print "Scikit-Learn version: %6.6s (need at least 0.16.1)" % sklearn.__version__
# Requests is a library for getting data from the Web
import requests
print "requests version: %6.6s (need at least 2.0.0)" % requests.__version__
#BeautifulSoup is a library to parse HTML and XML documents
import bs4
print "BeautifulSoup version:%6.6s (need at least 4.4)" % bs4.__version__
import pyquery
print "Loaded PyQuery"
"""
Explanation: Python Libraries
Installing additional libraries
Anaconda includes most of the libraries we will use in this course, but you will need to install a few extra ones for the beginning of this course:
BeautifulSoup
Seaborn
PyQuery
The recommended way to install these packages is to run
!pip install BeautifulSoup seaborn pyquery
in a code cell in the ipython notebook you just created. On windows, you might want to run pip install BeautifulSoup seaborn pyquery on the git-bash.exe terminal (note, the exclamation goes away).
If this doesn't work, you can download the source code, and run python setup.py install from the source code directory. On Unix machines(Mac or Linux), either of these commands may require sudo (i.e. sudo pip install... or sudo python)
If you've successfully completed the above install, all of the following statements should run.
Testing latest libraries
End of explanation
"""
# The %... is an iPython thing, and is not part of the Python language.
# In this case we're just telling the plotting library to draw things on
# the notebook, instead of on a separate window.
%matplotlib inline
#this line above prepares IPython notebook for working with matplotlib
# See all the "as ..." contructs? They're just aliasing the package names.
# That way we can call methods like plt.plot() instead of matplotlib.pyplot.plot().
import numpy as np # imports a fast numerical programming library
import scipy as sp #imports stats functions, amongst other things
import matplotlib as mpl # this actually imports matplotlib
import matplotlib.cm as cm #allows us easy access to colormaps
import matplotlib.pyplot as plt #sets up plotting under plt
import pandas as pd #lets us handle data as dataframes
#sets up pandas table display
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns #sets up styles and gives us more plotting options
"""
Explanation: If any of these libraries are missing or out of date, you will need to install them and restart IPython.
Kicking the tires
Let's try some things, starting from very simple to more complex.
Hello World
The following is the incantation we like to put at the beginning of every notebook. It loads most of the stuff we will regularly use.
End of explanation
"""
x = np.linspace(0, 10, 30) #array of 30 points from 0 to 10
y = np.sin(x)
z = y + np.random.normal(size=30) * .2
plt.plot(x, y, 'o-', label='A sine wave')
plt.plot(x, z, '-', label='Noisy sine')
plt.legend(loc = 'lower right')
plt.xlabel("X axis")
plt.ylabel("Y axis")
print y
print type(y)
"""
Explanation: Hello matplotlib
The notebook integrates nicely with Matplotlib, the primary plotting package for python. This should embed a figure of a sine wave:
End of explanation
"""
print "Make a 3 row x 4 column array of random numbers"
x = np.random.random((3, 4))
print x
print
print "Add 1 to every element"
x = x + 1
print x
print
print "Get the element at row 1, column 2"
print x[1, 2]
print x[0,0]
# The colon syntax is called "slicing" the array.
print "Get the first row"
print x[0, :]
print
print "Get every 2nd column of the first row"
print x[0, ::2]
print
"""
Explanation: If that last cell complained about the %matplotlib line, you need to update IPython to v1.0, and restart the notebook. See the installation page
Hello Numpy
The Numpy array processing library is the basis of nearly all numerical computing in Python. Here's a 30 second crash course. For more details, consult Chapter 4 of Python for Data Analysis, or the Numpy User's Guide
End of explanation
"""
print "Max is ", x.max()
print "Min is ", x.min()
print "Mean is ", x.mean()
x.std()
"""
Explanation: Print the maximum, minimum, and mean of the array. This does not require writing a loop. In the code cell below, type x.m<TAB>, to find built-in operations for common array statistics like this
End of explanation
"""
print x.max(axis=1)
"""
Explanation: Call the x.max function again, but use the axis keyword to print the maximum of each row in x.
End of explanation
"""
x = np.random.binomial(500, .5)
print "number of heads:", x
"""
Explanation: Here's a way to quickly simulate 500 "fair" coin tosses (where the probability of getting Heads is 50%, or 0.5)
End of explanation
"""
# 3 ways to run the simulations
# loop
heads = []
for i in range(500):
heads.append(np.random.binomial(500, .5))
# "list comprehension"
heads = [np.random.binomial(500, .5) for i in range(500)]
# pure numpy
heads = np.random.binomial(500, .5, size=500)
histogram = plt.hist(heads, bins=10)
heads.shape
"""
Explanation: Repeat this simulation 500 times, and use the plt.hist() function to plot a histogram of the number of Heads (1s) in each simulation
End of explanation
"""
"""
Function
--------
simulate_prizedoor
Generate a random array of 0s, 1s, and 2s, representing
hiding a prize between door 0, door 1, and door 2
Parameters
----------
nsim : int
The number of simulations to run
Returns
-------
sims : array
Random array of 0s, 1s, and 2s
Example
-------
>>> print simulate_prizedoor(3)
array([0, 0, 2])
"""
def simulate_prizedoor(nsim):
return np.random.randint(0, 3, (nsim))
np.random.randint(0, 3, size=11)
"""
Explanation: The Monty Hall Problem
Here's a fun and perhaps surprising statistical riddle, and a good way to get some practice writing python functions
In a gameshow, contestants try to guess which of 3 closed doors contains a cash prize (goats are behind the other two doors). Of course, the odds of choosing the correct door are 1 in 3. As a twist, the host of the show occasionally opens a door after a contestant makes his or her choice. This door is always one of the two the contestant did not pick, and is also always one of the goat doors (note that it is always possible to do this, since there are two goat doors). At this point, the contestant has the option of keeping his or her original choice, or switching to the other unopened door. The question is: is there any benefit to switching doors? The answer surprises many people who haven't heard the question before.
We can answer the problem by running simulations in Python. We'll do it in several parts.
First, write a function called simulate_prizedoor. This function will simulate the location of the prize in many games -- see the detailed specification below:
End of explanation
"""
"""
Function
--------
simulate_guess
Return any strategy for guessing which door a prize is behind. This
could be a random strategy, one that always guesses 2, whatever.
Parameters
----------
nsim : int
The number of simulations to generate guesses for
Returns
-------
guesses : array
An array of guesses. Each guess is a 0, 1, or 2
Example
-------
>>> print simulate_guess(5)
array([0, 0, 0, 0, 0])
"""
def simulate_guess(nsim):
return np.zeros(nsim, dtype=np.int)
"""
Explanation: Next, write a function that simulates the contestant's guesses for nsim simulations. Call this function simulate_guess. The specs:
End of explanation
"""
"""
Function
--------
goat_door
Simulate the opening of a "goat door" that doesn't contain the prize,
and is different from the contestants guess
Parameters
----------
prizedoors : array
The door that the prize is behind in each simulation
guesses : array
The door that the contestant guessed in each simulation
Returns
-------
goats : array
The goat door that is opened for each simulation. Each item is 0, 1, or 2, and is different
from both prizedoors and guesses
Examples
--------
>>> print goat_door(np.array([0, 1, 2]), np.array([1, 1, 1]))
>>> array([2, 2, 0])
"""
def goat_door(prizedoors, guesses):
#strategy: generate random answers, and
#keep updating until they satisfy the rule
#that they aren't a prizedoor or a guess
result = np.random.randint(0, 3, prizedoors.size)
while True:
bad = (result == prizedoors) | (result == guesses)
if not bad.any():
return result
result[bad] = np.random.randint(0, 3, bad.sum())
# np vectorized setting using boolean mask
x = np.array([1,4,5,6,7,7,7,7,7,7])
x[np.random.choice([True, False], size=x.size)] = -1
x
"""
Explanation: Next, write a function, goat_door, to simulate randomly revealing one of the goat doors that a contestant didn't pick.
End of explanation
"""
"""
Function
--------
switch_guess
The strategy that always switches a guess after the goat door is opened
Parameters
----------
guesses : array
Array of original guesses, for each simulation
goatdoors : array
Array of revealed goat doors for each simulation
Returns
-------
The new door after switching. Should be different from both guesses and goatdoors
Examples
--------
>>> print switch_guess(np.array([0, 1, 2]), np.array([1, 2, 1]))
>>> array([2, 0, 0])
"""
def switch_guess(guesses, goatdoors):
result = np.zeros(guesses.size)
switch = {(0, 1): 2, (0, 2): 1, (1, 0): 2, (1, 2): 0, (2, 0): 1, (2, 1): 0}
for i in [0, 1, 2]:
for j in [0, 1, 2]:
mask = (guesses == i) & (goatdoors == j)
if not mask.any():
continue
result = np.where(mask, np.ones_like(result) * switch[(i, j)], result)
return result
"""
Explanation: Write a function, switch_guess, that represents the strategy of always switching a guess after the goat door is opened.
End of explanation
"""
"""
Function
--------
win_percentage
Calculate the percent of times that a simulation of guesses is correct
Parameters
-----------
guesses : array
Guesses for each simulation
prizedoors : array
Location of prize for each simulation
Returns
--------
percentage : number between 0 and 100
The win percentage
Examples
---------
>>> print win_percentage(np.array([0, 1, 2]), np.array([0, 0, 0]))
33.333
"""
def win_percentage(guesses, prizedoors):
return 100 * (guesses == prizedoors).mean()
"""
Explanation: Last function: write a win_percentage function that takes an array of guesses and prizedoors, and returns the percent of correct guesses
End of explanation
"""
nsim = 10000
#keep guesses
print "Win percentage when keeping original door"
print win_percentage(simulate_prizedoor(nsim), simulate_guess(nsim))
#switch
pd = simulate_prizedoor(nsim)
guess = simulate_guess(nsim)
goats = goat_door(pd, guess)
guess = switch_guess(guess, goats)
print "Win percentage when switching doors"
print win_percentage(pd, guess).mean()
"""
Explanation: Now, put it together. Simulate 10000 games where contestant keeps his original guess, and 10000 games where the contestant switches his door after a goat door is revealed. Compute the percentage of time the contestant wins under either strategy. Is one strategy better than the other?
End of explanation
"""
|
teuben/astr288p
|
notebooks/03-arrays.ipynb
|
mit
|
a = [1,2,3]
b = [4,5,6]
c = a+b
print(c)
"""
Explanation: Arrays for Numerical work?
End of explanation
"""
a.append(b)
print(a)
def sum(data):
""" sum the elements of an array
"""
asum = 0.0
for i in data:
asum = asum + i
return asum
# the length of the array is defined here, and re-used below
# to test performance, we can make this number very large
# 1000, 1000000 etc.
n = 10
%%time
a = list(range(n))
%%time
print(sum(a))
import numpy as np
%%time
a=np.arange(n)
%%time
print(sum(a))
#%%time
%time print(a.sum())
%time print(a.sum())
a=np.arange(10)
b=np.arange(10)
c = a + b
d = 3*a*a + b + 2.0
print(c)
print(d)
c.shape
c2=c.reshape(5,2)
c3=c.reshape(2,5)
print(c)
print(c2)
print(c3)
type(c)
c[0]=999
print(c2)
d2=c.reshape(5,2)[1:3,:]
print(d2)
d2[1,1]=888
print(c)
print(c2)
"""
Explanation: You can see that adding two lists just results in a longer list, a concatenation of the two (a short contrast with numpy arrays follows below).
End of explanation
"""
|
tomquisel/wine-tasting
|
Wine Tasting Analysis.ipynb
|
mit
|
%matplotlib inline
import pylab as plt
import seaborn as sns
import pandas as pd
import numpy as np
import scipy.stats
import datetime as dt
import random
from IPython.display import display
sns.set_context("notebook", font_scale=2)
raw_data = pd.read_csv('/Users/tom/Downloads/Wine Tasting Data - Sheet1.csv')
raw_data['tasting_order'] = raw_data.groupby('person_name').cumcount() + 1
wine_mapping = pd.read_csv('/Users/tom/Downloads/Wine Mapping - Sheet1.csv')
def make_display_name(wines):
if len(wines.variety.unique()) == 1:
res = wines.wine_name.values
else:
res = wines.wine_name.values + ' - ' + wines.variety.values
return pd.Series(res, index=wines.number, name='wine_display_name')
display_names = wine_mapping.groupby('wine_name').apply(make_display_name)
#wine_mapping.merge(wine_mapping.groupby('wine_name').apply(make_display_name), left_on='number', right_index=True)
display_names = display_names.reset_index()
del display_names['wine_name']
wine_mapping = wine_mapping.merge(display_names, on='number')
data = raw_data.merge(wine_mapping, left_on='wine_number', right_on='number')
"""
Explanation: Below are the results from our wine tasting. It had 55 participants and 30 wines selected randomly from the shelves at Trader Joe's.
End of explanation
"""
wine_mapping.variety.value_counts().plot(kind='barh', title='histogram of wine varieties')
plt.xlabel('count')
"""
Explanation: Counts of Wine Varieties
End of explanation
"""
data.groupby('variety').score.mean().sort_values(ascending=False).plot(kind='barh', figsize=(10,6), title='Score by variety of wine')
plt.xlabel('average score')
"""
Explanation: Best and worst varieties of wine
End of explanation
"""
sorted_wines = data.groupby('wine_display_name').score.mean().sort_values(ascending=False)
sorted_wines.plot(kind='barh', figsize=(16,10), title='Average score for each wine')
plt.xlabel('average score')
"""
Explanation: Best and worst wines
End of explanation
"""
data['type'] = data['type'].str.strip()
red_vs_white = pd.DataFrame(data.groupby('type').score.mean().sort_values())
red_vs_white.columns = ['average score']
red_vs_white
"""
Explanation: Red vs. white
End of explanation
"""
data.groupby('person_name').score.mean().sort_values(ascending=False).plot(kind='barh', figsize=(16, 20), title='average score by person')
plt.xlabel('average score')
"""
Explanation: Who liked the wines the most?
End of explanation
"""
sns.lmplot(x="price", y="score", data=data, size=8, x_jitter=0.3, y_jitter=0.3)
plt.title('Relationship of price and score')
plt.ylim(ymin=0)
"""
Explanation: Relationship of price and score
End of explanation
"""
sns.lmplot(x="price", y="score", data=data[data.price < 10.0], size=8, x_jitter=0.3, y_jitter=0.3)
plt.title('Relationship of price and score, wines under $10')
plt.ylim(ymin=0)
"""
Explanation: The shaded blue area is the 95% confidence interval for the linear regression fit. There's no significant correlation.
End of explanation
"""
sns.lmplot(x="price", y="score", data=data[data.price >= 10], size=8, x_jitter=0.3, y_jitter=0.3)
plt.title('Relationship of price and score, wines $10+')
"""
Explanation: Similarly, no significant correlation
End of explanation
"""
sns.lmplot(x="price", y="price_guess", data=data, size=8, x_jitter=0.2, y_jitter=0.2)
plt.title('Relationship of price and price guess')
plt.ylim(ymin=0, ymax=60)
"""
Explanation: There's a significant negative correlation, as you go above $10, wines get worse on average (for this sample of wines).
Relationship of price and price guess
End of explanation
"""
data['abs_price_error'] = (data['price'] - data['price_guess']).abs()
price_guesses = data.groupby('person_name').abs_price_error.agg(['mean', 'count']).sort_values('mean')
price_guesses.columns = ['Avg. Price Error', 'Number of Wines Priced']
price_guesses.dropna()
"""
Explanation: No significant correlation
Best price guessers
End of explanation
"""
data.groupby('tasting_order').score.mean().plot()
plt.ylabel('average score')
"""
Explanation: Score as the night wore on
End of explanation
"""
data.groupby('tasting_order').wine_number.count().plot(ylim=0)
"""
Explanation: There's no obvious trend in score as people tasted more wines, it looks random.
Tasting dedication
End of explanation
"""
def get_avg_score_diff(df):
if len(df) > 1:
return df.score.diff().abs().mean()
same_wine_avg_diff = data.groupby(['person_name', 'wine_name', 'variety']).apply(get_avg_score_diff).dropna().mean()
same_person_avg_diff = data.groupby(['person_name']).apply(get_avg_score_diff).dropna().mean()
same_wine_diffs = pd.Series([same_wine_avg_diff, same_person_avg_diff], index=['same person same wine', 'same person different wine'])
same_wine_diffs = pd.DataFrame(same_wine_diffs, columns=['average score difference'])
same_wine_diffs
"""
Explanation: An impressive number of people tasted all the wines!
Trying the same wine twice
End of explanation
"""
data.to_csv('full_wine_tasting_results.csv', index=False)
"""
Explanation: On average, when a single person tasted the same wine twice and didn't know it, they gave it a score that differed by 1.04 points. This is only slightly smaller than the average difference of a person scoring two different wines, which was 1.16 points. This indicates that most of the variation in scores is due to factors other than the wine: randomness, what wine you tasted before, etc...
End of explanation
"""
|
GoogleCloudPlatform/mlops-on-gcp
|
immersion/explainable_ai/solutions/xai_structured_caip.ipynb
|
apache-2.0
|
import os
PROJECT_ID = "" # TODO: your PROJECT_ID here.
os.environ["PROJECT_ID"] = PROJECT_ID
BUCKET_NAME = "" # TODO: your BUCKET_NAME here.
REGION = "us-central1"
os.environ['BUCKET_NAME'] = BUCKET_NAME
os.environ['REGION'] = REGION
"""
Explanation: AI Explanations: Explaining a tabular data model
Overview
In this tutorial we will perform the following steps:
Build and train a Keras model.
Export the Keras model as a TF 1 SavedModel and deploy the model on Cloud AI Platform.
Compute explainations for our model's predictions using Explainable AI on Cloud AI Platform.
Dataset
The data used in this tutorial comes from BigQuery public datasets: the London bicycle hires data joined with NOAA weather data (see the preprocessing notes below).
Objective
The goal is to train a model using the Keras Sequential API that predicts the duration of a bike trip given features such as the start and end station, the day of the week, and the weather conditions.
This tutorial focuses more on deploying the model to AI Explanations than on the design of the model itself. We will be using preprocessed data for this lab. If you wish to know more about the data and how it was preprocessed please see this notebook.
Setup
End of explanation
"""
%%bash
exists=$(gsutil ls -d | grep -w gs://${BUCKET_NAME}/)
if [ -n "$exists" ]; then
echo -e "Bucket gs://${BUCKET_NAME} already exists."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${BUCKET_NAME}
echo -e "\nHere are your current buckets:"
gsutil ls
fi
"""
Explanation: Run the following cell to create your Cloud Storage bucket if it does not already exist.
End of explanation
"""
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
"""
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, we create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
End of explanation
"""
import tensorflow as tf
import pandas as pd
# should be >= 2.1
print("Tensorflow version " + tf.__version__)
if tf.__version__ < "2.1":
raise Exception("TF 2.1 or greater is required")
!pip install explainable-ai-sdk
import explainable_ai_sdk
"""
Explanation: Import libraries
Import the libraries for this tutorial. This tutorial has been tested with TensorFlow version 2.3.
End of explanation
"""
# Copy the data to your notebook instance
! gsutil cp 'gs://explanations_sample_data/bike-data.csv' ./
"""
Explanation: Download and preprocess the data
In this section you'll download the data to train your model from a public GCS bucket. The original data is from the BigQuery datasets linked above. For your convenience, we've joined the London bike and NOAA weather tables, done some preprocessing, and provided a subset of that dataset here.
End of explanation
"""
data = pd.read_csv('bike-data.csv')
# Shuffle the data
data = data.sample(frac=1, random_state=2)
# Drop rows with null values
data = data[data['wdsp'] != 999.9]
data = data[data['dewp'] != 9999.9]
# Rename some columns for readability
data = data.rename(columns={'day_of_week': 'weekday'})
data = data.rename(columns={'max': 'max_temp'})
data = data.rename(columns={'dewp': 'dew_point'})
# Drop columns you won't use to train this model
data = data.drop(columns=['start_station_name', 'end_station_name', 'bike_id', 'snow_ice_pellets'])
# Convert trip duration from seconds to minutes so it's easier to understand
data['duration'] = data['duration'].apply(lambda x: float(x / 60))
# Preview the first 5 rows of training data
data.head()
"""
Explanation: Read the data with Pandas
You'll use Pandas to read the data into a DataFrame and then do some additional pre-processing.
End of explanation
"""
# Save duration to its own DataFrame and remove it from the original DataFrame
labels = data['duration']
data = data.drop(columns=['duration'])
"""
Explanation: Next, you will separate the data into features ('data') and labels ('labels').
End of explanation
"""
# Use 80/20 train/test split
train_size = int(len(data) * .8)
print("Train size: %d" % train_size)
print("Test size: %d" % (len(data) - train_size))
# Split your data into train and test sets
train_data = data[:train_size]
train_labels = labels[:train_size]
test_data = data[train_size:]
test_labels = labels[train_size:]
"""
Explanation: Split data into train and test sets
You'll split your data into train and test sets using an 80 / 20 train / test split.
End of explanation
"""
# Build your model
model = tf.keras.Sequential(name="bike_predict")
model.add(tf.keras.layers.Dense(64, input_dim=len(train_data.iloc[0]), activation='relu'))
model.add(tf.keras.layers.Dense(32, activation='relu'))
model.add(tf.keras.layers.Dense(1))
# Compile the model and see a summary
optimizer = tf.keras.optimizers.Adam(0.001)
model.compile(loss='mean_squared_logarithmic_error', optimizer=optimizer)
model.summary()
"""
Explanation: Build, train, and evaluate our model with Keras
This section shows how to build, train, evaluate, and get local predictions from a model by using the Keras Sequential API. The model will takes your 10 features as input and predict the trip duration in minutes.
End of explanation
"""
batch_size = 256
epochs = 3
input_train = tf.data.Dataset.from_tensor_slices(train_data)
output_train = tf.data.Dataset.from_tensor_slices(train_labels)
input_train = input_train.batch(batch_size).repeat()
output_train = output_train.batch(batch_size).repeat()
train_dataset = tf.data.Dataset.zip((input_train, output_train))
"""
Explanation: Create an input data pipeline with tf.data
Per best practices, we will use tf.data to create our input data pipeline. Our data is all in an in-memory dataframe, so we will use tf.data.Dataset.from_tensor_slices to create our pipeline.
End of explanation
"""
# This will take about a minute to run
# To keep training time short, you're not using the full dataset
model.fit(train_dataset, steps_per_epoch=train_size // batch_size, epochs=epochs)
"""
Explanation: Train the model
Now we train the model. We specify the number of epochs for which to train the model and tell it how many steps to expect per epoch.
End of explanation
"""
# Run evaluation
results = model.evaluate(test_data, test_labels)
print(results)
# Send test instances to model for prediction
predict = model.predict(test_data[:5])
# Preview predictions on the first 5 examples from your test dataset
for i, val in enumerate(predict):
print('Predicted duration: {}'.format(round(val[0])))
print('Actual duration: {} \n'.format(test_labels.iloc[i]))
"""
Explanation: Evaluate the trained model locally
End of explanation
"""
export_path = 'gs://' + BUCKET_NAME + '/explanations/mymodel'
model.save(export_path)
print(export_path)
"""
Explanation: Export the model as a TF 2.x SavedModel
When using TensorFlow 2.x, you export the model as a SavedModel and upload it to Cloud Storage.
End of explanation
"""
! saved_model_cli show --dir $export_path --all
"""
Explanation: Use TensorFlow's saved_model_cli to inspect the model's SignatureDef. We'll use this information when we deploy our model to AI Explanations in the next section.
End of explanation
"""
# Print the names of your tensors
print('Model input tensor: ', model.input.name)
print('Model output tensor: ', model.output.name)
from explainable_ai_sdk.metadata.tf.v2 import SavedModelMetadataBuilder
builder = SavedModelMetadataBuilder(export_path)
builder.set_numeric_metadata(
model.input.name.split(':')[0],
input_baselines=[train_data.median().values.tolist()],
index_feature_mapping=train_data.columns.tolist()
)
builder.save_metadata(export_path)
"""
Explanation: Deploy the model to AI Explanations
In order to deploy the model to AI Explanations, you need to generate an explanation_metadata.json file and upload it to the Cloud Storage bucket with your SavedModel. Then you'll deploy the model using gcloud.
Prepare explanation metadata
In order to deploy this model to AI Explanations, you need to create an explanation_metadata.json file with information about your model inputs, outputs, and baseline. You can use the Explainable AI SDK to generate most of the fields.
The value for input_baselines tells the explanations service what the baseline input should be for your model. Here you're using the median for all of your input features. That means the baseline prediction for this model will be the trip duration your model predicts for the median of each feature in your dataset.
Since this model accepts a single numpy array with all numerical feature, you can optionally pass an index_feature_mapping list to AI Explanations to make the API response easier to parse. When you provide a list of feature names via this parameter, the service will return a key / value mapping of each feature with its corresponding attribution value.
End of explanation
"""
import datetime
MODEL = 'bike' + datetime.datetime.now().strftime("%d%m%Y%H%M%S")
# Create the model if it doesn't exist yet (you only need to run this once)
! gcloud ai-platform models create $MODEL --enable-logging --region=$REGION
"""
Explanation: Since this is a regression model (predicting a numerical value), the baseline prediction will be the same for every example we send to the model. If this were instead a classification model, each class would have a different baseline prediction.
Create the model
End of explanation
"""
# Each time you create a version the name should be unique
VERSION = 'v1'
# Create the version with gcloud
explain_method = 'integrated-gradients'
! gcloud beta ai-platform versions create $VERSION \
--model $MODEL \
--origin $export_path \
--runtime-version 2.1 \
--framework TENSORFLOW \
--python-version 3.7 \
--machine-type n1-standard-4 \
--explanation-method $explain_method \
--num-integral-steps 25 \
--region $REGION
# Make sure the model deployed correctly. State should be `READY` in the following log
! gcloud ai-platform versions describe $VERSION --model $MODEL --region $REGION
"""
Explanation: Create the model version
Creating the version will take ~5-10 minutes. Note that your first deploy could take longer.
End of explanation
"""
# Format data for prediction to your model
prediction_json = {model.input.name.split(':')[0]: test_data.iloc[0].values.tolist()}
"""
Explanation: Get predictions and explanations
Now that your model is deployed, you can use the AI Platform Prediction API to get feature attributions. You'll pass it a single test example here and see which features were most important in the model's prediction. Here you'll use the Explainable AI SDK to get your prediction and explanation. You can also use gcloud.
Format your explanation request
To make your AI Explanations request, you need to create a JSON object with your test data for prediction.
End of explanation
"""
remote_ig_model = explainable_ai_sdk.load_model_from_ai_platform(project=PROJECT_ID,
model=MODEL,
version=VERSION,
region=REGION)
ig_response = remote_ig_model.explain([prediction_json])
"""
Explanation: Send the explain request
You can use the Explainable AI SDK to send explanation requests to your deployed model.
End of explanation
"""
attr = ig_response[0].get_attribution()
predicted = round(attr.example_score, 2)
print('Predicted duration: ' + str(predicted) + ' minutes')
print('Actual duration: ' + str(test_labels.iloc[0]) + ' minutes')
"""
Explanation: Understanding the explanations response
First, let's look at the trip duration your model predicted and compare it to the actual value.
End of explanation
"""
ig_response[0].visualize_attributions()
"""
Explanation: Next let's look at the feature attributions for this particular example. Positive attribution values mean a particular feature pushed your model prediction up by that amount, and vice versa for negative attribution values.
End of explanation
"""
# Prepare 10 test examples to your model for prediction
pred_batch = []
for i in range(10):
pred_batch.append({model.input.name.split(':')[0]: test_data.iloc[i].values.tolist()})
test_response = remote_ig_model.explain(pred_batch)
"""
Explanation: Check your explanations and baselines
To better make sense of the feature attributions you're getting, you should compare them with your model's baseline. In most cases, the sum of your attribution values + the baseline should be very close to your model's predicted value for each input. Also note that for regression models, the baseline_score returned from AI Explanations will be the same for each example sent to your model. For classification models, each class will have its own baseline.
In this section you'll send 10 test examples to your model for prediction in order to compare the feature attributions with the baseline. Then you'll run each test example's attributions through two sanity checks in the sanity_check_explanations method.
End of explanation
"""
def sanity_check_explanations(example, mean_tgt_value=None, variance_tgt_value=None):
passed_test = 0
total_test = 1
# `attributions` is a dict where keys are the feature names
# and values are the feature attributions for each feature
attr = example.get_attribution()
baseline_score = attr.baseline_score
# sum_with_baseline = np.sum(attribution_vals) + baseline_score
predicted_val = attr.example_score
# Sanity check 1
# The prediction at the input is equal to that at the baseline.
# Please use a different baseline. Some suggestions are: random input, training
# set mean.
if abs(predicted_val - baseline_score) <= 0.05:
print('Warning: example score and baseline score are too close.')
print('You might not get attributions.')
else:
passed_test += 1
# Sanity check 2 (only for models using Integrated Gradient explanations)
# Ideally, the sum of the integrated gradients must be equal to the difference
  # in the prediction probability at the input and baseline. Any discrepancy in
# these two values is due to the errors in approximating the integral.
if explain_method == 'integrated-gradients':
total_test += 1
want_integral = predicted_val - baseline_score
got_integral = sum(attr.post_processed_attributions.values())
if abs(want_integral - got_integral) / abs(want_integral) > 0.05:
print('Warning: Integral approximation error exceeds 5%.')
print('Please try increasing the number of integrated gradient steps.')
else:
passed_test += 1
print(passed_test, ' out of ', total_test, ' sanity checks passed.')
for response in test_response:
sanity_check_explanations(response)
"""
Explanation: In the function below you perform two sanity checks for models using Integrated Gradient (IG) explanations and one sanity check for models using Sampled Shapley.
End of explanation
"""
# This is the number of data points you'll send to the What-if Tool
WHAT_IF_TOOL_SIZE = 500
from witwidget.notebook.visualization import WitWidget, WitConfigBuilder
def create_list(ex_dict):
new_list = []
for i in feature_names:
new_list.append(ex_dict[i])
return new_list
def example_dict_to_input(example_dict):
return {'dense_input': create_list(example_dict)}
from collections import OrderedDict
wit_data = test_data.iloc[:WHAT_IF_TOOL_SIZE].copy()
wit_data['duration'] = test_labels[:WHAT_IF_TOOL_SIZE]
wit_data_dict = wit_data.to_dict(orient='records', into=OrderedDict)
config_builder = WitConfigBuilder(
wit_data_dict
).set_ai_platform_model(
PROJECT_ID,
MODEL,
VERSION,
adjust_example=example_dict_to_input
).set_target_feature('duration').set_model_type('regression')
WitWidget(config_builder)
"""
Explanation: Understanding AI Explanations with the What-If Tool
In this section you'll use the What-If Tool to better understand how your model is making predictions. See the cell below the What-if Tool for visualization ideas.
The What-If-Tool expects data with keys for each feature name, but your model expects a flat list. The functions below convert data to the format required by the What-If Tool.
End of explanation
"""
# Delete model version resource
! gcloud ai-platform versions delete $VERSION --quiet --model $MODEL
# Delete model resource
! gcloud ai-platform models delete $MODEL --quiet
# Delete Cloud Storage objects that were created
! gsutil -m rm -r gs://$BUCKET_NAME
"""
Explanation: What-If Tool visualization ideas
On the x-axis, you'll see the predicted trip duration for the test inputs you passed to the What-If Tool. Each circle represents one of your test examples. If you click on a circle, you'll be able to see the feature values for that example along with the attribution values for each feature.
You can edit individual feature values and re-run prediction directly within the What-If Tool. Try changing the distance, click Run inference, and see how that affects the model's prediction
You can sort features for an individual example by their attribution value; try changing the sort from the attributions dropdown
The What-If Tool also lets you create custom visualizations. You can do this by changing the values in the dropdown menus above the scatter plot visualization. For example, you can sort data points by inference error, or by their similarity to a single datapoint.
Cleaning up
To clean up all GCP resources used in this project, you can delete the GCP
project you used for the tutorial.
Alternatively, you can clean up individual resources by running the following
commands:
End of explanation
"""
|
pdwyys20/deep-learning
|
intro-to-tensorflow/intro_to_tensorflow.ipynb
|
mit
|
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
"""
Explanation: <h1 align="center">TensorFlow Neural Network Lab</h1>
<img src="image/notmnist.png">
In this lab, you'll use all the tools you learned from Introduction to TensorFlow to label images of English letters! The data you are using, <a href="http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html">notMNIST</a>, consists of images of a letter from A to J in different fonts.
The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!
To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "All modules imported".
End of explanation
"""
def download(url, file):
"""
Download file from <url>
:param url: URL to file
:param file: Local file path
"""
if not os.path.isfile(file):
print('Downloading ' + file + '...')
urlretrieve(url, file)
print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
"""
Uncompress features and labels from a zip file
:param file: The zip file to extract the data from
"""
features = []
labels = []
with ZipFile(file) as zipf:
# Progress Bar
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Get features and labels from all files
for filename in filenames_pbar:
# Check if the file is a directory
if not filename.endswith('/'):
with zipf.open(filename) as image_file:
image = Image.open(image_file)
image.load()
# Load image data as 1 dimensional array
# We're using float32 to save on memory space
feature = np.array(image, dtype=np.float32).flatten()
                    # Get the letter from the filename.  This is the letter of the image.
label = os.path.split(filename)[1][0]
features.append(feature)
labels.append(label)
return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
"""
Explanation: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
End of explanation
"""
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# TODO: Implement Min-Max scaling for grayscale image data
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
"""
Explanation: <img src="image/Mean_Variance_Image.png" style="height: 75%;width: 75%; position: relative; right: 5%">
Problem 1
The first problem involves normalizing the features for your training and test data.
Implement Min-Max scaling in the normalize_grayscale() function to a range of a=0.1 and b=0.9. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.
Since the raw notMNIST image data is in grayscale, the current values range from a min of 0 to a max of 255.
Min-Max Scaling:
$
X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}
$
If you're having trouble solving problem 1, you can view the solution here.
End of explanation
"""
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
"""
Explanation: Checkpoint
All your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
End of explanation
"""
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# TODO: Set the features and labels tensors
# features =
# labels =
# TODO: Set the weights and biases tensors
# weights =
# biases =
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
"""
Explanation: Problem 2
Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.
<img src="image/network_diagram.png" style="height: 40%;width: 40%; position: relative; right: 10%">
For the input here the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict the image digit so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network.
For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/resources/dims_types.html#data-types">float32</a> tensors:
- features
- Placeholder tensor for feature data (train_features/valid_features/test_features)
- labels
- Placeholder tensor for label data (train_labels/valid_labels/test_labels)
- weights
- Variable Tensor with random numbers from a truncated normal distribution.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal">tf.truncated_normal() documentation</a> for help.
- biases
- Variable Tensor with all zeros.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#zeros"> tf.zeros() documentation</a> for help.
If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available here.
End of explanation
"""
# Change if you have memory restrictions
batch_size = 128
# TODO: Find the best parameters for each configuration
# epochs =
# learning_rate =
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
"""
Explanation: <img src="image/Learn_Rate_Tune_Image.png" style="height: 70%;width: 70%">
Problem 3
Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.
Parameter configurations:
Configuration 1
* Epochs: 1
* Learning Rate:
* 0.8
* 0.5
* 0.1
* 0.05
* 0.01
Configuration 2
* Epochs:
* 1
* 2
* 3
* 4
* 5
* Learning Rate: 0.2
The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.
If you're having trouble solving problem 3, you can view the solution here.
End of explanation
"""
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
"""
Explanation: Test
You're going to test your model against your held-out dataset (testing data). This will give you a good indicator of how well the model will do in the real world.  You should have a test accuracy of at least 80%.
End of explanation
"""
|
unnati-xyz/intro-python-data-science
|
wine/wine-selection.ipynb
|
mit
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (13,8)
df = pd.read_csv("./winequality-red.csv")
df.head()
df.shape
"""
Explanation: Wine Selection
Framing
I want to buy a fine wine, but I have no idea about wine selection and I'm not good at wine tasting.
I will use the data to understand what goes into making a fine wine.
End of explanation
"""
#df.loc[df.b > 0, 'd'] = 1
df.loc[df.quality > 5, 'category'] = 1
df.loc[df.quality <= 5, 'category'] = 0
"""
Explanation: Wine Category
Let's create a new column 'category' which signifies the category of wine - High (1) or Low (0)
Wine with quality > 5 (a score of 6 or higher) is considered High quality; the rest are Low quality
End of explanation
"""
df.category.value_counts()
df.head()
"""
Explanation: This is the frequency count for each category
End of explanation
"""
df.corr()
from pandas.tools.plotting import scatter_matrix
scatter_matrix(df, figsize=(15,15), diagonal='kde')
"""
Explanation: Visual Exploration
Let's see how the columns are related
To start, lets take 2 variables at a time to explore data
Correlation
End of explanation
"""
df.plot(x="alcohol", y="category", kind="scatter")
"""
Explanation: Alcohol vs Category
End of explanation
"""
#df.plot(x="alcohol", y="volatile acidity", kind="scatter", c="category")
ax = df[df.category == 1].plot(x="alcohol", y="volatile acidity", kind="scatter", color="red", label="HIGH", s=100, alpha=0.5)
df[df.category == 0].plot(x="alcohol", y="volatile acidity", kind="scatter", color="green", label="LOW", s=100, alpha=0.5, ax=ax)
pd.set_option("precision",3)
"""
Explanation: Exercise: Volatile Acidity vs Category
3 variable visualization
Let's add one more dimension to get a better sense of what is correlated
Alcohol vs Volatile Acidity vs Category
End of explanation
"""
df.shape
df_train = df.iloc[:1280,]
df_test = df.iloc[1280:,]
X_train = df_train["volatile acidity"]
y_train = df_train["category"]
X_test = df_test["volatile acidity"]
y_test = df_test["category"]
X_train = X_train.reshape(X_train.shape[0],1)
X_test = X_test.reshape(X_test.shape[0],1)
from sklearn.linear_model import LogisticRegression
logistic_model = LogisticRegression()
logistic_model.fit(X_train, y_train)
sns.lmplot(data=df, x="alcohol", y="category", logistic=True)
"""
Explanation: Time to build a predictive model
Let's build a model that can predict the category of wine, given information about alcohol content and volatile acidity
Building a predictive model involves training the model with historical data known as training data. Once we have the model trained, the model can predict labels (in this case, the category of wine) for the given features (test data)
We have 1600 rows of the wine data; let's split this data in an 80:20 ratio as training:testing data
Why do we need to do this?
We can compare the predicted label with the actual label.
By doing this, we can measure how accurate our model is.
End of explanation
"""
predicted = logistic_model.predict(X_test)
df_compare = pd.DataFrame()
df_compare["actual"] = y_test
df_compare["predicted"] = predicted
df_compare["volatile acidity"] = df_test["volatile acidity"]
ax=df_compare.plot(x="volatile acidity", y="actual", kind="scatter", color="blue", label="actual")
df_compare.plot(x="volatile acidity", y="predicted", kind="scatter", color="red", label="predicted", ax=ax)
"""
Explanation: It’s a bird… it’s a plane… it… depends on your classifier’s threshold
-- Sancho McCann
End of explanation
"""
df_train = df.iloc[:1280,]
df_test = df.iloc[1280:,]
X_train = df_train[["sulphates", "alcohol"]]
y_train = df_train["category"]
X_test = df_test[["sulphates", "alcohol"]]
y_test = df_test["category"]
logistic_model = LogisticRegression()
logistic_model.fit(X_train, y_train)
predicted = logistic_model.predict(X_test)
df_compare = pd.DataFrame()
df_compare["actual"] = y_test
df_compare["predicted"] = predicted
df_compare["sulphates"] = df_test["sulphates"]
df_compare["alcohol"] = df_test["alcohol"]
df_compare.head()
ax = df_compare[df_compare.actual == 1].plot(x="alcohol", y="sulphates", kind="scatter", color="red", label="HIGH", s=100, alpha=0.5)
df_compare[df_compare.actual == 0].plot(x="alcohol", y="sulphates", kind="scatter", color="green", label="LOW", s=100, alpha=0.5, ax=ax)
ax = df_compare[df_compare.predicted == 1].plot(x="alcohol", y="sulphates", kind="scatter", color="red", label="HIGH", s=100, alpha=0.5)
df_compare[df_compare.predicted == 0].plot(x="alcohol", y="sulphates", kind="scatter", color="green", label="LOW", s=100, alpha=0.5, ax=ax)
"""
Explanation: Let's add more features - volatile acidity, sulphates, alcohol to predict the category
2 variable model
End of explanation
"""
from sklearn import metrics
#ols_auc = metrics.roc_auc_score(df_compare.actual, df_compare.predicted)
fpr, tpr, thresholds = metrics.roc_curve(df_compare.actual, df_compare.predicted)
plt.plot(fpr, tpr)
plt.plot([0,1],[0,1])
"""
Explanation: Accuracy Metrics
AUC
ROC
Misclassification Rate
Confusion Matrix
Precision & Recall
Confusion Matrix
Calculate True Positive Rate
TPR = TP / (TP+FN)
Calculate False Positive Rate
FPR = FP / (FP+TN)
Precision & Recall
AUC-ROC for the model
End of explanation
"""
|
statsmodels/statsmodels
|
examples/notebooks/kernel_density.ipynb
|
bsd-3-clause
|
%matplotlib inline
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.distributions.mixture_rvs import mixture_rvs
"""
Explanation: Kernel Density Estimation
Kernel density estimation is the process of estimating an unknown probability density function using a kernel function $K(u)$. While a histogram counts the number of data points in somewhat arbitrary regions, a kernel density estimate is a function defined as the sum of a kernel function on every data point. The kernel function typically exhibits the following properties:
Symmetry such that $K(u) = K(-u)$.
Normalization such that $\int_{-\infty}^{\infty} K(u) \ du = 1$ .
Monotonically decreasing such that $K'(u) < 0$ when $u > 0$.
Expected value equal to zero such that $\mathrm{E}[K] = 0$.
For more information about kernel density estimation, see for instance Wikipedia - Kernel density estimation.
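Concretely, given samples $x_1, \ldots, x_n$ and a bandwidth $h > 0$, the kernel density estimate is the average of a kernel function centered on each observation:
$
\hat{f}_h(x) = \frac{1}{n h} \sum_{i=1}^{n} K\left(\frac{x - x_i}{h}\right)
$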
A univariate kernel density estimator is implemented in sm.nonparametric.KDEUnivariate.
In this example we will show the following:
Basic usage, how to fit the estimator.
The effect of varying the bandwidth of the kernel using the bw argument.
The various kernel functions available using the kernel argument.
End of explanation
"""
np.random.seed(12345) # Seed the random number generator for reproducible results
"""
Explanation: A univariate example
End of explanation
"""
# Location, scale and weight for the two distributions
dist1_loc, dist1_scale, weight1 = -1, 0.5, 0.25
dist2_loc, dist2_scale, weight2 = 1, 0.5, 0.75
# Sample from a mixture of distributions
obs_dist = mixture_rvs(
prob=[weight1, weight2],
size=250,
dist=[stats.norm, stats.norm],
kwargs=(
dict(loc=dist1_loc, scale=dist1_scale),
dict(loc=dist2_loc, scale=dist2_scale),
),
)
"""
Explanation: We create a bimodal distribution: a mixture of two normal distributions with locations at -1 and 1.
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Scatter plot of data samples and histogram
ax.scatter(
obs_dist,
np.abs(np.random.randn(obs_dist.size)),
zorder=15,
color="red",
marker="x",
alpha=0.5,
label="Samples",
)
lines = ax.hist(obs_dist, bins=20, edgecolor="k", label="Histogram")
ax.legend(loc="best")
ax.grid(True, zorder=-5)
"""
Explanation: The simplest non-parametric technique for density estimation is the histogram.
End of explanation
"""
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit() # Estimate the densities
"""
Explanation: Fitting with the default arguments
The histogram above is discontinuous. To compute a continuous probability density function,
we can use kernel density estimation.
We initialize a univariate kernel density estimator using KDEUnivariate.
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Plot the histogram
ax.hist(
obs_dist,
bins=20,
density=True,
label="Histogram from samples",
zorder=5,
edgecolor="k",
alpha=0.5,
)
# Plot the KDE as fitted using the default arguments
ax.plot(kde.support, kde.density, lw=3, label="KDE from samples", zorder=10)
# Plot the true distribution
true_values = (
stats.norm.pdf(loc=dist1_loc, scale=dist1_scale, x=kde.support) * weight1
+ stats.norm.pdf(loc=dist2_loc, scale=dist2_scale, x=kde.support) * weight2
)
ax.plot(kde.support, true_values, lw=3, label="True distribution", zorder=15)
# Plot the samples
ax.scatter(
obs_dist,
np.abs(np.random.randn(obs_dist.size)) / 40,
marker="x",
color="red",
zorder=20,
label="Samples",
alpha=0.5,
)
ax.legend(loc="best")
ax.grid(True, zorder=-5)
"""
Explanation: We present a figure of the fit, as well as the true distribution.
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
# Plot the histogram
ax.hist(
obs_dist,
bins=25,
label="Histogram from samples",
zorder=5,
edgecolor="k",
density=True,
alpha=0.5,
)
# Plot the KDE for various bandwidths
for bandwidth in [0.1, 0.2, 0.4]:
kde.fit(bw=bandwidth) # Estimate the densities
ax.plot(
kde.support,
kde.density,
"--",
lw=2,
color="k",
zorder=10,
label="KDE from samples, bw = {}".format(round(bandwidth, 2)),
)
# Plot the true distribution
ax.plot(kde.support, true_values, lw=3, label="True distribution", zorder=15)
# Plot the samples
ax.scatter(
obs_dist,
np.abs(np.random.randn(obs_dist.size)) / 50,
marker="x",
color="red",
zorder=20,
label="Data samples",
alpha=0.5,
)
ax.legend(loc="best")
ax.set_xlim([-3, 3])
ax.grid(True, zorder=-5)
"""
Explanation: In the code above, default arguments were used. We can also vary the bandwidth of the kernel, as we will now see.
Varying the bandwidth using the bw argument
The bandwidth of the kernel can be adjusted using the bw argument.
In the following example, a bandwidth of bw=0.2 seems to fit the data well.
End of explanation
"""
from statsmodels.nonparametric.kde import kernel_switch
list(kernel_switch.keys())
"""
Explanation: Comparing kernel functions
In the example above, a Gaussian kernel was used. Several other kernels are also available.
End of explanation
"""
# Create a figure
fig = plt.figure(figsize=(12, 5))
# Enumerate every option for the kernel
for i, (ker_name, ker_class) in enumerate(kernel_switch.items()):
# Initialize the kernel object
kernel = ker_class()
# Sample from the domain
domain = kernel.domain or [-3, 3]
x_vals = np.linspace(*domain, num=2 ** 10)
y_vals = kernel(x_vals)
# Create a subplot, set the title
ax = fig.add_subplot(3, 3, i + 1)
ax.set_title('Kernel function "{}"'.format(ker_name))
ax.plot(x_vals, y_vals, lw=3, label="{}".format(ker_name))
ax.scatter([0], [0], marker="x", color="red")
plt.grid(True, zorder=-5)
ax.set_xlim(domain)
plt.tight_layout()
"""
Explanation: The available kernel functions
End of explanation
"""
# Create three equidistant points
data = np.linspace(-1, 1, 3)
kde = sm.nonparametric.KDEUnivariate(data)
# Create a figure
fig = plt.figure(figsize=(12, 5))
# Enumerate every option for the kernel
for i, kernel in enumerate(kernel_switch.keys()):
# Create a subplot, set the title
ax = fig.add_subplot(3, 3, i + 1)
ax.set_title('Kernel function "{}"'.format(kernel))
# Fit the model (estimate densities)
kde.fit(kernel=kernel, fft=False, gridsize=2 ** 10)
# Create the plot
ax.plot(kde.support, kde.density, lw=3, label="KDE from samples", zorder=10)
ax.scatter(data, np.zeros_like(data), marker="x", color="red")
plt.grid(True, zorder=-5)
ax.set_xlim([-3, 3])
plt.tight_layout()
"""
Explanation: The available kernel functions on three data points
We now examine how the kernel density estimate will fit to three equally spaced data points.
End of explanation
"""
obs_dist = mixture_rvs(
[0.25, 0.75],
size=250,
dist=[stats.norm, stats.beta],
kwargs=(dict(loc=-1, scale=0.5), dict(loc=1, scale=1, args=(1, 0.5))),
)
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit()
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.hist(obs_dist, bins=20, density=True, edgecolor="k", zorder=4, alpha=0.5)
ax.plot(kde.support, kde.density, lw=3, zorder=7)
# Plot the samples
ax.scatter(
obs_dist,
np.abs(np.random.randn(obs_dist.size)) / 50,
marker="x",
color="red",
zorder=20,
label="Data samples",
alpha=0.5,
)
ax.grid(True, zorder=-5)
"""
Explanation: A more difficult case
The fit is not always perfect. See the example below for a harder case.
End of explanation
"""
obs_dist = mixture_rvs(
[0.25, 0.75],
size=1000,
dist=[stats.norm, stats.norm],
kwargs=(dict(loc=-1, scale=0.5), dict(loc=1, scale=0.5)),
)
kde = sm.nonparametric.KDEUnivariate(obs_dist)
kde.fit(gridsize=2 ** 10)
kde.entropy
kde.evaluate(-1)
"""
Explanation: The KDE is a distribution
Since the KDE is a distribution, we can access attributes and methods such as:
entropy
evaluate
cdf
icdf
sf
cumhazard
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.cdf, lw=3, label="CDF")
ax.plot(np.linspace(0, 1, num=kde.icdf.size), kde.icdf, lw=3, label="Inverse CDF")
ax.plot(kde.support, kde.sf, lw=3, label="Survival function")
ax.legend(loc="best")
ax.grid(True, zorder=-5)
"""
Explanation: Cumulative distribution, its inverse, and the survival function
End of explanation
"""
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.plot(kde.support, kde.cumhazard, lw=3, label="Cumulative Hazard Function")
ax.legend(loc="best")
ax.grid(True, zorder=-5)
"""
Explanation: The Cumulative Hazard Function
End of explanation
"""
|
mne-tools/mne-tools.github.io
|
0.12/_downloads/plot_time_frequency_mixed_norm_inverse.ipynb
|
bsd-3-clause
|
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.inverse_sparse import tf_mixed_norm
from mne.viz import plot_sparse_source_estimates
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
# Read noise covariance matrix
cov = mne.read_cov(cov_fname)
# Handling average file
condition = 'Left visual'
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked = mne.pick_channels_evoked(evoked)
# We make the window slightly larger than what you'll eventually be interested
# in ([-0.05, 0.3]) to avoid edge effects.
evoked.crop(tmin=-0.1, tmax=0.4)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname, force_fixed=False,
surf_ori=True)
"""
Explanation: Compute MxNE with time-frequency sparse prior
The TF-MxNE solver is a distributed inverse method (like dSPM or sLORETA)
that promotes focal (sparse) sources (such as dipole fitting techniques).
The benefit of this approach is that:
it is spatio-temporal without assuming stationarity (source properties
can vary over time)
activations are localized in space, time and frequency in one step.
with a built-in filtering process based on a short time Fourier
transform (STFT), data does not need to be low-passed (just high-passed
to make the signals zero mean).
the solver solves a convex optimization problem, hence cannot be
trapped in local minima.
References:
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations
Neuroimage, Volume 70, 15 April 2013, Pages 410-422, ISSN 1053-8119,
DOI: 10.1016/j.neuroimage.2012.12.051.
A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries
Proceedings Information Processing in Medical Imaging
Lecture Notes in Computer Science, 2011, Volume 6801/2011,
600-611, DOI: 10.1007/978-3-642-22092-0_49
https://doi.org/10.1007/978-3-642-22092-0_49
End of explanation
"""
# alpha_space regularization parameter is between 0 and 100 (100 is high)
alpha_space = 50. # spatial regularization parameter
# alpha_time parameter promotes temporal smoothness
# (0 means no temporal regularization)
alpha_time = 1. # temporal regularization parameter
loose, depth = 0.2, 0.9 # loose orientation & depth weighting
# Compute dSPM solution to be used as weights in MxNE
inverse_operator = make_inverse_operator(evoked.info, forward, cov,
loose=loose, depth=depth)
stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
method='dSPM')
# Compute TF-MxNE inverse solution
stc, residual = tf_mixed_norm(evoked, forward, cov, alpha_space, alpha_time,
loose=loose, depth=depth, maxit=200, tol=1e-4,
weights=stc_dspm, weights_min=8., debias=True,
wsize=16, tstep=4, window=0.05,
return_residual=True)
# Crop to remove edges
stc.crop(tmin=-0.05, tmax=0.3)
evoked.crop(tmin=-0.05, tmax=0.3)
residual.crop(tmin=-0.05, tmax=0.3)
# Show the evoked response and the residual for gradiometers
ylim = dict(grad=[-120, 120])
evoked.pick_types(meg='grad', exclude='bads')
evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,
proj=True)
residual.pick_types(meg='grad', exclude='bads')
residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,
proj=True)
"""
Explanation: Run solver
End of explanation
"""
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
opacity=0.1, fig_name="TF-MxNE (cond %s)"
% condition, modes=['sphere'], scale_factors=[1.])
time_label = 'TF-MxNE time=%0.2f ms'
clim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9])
brain = stc.plot('sample', 'inflated', 'rh', clim=clim, time_label=time_label,
smoothing_steps=5, subjects_dir=subjects_dir)
brain.show_view('medial')
brain.set_data_time_index(120)
brain.add_label("V1", color="yellow", scalar_thresh=.5, borders=True)
brain.add_label("V2", color="red", scalar_thresh=.5, borders=True)
"""
Explanation: View in 2D and 3D ("glass" brain like 3D plot)
End of explanation
"""
|
chrisjsewell/jsonextended
|
README.ipynb
|
mit
|
from jsonextended import edict, plugins, example_mockpaths
"""
Explanation: JSON Extended
A module to extend the python json package functionality:
Treat a directory structure like a nested dictionary:
lightweight plugin system: define bespoke classes for parsing different file extensions and encoding/decoding objects
lazy loading: read files only when they are indexed into
tab completion: index as tabs for quick exploration of directory
Manipulation of nested dictionaries:
enhanced pretty printer
Javascript rendered, expandable tree in the Jupyter Notebook
functions including; filter, merge, flatten, unflatten, diff
output to directory structure (of n folder levels)
On-disk indexing option for large json files (using the ijson package)
Units schema concept to apply and convert physical units (using the
pint package)
Basic Example
End of explanation
"""
datadir = example_mockpaths.directory1
print(datadir.to_string(indentlvl=3,file_content=True,color=True))
"""
Explanation: Take a directory structure, potentially containing multiple file types:
End of explanation
"""
plugins.load_builtin_plugins('parsers')
plugins.view_plugins('parsers')
"""
Explanation: Plugins can be defined for parsing each file type (see Creating Plugins section):
End of explanation
"""
lazy = edict.LazyLoad(datadir)
lazy
"""
Explanation: LazyLoad then takes a path name, path-like object or dict-like object, which will lazily load each file with a compatible plugin.
End of explanation
"""
list(lazy.keys())
lazy[['file1.json','key1']]
lazy.subdir1.file1_literal_csv.header2
"""
Explanation: Lazyload can then be treated like a dictionary, or indexed by tab completion:
End of explanation
"""
edict.pprint(lazy,depth=2,keycolor='green')
"""
Explanation: For pretty printing of the dictionary:
End of explanation
"""
edict.flatten(lazy.subdir1)
"""
Explanation: Numerous functions exist to manipulate the nested dictionary:
End of explanation
"""
print(example_mockpaths.jsonfile2.to_string())
edict.LazyLoad(example_mockpaths.jsonfile2).to_dict()
plugins.load_builtin_plugins('decoders')
plugins.view_plugins('decoders')
dct = edict.LazyLoad(example_mockpaths.jsonfile2).to_dict()
dct
"""
Explanation: LazyLoad passes the plugins.decode function to the parser plugin's read_file method (keyword 'object_hook'). Therefore, bespoke decoder plugins can be set up for specific dictionary key signatures:
End of explanation
"""
plugins.load_builtin_plugins('encoders')
plugins.view_plugins('encoders')
import json
json.dumps(dct,default=plugins.encode)
"""
Explanation: This process can be reversed, using encoder plugins:
End of explanation
"""
from jsonextended import plugins, utils
"""
Explanation: Installation
pip install jsonextended
jsonextended has no import dependencies on Python 3.x (and only pathlib2 on 2.7), but,
for full functionality, it is advised to install the following packages:
conda install -c conda-forge ijson numpy pint
Creating and Loading Plugins
End of explanation
"""
plugins.view_interfaces()
plugins.unload_all_plugins()
plugins.view_plugins()
"""
Explanation: Plugins are recognised as classes with a minimal set of attributes matching the plugin category interface:
End of explanation
"""
class ParserPlugin(object):
plugin_name = 'example'
plugin_descript = 'a parser for *.example files, that outputs (line_number:line)'
file_regex = '*.example'
def read_file(self, file_obj, **kwargs):
out_dict = {}
for i, line in enumerate(file_obj):
out_dict[i] = line.strip()
return out_dict
"""
Explanation: For example, a simple parser plugin would be:
End of explanation
"""
plugins.load_plugin_classes([ParserPlugin],'parsers')
plugins.view_plugins()
"""
Explanation: Plugins can be loaded as a class:
End of explanation
"""
fobj = utils.MockPath('example.py',is_file=True,content="""
class ParserPlugin(object):
plugin_name = 'example.other'
plugin_descript = 'a parser for *.example.other files, that outputs (line_number:line)'
file_regex = '*.example.other'
def read_file(self, file_obj, **kwargs):
out_dict = {}
for i, line in enumerate(file_obj):
out_dict[i] = line.strip()
return out_dict
""")
dobj = utils.MockPath(structure=[fobj])
plugins.load_plugins_dir(dobj,'parsers')
plugins.view_plugins()
"""
Explanation: Or by directory (loading all .py files):
End of explanation
"""
from jsonextended import ejson, edict, utils
path = utils.get_test_path()
ejson.jkeys(path)
jdict1 = ejson.to_dict(path)
edict.pprint(jdict1,depth=2)
edict.to_html(jdict1,depth=2)
"""
Explanation: For a more complex example of a parser, see jsonextended.complex_parsers
Interface details
Parsers:
file_regex attribute, a str denoting what files to apply it to. A file will be parsed by the longest regex it matches.
read_file method, which takes an (open) file object and kwargs as parameters
Decoders:
dict_signature attribute, a tuple denoting the keys which the dictionary must have, e.g. dict_signature=('a','b') decodes {'a':1,'b':2}
from_... method(s), which takes a dict object as parameter. The plugins.decode function will use the method denoted by the intype parameter, e.g. if intype='json', then from_json will be called.
Encoders:
objclass attribute, the object class to apply the encoding to, e.g. objclass=decimal.Decimal encodes objects of that type
to_... method(s), which takes a dict object as parameter. The plugins.encode function will use the method denoted by the outtype parameter, e.g. if outtype='json', then to_json will be called.
Extended Examples
For more information, all functions contain docstrings with tested examples.
Data Folders JSONisation
End of explanation
"""
jdict2 = ejson.to_dict(path,['dir1','file1'])
edict.pprint(jdict2,depth=1)
filtered = edict.filter_keys(jdict2,['vol*'],use_wildcards=True)
edict.pprint(filtered)
edict.pprint(edict.flatten(filtered))
"""
Explanation: To try the rendered JSON tree, output in the Jupyter Notebook, go to : https://chrisjsewell.github.io/
Nested Dictionary Manipulation
End of explanation
"""
from jsonextended.units import apply_unitschema, split_quantities
withunits = apply_unitschema(filtered,{'volume':'angstrom^3'})
edict.pprint(withunits)
newunits = apply_unitschema(withunits,{'volume':'nm^3'})
edict.pprint(newunits)
edict.pprint(split_quantities(newunits),depth=4)
"""
Explanation: Units Schema
End of explanation
"""
|
CentreForResearchInAppliedLinguistics/clic
|
docs/notebooks/Concordance/A serious concordance.ipynb
|
mit
|
# coding: utf-8
import os
from cheshire3.baseObjects import Session
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
session = Session()
session.database = 'db_dickens'
serv = SimpleServer(session, os.path.join(cheshire3Root, 'configs', 'serverConfig.xml'))
db = serv.get_object(session, session.database)
qf = db.get_object(session, 'defaultQueryFactory')
resultSetStore = db.get_object(session, 'resultSetStore')
idxStore = db.get_object(session, 'indexStore')
def build_concordance(term, context):
query = qf.get_query(session, """(c3.subcorpus-idx all "dickens" and/proxinfo c3.chapter-idx all/proxinfo "{}" )""".format(term))
result_set = db.search(session, query)
concordance = []
for result in result_set:
record = result.fetch_record(session)
tree = record.get_dom(session)
text_nodes = tree.xpath('//txt/text()')
text_only = ' '.join(sentence for sentence in text_nodes)
for hit in result.proxInfo:
word_id = hit[0][1]
char_location = hit[0][2]
concordance_line = text_only[char_location - context : char_location + len(term) + context]
#NOTE in these cases record.process_xpath(session, xpath) is not faster
#TODO check there is only one result
#sentence = tree.xpath('/div/descendant::w[%i]/ancestor-or-self::s/@id' % int(word_id + 1))
#paragraph = tree.xpath('/div/descendant::w[%i]/ancestor-or-self::p/@id' % int(word_id + 1))
#concordance.append((concordance_line, sentence[0], paragraph[0]))
concordance.append((hit, concordance_line, text_only, tree))
return concordance
#%timeit -n6 concordance = build_concordance("fog", 25)
concordance = build_concordance("fog", 25)
print len(concordance)
concordance[55][:2]
text = concordance[55][2]
print text[15292:19500]
# is the fourth element the number of w nodes?
from lxml import etree
xmlstring = etree.tostring(concordance[55][3])
snippet = """w o="94">home</w><n>.</n></toks></s><s sid="194" id="GE.c15.s194" eid="456"><txt>Beyond town, we found a heavy mist out, and it fell wet and thick.</txt><toks><w o="0">Beyond</w><n> </n><w o="7">town</w><n>,</n><n> </n><w o="13">we</w><n> </n><w o="16">found</w><n> </n><w o="22">a</w><n> </n><w o="24">heavy</w><n> </n><w o="30">mist</w><n> </n><w o="35">out</w><n>,</n><n> </n><w o="40">and</w><n> </n><w o="44">it</w><n> </n><w o="47">fell</w><n> </n><w o="52">wet</w><n> </n><w o="56">and</w><n> </n><w o="60">thick</w><n>.</n></toks></s><s sid="195" id="GE.c15.s195" eid="457"><txt>The turnpike lamp was a blur, quite out of the lamp\'s usual place apparently, and its rays looked solid substance on the fog.</txt><toks><w o="0">The</w><n> </n><w o="4">turnpike</w><n> </n><w o="13">lamp</w><n> </n><w o="18">was</w><n> </n><w o="22">a</w><n> </n><w o="24">blur</w><n>,</n><n> </n><w o="30">quite</w><n> </n><w o="36">out</w><n> </n><w o="40">of</w><n> </n><w o="43">the</w><n> </n><w o="47">lamp\'s</w><n> </n><w o="54">usual</w><n> </n><w o="60">place</w><n> </n><w o="66">apparently</w><n>,</n><n> </n><w o="78">and</w><n> </n><w o="82">its</w><n> </n><w o="86">rays</w><n> </n><w o="91">looked</w><n> </n><w o="98">solid</w><n> </n><w o="104">substance</w><n> </n><w o="114">on</w><n> </n><w o="117">the</w><n> </n><w o="121">fog</w><n>.</n></toks></s><s sid="196" id="GE.c15.s196" eid="458"><txt>We were noticing this, and saying how that the mist rose with a change of wind from a certain quarter of our marshes, when we came upon a man, slouching under the lee of the turnpike house.</txt><toks><w o="0">We</w><n> </n><w o="3">were</w><n> </n><w o="8">noticing</w><n> </n><w o="17">this</w><n>,</n><n> </n><w o="23">and</w><n> </n><w o="27">saying</w><n> </n><w o="34">how</w><n> </n><w o="38">that</w><n> </n><w o="43">the</w><n> </n><w o="47">mist</w><n> </n><w o="52">rose</w><n> </n><w o="57">with</w><n> </n><w o="62">a</w><n> </n><w o="64">change</w><n> </n><w o="71">of</w><n> </n><w o="74">wind</w><n> </n><w o="79">from</w><n> </n><w o="84">a</w><n> </n><w o="86">certain</w><n> </n><w o="94">quarter</w><n> </n><w o="102">of</w><n> </n><w o="105">our</w><n> </n><w o="109">marshes</w><n>,</n><n> </n><w o="118">when</w><n> </n><w o="123">we</w><n> </n><w o="126">c"""
print snippet
# idea: use a tokenmerger?
xmlstring
# concordance = build_concordance("lamp", 25)
print len(concordance)
for line in concordance[55:56]:
print concordance.index(line), line[0], line[1], "########", line[2][:100]
for line in concordance[0:1000]:
print concordance.index(line)
def build_concordance_with_locations(term, context, max_hits):
query = qf.get_query(session, """(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.chapter-idx any "{}" )""".format(term))
result_set = db.search(session, query)
concordance = []
count = 0
for result in result_set:
if count < max_hits:
record = result.fetch_record(session)
tree = record.get_dom(session)
text_nodes = tree.xpath('//txt/text()')
text_only = ' '.join(sentence for sentence in text_nodes)
for hit in result.proxInfo:
if count < max_hits:
count +=1
word_id = hit[0][1]
char_location = hit[0][2]
concordance_line = text_only[char_location - context : char_location + len(term) + context]
#NOTE in these cases record.process_xpath(session, xpath) is not faster
#TODO check there is only one result
sentence = tree.xpath('/div/descendant::w[%i]/ancestor-or-self::s/@id' % int(word_id + 1))
paragraph = tree.xpath('/div/descendant::w[%i]/ancestor-or-self::p/@id' % int(word_id + 1))
concordance.append((concordance_line, sentence[0], paragraph[0]))
return concordance
%timeit build_concordance_with_locations("fog", 25, 100)
%timeit build_concordance_with_locations("the", 25, 100)
%timeit build_concordance_with_locations("the", 25, 1000)
%prun build_concordance_with_locations("the", 25)
%timeit build_concordance_with_locations("the", 25, 10000)
%timeit build_concordance_with_locations("the", 25, 100000)
%timeit -n1 concordance = build_concordance_with_locations("the", 25, 1000000)
concordance = build_concordance_with_locations("dense fog", 25, 1000)
len(concordance)
for line in concordance:
print line
"""
Explanation: Brand New Concordance
A quick and dirty way of building a concordance
End of explanation
"""
# coding: utf-8
import os
from cheshire3.baseObjects import Session
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
session = Session()
session.database = 'db_dickens'
serv = SimpleServer(session, os.path.join(cheshire3Root, 'configs', 'serverConfig.xml'))
db = serv.get_object(session, session.database)
qf = db.get_object(session, 'defaultQueryFactory')
resultSetStore = db.get_object(session, 'resultSetStore')
idxStore = db.get_object(session, 'indexStore')
query = qf.get_query(session, '(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.chapter-idx any "fog")')
result_set = db.search(session, query)
"""
Explanation: Testing and learning stuff
End of explanation
"""
result = result_set[0]
print result
"""
Explanation: A result in a result set refers to a record in a recordStore
(which, in the case of the chapter-idx, is a chapter)
End of explanation
"""
result.occurences
"""
Explanation: The occurences value is wrong, from what I can gather.
End of explanation
"""
result.proxInfo[:15]
"""
Explanation: The values in proxInfo are not sorted
End of explanation
"""
record = result.fetch_record(session)
record.byteCount
?record.fetch_proxVector(session)
record.get_xml(session)
tree = record.get_dom(session)
"""
Explanation: From the result one can get an actual result
which we can use to get_xml (as a string)
or get_dom (an XML tree).
End of explanation
"""
%timeit tree.xpath("//txt/text()")
one = tree.xpath("//txt/text()")
len(one)
%timeit tree.xpath("/div/p/s/txt/text()")
two = tree.xpath("/div/p/s/txt/text()")
len(two)
"""
Explanation: Types of XPATH queries needed for the concordance
End of explanation
"""
tree.xpath("/div/p/s[@eid=183]/toks/w[@o=215]/text()")
tree.xpath("/div/p/s[@eid=183]/toks")
tree.xpath("/div/p/s[@eid=183]/toks/w/text()")
tree.xpath("/div/p/self::s[@eid=183]/toks/w[@o=215]")
tree.xpath("/div/p/s/toks/w[@o=215]")
"""
Explanation: If one has the eid and the character offset (which
is also part of the w elements!) one can get the actual
word that was a hit, but one can also go a bit further
and get the sentence and paragraph ids.
End of explanation
"""
%timeit tree.xpath("//p/s[@eid=183]/@id")
tree.xpath("//p/s[@eid=183]/@id")
%timeit tree.xpath("//*[@eid=183]/@id")
# see how slow this is!
tree.xpath("//*[@eid=183]/@id")
%timeit tree.xpath("//s[@eid=183]/@id")
tree.xpath("//s[@eid=183]/@id")
"""
Explanation: Three ways to get the book, chapter and sentence id:
End of explanation
"""
s = tree.xpath("//p/s[@eid=%i]" % 183)
s
print s[0]
s[0].xpath("self::s/@id")
s[0].xpath("attribute::id")
%timeit tree.xpath("//s[@eid=183]/ancestor::p/@id")
tree.xpath("//s[@eid=183]/ancestor::p/@id")
%timeit s[0].xpath("ancestor::p/@id")
s[0].xpath("ancestor::p/@id")
sentence = tree.xpath("//p/s[@eid=%i]" % 1) # format: ['BH.c1.s93']
sentence
"""
Explanation: If one creates a smaller tree, the computations
are more efficient.
End of explanation
"""
query = qf.get_query(session, '(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.chapter-idx any "fog")')
result_set = db.search(session, query)
result = result_set[0]
proxinfo = result.proxInfo[:15]
from pprint import pprint # to "prettry print" a nested list nicely
pprint(proxinfo)
record.get_xml(session)[6409:8000]
tree = record.get_dom(session)
prox1 = proxinfo[0][0][0]
prox2 = proxinfo[0][0][1]
prox3 = proxinfo[0][0][2]
prox4 = proxinfo[0][0][3]
for prox in (prox1, prox2, prox3, prox4): print prox
# output is a boolean
tree.xpath('/div/p/s/toks/w/@o=%i' % prox1)
w = tree.xpath('/div/p/s/toks/w[@o=%i]/text()' % prox1)
print w
# o = 0 in this case; this is interesting to see
# quickly what words sentences start with.
w = tree.xpath('/div/descendant::w[%i]' % prox2)
print w
w = tree.xpath('/div/descendant::w[%i]/text()' % int(prox2 + 1))
print w
tree.xpath('/div/descendant::w[%i]/text()' % int(prox2 + 1))
tree.xpath('/div/descendant::w[%i]/ancestor-or-self::s/@id' % int(prox2 + 1))
tree.xpath('/div/descendant::w[%i]/ancestor-or-self::p/@id' % int(prox2 + 1))
#TODO time
record.process_xpath(session, '//*[@eid="%d"]/following::w[%d+1]/ancestor-or-self::s' % (prox1, prox2))
def build_concordance_with_location(term, context):
query = qf.get_query(session, """(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.chapter-idx any "{}" )""".format(term))
result_set = db.search(session, query)
concordance = []
for result in result_set:
record = result.fetch_record(session)
tree = record.get_dom(session)
text_nodes = tree.xpath('//txt/text()')
text_only = ' '.join(sentence for sentence in text_nodes)
for hit in result.proxInfo:
word_id = hit[0][1]
char_location = hit[0][2]
concordance_line = text_only[char_location - context : char_location + len(term) + context]
#NOTE in these cases record.process_xpath(session, xpath) is not faster
#TODO check there is only one result
sentence = tree.xpath('/div/descendant::w[%i]/ancestor-or-self::s/@id' % int(word_id + 1))
paragraph = tree.xpath('/div/descendant::w[%i]/ancestor-or-self::p/@id' % int(word_id + 1))
concordance.append((concordance_line, sentence[0], paragraph[0]))
return concordance
%timeit -n1 concordance = build_concordance_with_locations("the", 25, 1000000)
# coding: utf-8
import os
from cheshire3.baseObjects import Session
from cheshire3.document import StringDocument
from cheshire3.internal import cheshire3Root
from cheshire3.server import SimpleServer
session = Session()
session.database = 'db_dickens'
serv = SimpleServer(session, os.path.join(cheshire3Root, 'configs', 'serverConfig.xml'))
db = serv.get_object(session, session.database)
qf = db.get_object(session, 'defaultQueryFactory')
resultSetStore = db.get_object(session, 'resultSetStore')
idxStore = db.get_object(session, 'indexStore')
%timeit build_concordance_with_locations("the", 25, 100000)
term = 'fog'
"""(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.chapter-idx any "{}" )""".format(term)
query = qf.get_query(session, """(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.chapter-idx any "{}" )""".format(term))
result_set = db.search(session, query)
len(result_set)
#build_concordance(result_set, 'we', 25)
# %timeit build_concordance(result_set, 'the', 25)
# option 1: get chapter as string
# option 2: get sentence as string with location info as well.
#TODO implement eid for quotes and suspensions
#TODO implement start and end result for pagination
the_location_dickens = []
def test():
for result in result_set:
proxinfo = result.proxInfo
record = result.fetch_record(session)
tree = record.get_dom(session)
for hit in proxinfo:
word_location = hit[0][1]
char_location = hit[0][2]
word_location = tree.xpath("/div/p/s/toks/w[@o=215]")
# word_location = tree.xpath('//w[%i]' % word_location)
the_location_dickens.append(word_location)
#location = tree.xpath('//w[word_location]')
#the_in_dickens.append([concordance_line, location])
#%timeit test()
test()
len(the_location_dickens)
# option 1: merge all txt together and then do xpath to get the location
# option 2: get txt with location info as well.
for result in result_set:
proxinfo = result.proxInfo
record = result.fetch_record(session)
tree = record.get_dom(session)
tree.xpath('//w[1]')
the_in_dickens = []
for result in result_set:
proxinfo = result.proxInfo
record = result.fetch_record(session)
tree = record.get_dom(session)
raw_tree = tree.xpath('//txt/text()')
raw_str = ' '.join(sentence for sentence in raw_tree)
for hit in proxinfo:
char_location = hit[0][2]
concordance_line = raw_str[char_location-20:char_location+len("the")+20]
the_in_dickens.append(concordance_line)
for result in result_set:
proxinfo = result.proxInfo
record = result.fetch_record(session)
tree = record.get_dom(session)
raw_tree = tree.xpath('//txt/text()')
raw_str = ' '.join(sentence for sentence in raw_tree)
for hit in proxinfo:
char_location = hit[0][2]
print raw_str[char_location-20:char_location+len("the")+20]
for hit in proxinfo:
char_location = hit[0][2]
print raw_str[char_location-20:char_location+len("the")+20]
"""
Explanation: Searching for fog
End of explanation
"""
def build_concordance_with_locations(term, idx, context, max_hits):
query = qf.get_query(session, """(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.{} any/proxinfo "{}" )""".format(idx, term))
result_set = db.search(session, query)
concordance = []
count = 0
for result in result_set:
if count < max_hits:
record = result.fetch_record(session)
tree = record.get_dom(session)
text_nodes = tree.xpath('//txt/text()')
text_only = ' '.join(sentence for sentence in text_nodes)
for hit in result.proxInfo:
if count < max_hits:
count +=1
element_id = hit[0][0]
word_id = hit[0][1]
char_location = hit[0][2]
## the eid only differs from 0 for indexes other than the chapter index
if element_id:
el = tree.xpath('//*[@eid="{}"]'.format(element_id))[0]
el_char_offset = el.get('offset')
word_id = el.get('wordOffset')
char_location = int(el_char_offset) + char_location - 1
concordance_line = text_only[char_location - context : char_location + len(term) + context]
#NOTE in these cases record.process_xpath(session, xpath) is not faster
#TODO check there is only one result
sentence = tree.xpath('/div/descendant::w[{}]/ancestor-or-self::s/@id'.format(int(word_id) + 1))
paragraph = tree.xpath('/div/descendant::w[{}]/ancestor-or-self::p/@id'.format(int(word_id) + 1))
concordance.append((concordance_line, sentence[0], paragraph[0]))
return concordance
concordance = build_concordance_with_locations("fog", "quote-idx", 25, 100)
for line in concordance:
print line[0]
build_concordance_with_locations("fog", "quote-idx", 25, 100)
build_concordance_with_locations("fog", "non-quote-idx", 25, 100)
query = qf.get_query(session, """(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.{} any "{}" )""".format("quote-idx", "fog"))
result_set = db.search(session, query)
for result in result_set:
print result.proxInfo
one_result = result_set[0]
one_fetched_result = one_result.fetch_record(session)
dom = one_fetched_result.get_dom(session)
qs = dom.xpath('//*[@eid="1"]')
qs[0].attrib
dom_text_nodes = dom.xpath('//txt/text()')
dom_text_only = ' '.join(sentence for sentence in dom_text_nodes)
dom_text_only[30639+2:30700]
second_result = result_set[1]
print second_result.proxInfo
second_fetched_result = second_result.fetch_record(session)
second_dom = second_fetched_result.get_dom(session)
second_qs = second_dom.xpath('//qs[@eid="94"]')[0]
second_qs.attrib
second_dom_text_nodes = second_dom.xpath('//txt/text()')
second_dom_text_only = ' '.join(sentence for sentence in second_dom_text_nodes)
second_dom_text_only[3259+4:3400]
query = qf.get_query(session, """(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.{} any "{}" )""".format("chapter-idx", "fog"))
result_set = db.search(session, query)
for result in result_set:
print result.proxInfo
query = qf.get_query(session, """(c3.subcorpus-idx all "dickens" and/cql.proxinfo c3.{} = "{}" )""".format("chapter-idx", "substance on the fog"))
result_set = db.search(session, query)
print len(result_set)
for result in result_set:
print result.proxInfo
# record.get_xml(session)
result = result_set[0]
result = result.fetch_record(session)
record = result.get_xml(session)
record.find('fog')
record[143708:143800]
type(record)
dom = result.get_dom(session)
dom.xpath('//w[@o="19257"]')
xml_to_clean_txt(dom)[19257:19500]
def xml_to_clean_txt(xmltree):
text_nodes = xmltree.xpath('//txt/text()')
text_only = ' '.join(sentence for sentence in text_nodes)
return text_only
xml_to_clean_txt(dom)[19000:19500]
"""
Explanation: Specs / Questions / Todos
What if the search terms are more complex? How do we then compute their len()?
Handle quotes etc.
Pagination (sketched at the end of this notebook)
Add a query builder
Flag frequent words in the search form, since those searches will take some time
Searches for more than one word (either a phrase or an OR search)
A transformer somewhere?
In quotes
End of explanation
"""
|
3upperm2n/notes-deeplearning
|
projects/language-translation/dlnd_language_translation.ipynb
|
mit
|
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
#target_text
"""
Explanation: Language Translation
In this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.
Get the Data
Since translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.
End of explanation
"""
#view_sentence_range = (0, 10)
view_sentence_range = (31, 40)
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
"""
Explanation: Explore the Data
Play around with view_sentence_range to view different parts of the data.
End of explanation
"""
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
"""
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
"""
# TODO: Implement Function
source_sentences = source_text.split('\n')
target_sentences = target_text.split('\n')
#print(source_vocab_to_int)
source_id_text = []
for sentence in source_sentences:
words = sentence.split()
mysentence = []
for word in words:
mysentence.append(source_vocab_to_int.get(word,0)) # return 0 if not in the dd
#mysentence.append(source_vocab_to_int[word])
#print(source_vocab_to_int[word])
#print(source_vocab_to_int.get(word,0))
source_id_text.append(mysentence)
target_id_text = []
for sentence in target_sentences:
words = sentence.split()
mysentence = []
for word in words:
            mysentence.append(target_vocab_to_int.get(word,0)) # return 0 if the word doesn't exist in the dict
mysentence.append(target_vocab_to_int['<EOS>'])
target_id_text.append(mysentence)
# print(source_id_text[0])
# print(target_id_text[0])
#
# use list comprehension is more efficient
#
#target_ids = [[target_vocab_to_int.get(word) for word in line.split()] + [target_vocab_to_int['<EOS>']] for line in target_text.split('\n')]
return source_id_text, target_id_text
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_text_to_ids(text_to_ids)
"""
Explanation: Implement Preprocessing Function
Text to Word Ids
As you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function text_to_ids(), you'll turn source_text and target_text from words to ids. However, you need to add the <EOS> word id at the end of each sentence from target_text. This will help the neural network predict when the sentence should end.
You can get the <EOS> word id by doing:
python
target_vocab_to_int['<EOS>']
You can get other word ids using source_vocab_to_int and target_vocab_to_int.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
"""
Explanation: Preprocess all the data and save it
Running the code cell below will preprocess all the data and save it to file.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
"""
Explanation: Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
"""
Explanation: Check the Version of TensorFlow and Access to GPU
This will check to make sure you have the correct version of TensorFlow and access to a GPU
End of explanation
"""
def model_inputs():
"""
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
"""
# TODO: Implement Function
inputs = tf.placeholder(dtype = tf.int32,
shape=(None, None), name='input')
targets = tf.placeholder(dtype = tf.int32,
shape=(None, None), name='targets')
learning_rate = tf.placeholder(dtype = tf.float32,
name='learning_rate')
keep_prob = tf.placeholder(dtype = tf.float32,
name='keep_prob')
return (inputs, targets, learning_rate, keep_prob)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_model_inputs(model_inputs)
"""
Explanation: Build the Neural Network
You'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:
- model_inputs
- process_decoding_input
- encoding_layer
- decoding_layer_train
- decoding_layer_infer
- decoding_layer
- seq2seq_model
Input
Implement the model_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders:
Input text placeholder named "input" using the TF Placeholder name parameter with rank 2.
Targets placeholder with rank 2.
Learning rate placeholder with rank 0.
Keep probability placeholder named "keep_prob" using the TF Placeholder name parameter with rank 0.
Return the placeholders in the following tuple: (Input, Targets, Learning Rate, Keep Probability)
End of explanation
"""
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
"""
Preprocess target data for decoding
    :param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
"""
# TODO: Implement Function
newbatch = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
newtarget = tf.concat([tf.fill([batch_size, 1],
target_vocab_to_int['<GO>']),
newbatch], 1)
return newtarget
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_process_decoding_input(process_decoding_input)
"""
Explanation: Process Decoding Input
Implement process_decoding_input using TensorFlow to remove the last word id from each batch in target_data and concat the GO ID to the beginning of each batch.
End of explanation
"""
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
"""
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
"""
# TODO: Implement Function
cell = tf.contrib.rnn.BasicLSTMCell(rnn_size) # lstm cell
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob = keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers)
output, state = tf.nn.dynamic_rnn(cell, rnn_inputs, dtype=tf.float32)
return state
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_encoding_layer(encoding_layer)
"""
Explanation: Encoding
Implement encoding_layer() to create an Encoder RNN layer using tf.nn.dynamic_rnn().
End of explanation
"""
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
"""
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
    :param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
"""
# TODO: Implement Function
decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
outputs, state, context = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell,
decoder_fn,
inputs = dec_embed_input,
sequence_length=sequence_length,
scope=decoding_scope)
training_logits = output_fn(outputs)
# add additional dropout
# tf.nn.dropout(training_logits, keep_prob)
return training_logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_train(decoding_layer_train)
"""
Explanation: Decoding - Training
Create training logits using tf.contrib.seq2seq.simple_decoder_fn_train() and tf.contrib.seq2seq.dynamic_rnn_decoder(). Apply the output_fn to the tf.contrib.seq2seq.dynamic_rnn_decoder() outputs.
End of explanation
"""
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
"""
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
"""
# TODO: Implement Function
infer_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(output_fn,
encoder_state,
dec_embeddings,
start_of_sequence_id,
end_of_sequence_id,
maximum_length,
num_decoder_symbols = vocab_size,
dtype = tf.int32)
dp_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob = keep_prob)
outputs, state, context = tf.contrib.seq2seq.dynamic_rnn_decoder(dp_cell,
infer_fn,
sequence_length=maximum_length,
scope=decoding_scope)
return outputs
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer_infer(decoding_layer_infer)
"""
Explanation: Decoding - Inference
Create inference logits using tf.contrib.seq2seq.simple_decoder_fn_inference() and tf.contrib.seq2seq.dynamic_rnn_decoder().
End of explanation
"""
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
"""
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
"""
# TODO: Implement Function
start_symb, end_symb = target_vocab_to_int['<GO>'], target_vocab_to_int['<EOS>']
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
dropout = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
stack_lstm = tf.contrib.rnn.MultiRNNCell([dropout] * num_layers)
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, activation_fn=None,scope = decoding_scope)
with tf.variable_scope('decoding') as decoding_scope:
training_logits = decoding_layer_train(encoder_state,
stack_lstm,
dec_embed_input,
sequence_length,
decoding_scope,
output_fn,
keep_prob)
with tf.variable_scope('decoding', reuse=True) as decoding_scope:
infer_logits = decoding_layer_infer(encoder_state,
stack_lstm,
dec_embeddings,
start_symb,
end_symb,
sequence_length,
vocab_size,
decoding_scope,
output_fn,
keep_prob)
# option 2: more concise
# decoding_scope.reuse_variables()
# infer_logits = decoding_layer_infer(encoder_state,
# stack_lstm,
# dec_embeddings,
# start_symb,
# end_symb,
# sequence_length,
# vocab_size,
# decoding_scope,
# output_fn,
# keep_prob)
return (training_logits, infer_logits)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_decoding_layer(decoding_layer)
"""
Explanation: Build the Decoding Layer
Implement decoding_layer() to create a Decoder RNN layer.
Create RNN cell for decoding using rnn_size and num_layers.
Create the output function using a lambda to transform its input, logits, to class logits.
Use your decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope, output_fn, keep_prob) function to get the training logits.
Use your decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, maximum_length, vocab_size, decoding_scope, output_fn, keep_prob) function to get the inference logits.
Note: You'll need to use tf.variable_scope to share variables between training and inference.
End of explanation
"""
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
"""
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
    :param enc_embedding_size: Encoder embedding size
    :param dec_embedding_size: Decoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
"""
# TODO: Implement Function
enc_embed = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
encode = encoding_layer(enc_embed, rnn_size, num_layers, keep_prob)
dec_process = process_decoding_input(target_data, target_vocab_to_int, batch_size)
dec_embed = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_input = tf.nn.embedding_lookup(dec_embed, dec_process)
train_logits, infer_logits = decoding_layer(dec_input,
dec_embed,
encode,
target_vocab_size,
sequence_length,
rnn_size,
num_layers,
target_vocab_to_int,
keep_prob)
return (train_logits, infer_logits)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_seq2seq_model(seq2seq_model)
"""
Explanation: Build the Neural Network
Apply the functions you implemented above to:
Apply embedding to the input data for the encoder.
Encode the input using your encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob).
Process target data using your process_decoding_input(target_data, target_vocab_to_int, batch_size) function.
Apply embedding to the target data for the decoder.
Decode the encoded input using your decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob).
End of explanation
"""
# Number of Epochs
epochs = 4
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 3
# Embedding Size
encoding_embedding_size = 128
decoding_embedding_size = 128
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.8
"""
Explanation: Neural Network Training
Hyperparameters
Tune the following parameters:
Set epochs to the number of epochs.
Set batch_size to the batch size.
Set rnn_size to the size of the RNNs.
Set num_layers to the number of layers.
Set encoding_embedding_size to the size of the embedding for the encoder.
Set decoding_embedding_size to the size of the embedding for the decoder.
Set learning_rate to the learning rate.
Set keep_probability to the Dropout keep probability
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
"""
Explanation: Build the Graph
Build the graph using the neural network you implemented.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import time
def get_accuracy(target, logits):
"""
Calculate accuracy
"""
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
"""
Explanation: Train
Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# Save parameters for checkpoint
helper.save_params(save_path)
"""
Explanation: Save Parameters
Save the batch_size and save_path parameters for inference.
End of explanation
"""
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
"""
Explanation: Checkpoint
End of explanation
"""
def sentence_to_seq(sentence, vocab_to_int):
"""
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
"""
# TODO: Implement Function
wid_list = []
for word in sentence.lower().split():
wid_list.append(vocab_to_int.get(word, vocab_to_int['<UNK>']))
return wid_list
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_sentence_to_seq(sentence_to_seq)
"""
Explanation: Sentence to Sequence
To feed a sentence into the model for translation, you first need to preprocess it. Implement the function sentence_to_seq() to preprocess new sentences.
Convert the sentence to lowercase
Convert words into ids using vocab_to_int
Convert words not in the vocabulary to the <UNK> word id.
End of explanation
"""
translate_sentence = 'he saw a old yellow truck .'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
"""
Explanation: Translate
This will translate translate_sentence from English to French.
End of explanation
"""
|
jackbrucesimpson/Machine-Learning-Workshop
|
training_testing.ipynb
|
mit
|
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
%matplotlib inline
"""
Explanation: Building a machine learning program
In this section we put together everything we learned about images and features so that we can train a machine learning algorithm to distinguish between the images of different tags.
End of explanation
"""
import glob
rect_tag_files = glob.glob('data/I/*.png')
circle_tag_files = glob.glob('data/O/*.png')
queen_tag_files = glob.glob('data/Q/*.png')
rect_image = cv2.imread(rect_tag_files[0], cv2.IMREAD_GRAYSCALE)
circle_image = cv2.imread(circle_tag_files[0], cv2.IMREAD_GRAYSCALE)
queen_image = cv2.imread(queen_tag_files[0], cv2.IMREAD_GRAYSCALE)
plt.figure(figsize = (10, 7))
plt.title('Rectangle Tag')
plt.axis('off')
plt.imshow(rect_image, cmap = cm.Greys_r)
plt.figure(figsize = (10, 7))
plt.title('Circle Tag')
plt.axis('off')
plt.imshow(circle_image, cmap = cm.Greys_r)
plt.figure(figsize = (10, 7))
plt.title('Queen Tag')
plt.axis('off')
plt.imshow(queen_image, cmap = cm.Greys_r)
"""
Explanation: Loading the Dataset
We'll start by loading the entire dataset of images and viewing a few of them to double check that everything worked:
End of explanation
"""
rect_tag_class = len(rect_tag_files) * [1]
circle_tag_class = len(circle_tag_files) * [2]
queen_tag_class = len(queen_tag_files) * [3]
print(len(rect_tag_files), len(rect_tag_class), rect_tag_files[0], rect_tag_class[0])
print(len(circle_tag_files), len(circle_tag_class), circle_tag_files[0], circle_tag_class[0])
print(len(queen_tag_files), len(queen_tag_class), queen_tag_files[0], queen_tag_class[0])
"""
Explanation: Classes and Reading Images
If you recall from earlier, the three different tag types indicated a different group in the experiment.
Tag Number | Pattern | Details
:-------------------------:|:-------------------------: | :-------------------------:
1 | Rectangle | 100 bees that were the control group
2 | Circle | 100 bees that were treated with caffeine
3 | Blank | Single queen in the colony received this tag
Just like in statistics, machine learning algorithms require that we refer to each image as a number, like levels in a factor. In the case of machine learning however, these levels are called classes. We'll now create a list that will assign each image a number which corresponds to the class it belongs to:
End of explanation
"""
all_tag_files = []
all_tag_files.extend(rect_tag_files)
all_tag_files.extend(circle_tag_files)
all_tag_files.extend(queen_tag_files)
all_classifications = []
all_classifications.extend(rect_tag_class)
all_classifications.extend(circle_tag_class)
all_classifications.extend(queen_tag_class)
all_images = []
for image_file in all_tag_files:
read_image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
all_images.append(read_image)
print("Do the number of images and the number of classifications in the two lists match up?")
print(len(all_images), len(all_classifications))
"""
Explanation: Reading Images
We now need to read in the image files we have listed and merge the classifications into one list.
End of explanation
"""
test_images = [rect_image, circle_image, queen_image]
def modify_image(img):
    # work on the function argument `img` rather than the global `image`
    #img = cv2.blur(img, (5, 5))
    #img = cv2.GaussianBlur(img, (5, 5), 0)
    img = cv2.medianBlur(img, 5)
    #img += 10
    #img = img * 1.9  # the original bare expression `img * 1.9` had no effect
    img = img[4:20,4:20]
    return img
for image in test_images:
image = modify_image(image)
plt.figure(figsize = (15, 12))
plt.axis('off')
plt.imshow(image, cmap = cm.Greys_r)
"""
Explanation: Feature Engineering
Now we have our images, let's use some of the skills we learned earlier to apply some manipulations to the images. We can play with this a lot, and it's often the section you'll come back to once you test out the classification system you train to see if you can improve performance.
Feature Tests
Here we'll visualise the manipulations of a few different images before we apply it to all of them. The easiest way to do this is to create a function. Here we can apply things like modifying the brightness, smoothing and blurring the images.
End of explanation
"""
all_images_flat = []
for image in all_images:
mod_image = modify_image(image)
flat_image = mod_image.flatten()
all_images_flat.append(flat_image)
X = np.array(all_images_flat)
y = np.array(all_classifications)
print(X.shape, y.shape)
"""
Explanation: Modifying and Flattening All Images
Once we've settled on a technique we think might work, we can apply it to all the images in our dataset. We can then flatten the images and convert them to NumPy arrays. The reason why we need to flatten them is because we need to be able to pass a matrix of values to the machine learning algorithm we train.
Once we have done that, we'll want to convert the flattened images and our list of classifications into a NumPy array so that it is in a format that scikitlearn can understand.
End of explanation
"""
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=4)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
"""
Explanation: Splitting a training and testing dataset
Ok, now that our data is ready, we have one more thing to consider before we can train our machine learning program: testing. How do I know that my program will be more accurate if I change the brightness and contrast? How do I that after all this work, my program is doing no better than random chance at determining which tag is in an image?
The solution to this is to split up our data into two segments: a training set and a testing set. The training set is what we will allow our machine learning program to learn from, while the testing set allows us to then see how accurate the program is with data that it has not been exposed to.
End of explanation
"""
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
fit_trans_X = pca.fit(X).transform(X)
plt.figure(figsize = (35, 20))
plt.scatter(fit_trans_X[:, 0], fit_trans_X[:, 1], c=y, s=400)
"""
Explanation: We now have 584 images that we will train our program on and 146 images to test the accuracy of its predictions. Each of the pixels that each image has is now a feature or dimension that we can train our machine learning program on.
Visualising the Data
Now that we have hundreds of images, each with hundreds of dimensions, we need to try to find a way to visualise all this data. To achieve this, we'll try to use a dimensionality reduction technique called PCA. PCA is an unsupervised technique which tries to collapse the number of dimensions in your data down by looking for variables with the highest variation. Here, we'll try to collapse the data down to just 2 dimensions:
End of explanation
"""
from sklearn.lda import LDA
lda = LDA(n_components=2)
lda_model = lda.fit(X_train, y_train)
X_trans = lda_model.transform(X_train)
plt.figure(figsize = (35, 20))
plt.scatter(X_trans[:, 0], X_trans[:, 1], c=y_train, s=400)
"""
Explanation: This looks promising, it looks like PCA was able to separate out the clusters which correspond to the different image types in our data. Things are looking good, but I think we can do better with a supervised dimensionality reduction technique called LDA. LDA is very similar to PCA, except we tell it what groups we want to separate out with our data and it looks for the variation which will help us achieve this.
End of explanation
"""
from sklearn import svm
clf = svm.SVC(gamma=0.0001, C=10)
clf.fit(X_trans, y_train)
"""
Explanation: That's looking really good now, we have 3 neat clusters for each of the tag types. Now we can try to use the data output by LDA to train a machine learning algorithm called a support vector machine (SVM).
SVM Classification
A support vector machine is a machine learning technique which tries to separate out classes by working out lines which separate out the different groups. Once we have trained an SVM, it will try to use these lines to predict which class a new datapoint should belong to. One of the really powerful things about this technique is that while the image below shows it separating out groups in two dimensions, it can work with data that has so many dimensions we have difficulty visualising it.
Below we will train an SVM and experiment a little with a couple of the different parameters.
End of explanation
"""
transform_testing_set = lda.transform(X_test)
y_pred = clf.predict(transform_testing_set)
"""
Explanation: Accuracy
Now that we have trained our SVM on our LDA transformed dataset, we should transform our testing dataset and use the SVM to make predictions:
End of explanation
"""
from sklearn import metrics
print (metrics.accuracy_score(y_test, y_pred))
"""
Explanation: Great, we have now successfully made some predictions with the testing dataset, but how do we tell if any of them were right? To help with this, we can use scikit-learn's metrics to evaluate how accurate our predictions were. This will give us a number between 0 and 1 which will tell us if it got 0% of the predictions correct all the way through to 100%.
End of explanation
"""
|
fastai/course-v3
|
zh-nbs/Lesson3_head_pose.ipynb
|
apache-2.0
|
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.vision import *
"""
Explanation: Practical Deep Learning for Coders, v3
Lesson3_head_pose
Regression with BIWI head pose dataset<br>
用BIWI头部姿势数据集进行回归建模
This is a more advanced example to show how to create custom datasets and do regression with images. Our task is to find the center of the head in each image. The data comes from the BIWI head pose dataset, thanks to Gabriele Fanelli et al. We have converted the images to jpeg format, so you should download the converted dataset from this link.<br>
这个案例是一个更高级的示例,它展示了如何创建自定义数据集,并且对图像进行回归建模。 我们的任务是在每个图片中确定头部的中心位置。数据来自BIWI头部姿势数据集。感谢Gabriele Fanelli等人的努力。我们已经把图片转化为jpeg格式,因此你应该从这里下载转化好的数据。
End of explanation
"""
path = untar_data(URLs.BIWI_HEAD_POSE)
cal = np.genfromtxt(path/'01'/'rgb.cal', skip_footer=6); cal
fname = '09/frame_00667_rgb.jpg'
def img2txt_name(f): return path/f'{str(f)[:-7]}pose.txt'
img = open_image(path/fname)
img.show()
ctr = np.genfromtxt(img2txt_name(fname), skip_header=3); ctr
def convert_biwi(coords):
c1 = coords[0] * cal[0][0]/coords[2] + cal[0][2]
c2 = coords[1] * cal[1][1]/coords[2] + cal[1][2]
return tensor([c2,c1])
def get_ctr(f):
ctr = np.genfromtxt(img2txt_name(f), skip_header=3)
return convert_biwi(ctr)
def get_ip(img,pts): return ImagePoints(FlowField(img.size, pts), scale=True)
get_ctr(fname)
ctr = get_ctr(fname)
img.show(y=get_ip(img, ctr), figsize=(6, 6))
"""
Explanation: Getting and converting the data
数据获取与格式转换
End of explanation
"""
data = (PointsItemList.from_folder(path)
.split_by_valid_func(lambda o: o.parent.name=='13')
.label_from_func(get_ctr)
.transform(get_transforms(), tfm_y=True, size=(120,160))
.databunch().normalize(imagenet_stats)
)
data.show_batch(3, figsize=(9,6))
"""
Explanation: Creating a dataset
创建一个数据集
End of explanation
"""
learn = cnn_learner(data, models.resnet34)
learn.lr_find()
learn.recorder.plot()
lr = 2e-2
learn.fit_one_cycle(5, slice(lr))
learn.save('stage-1')
learn.load('stage-1');
learn.show_results()
"""
Explanation: Train model
训练模型
End of explanation
"""
tfms = get_transforms(max_rotate=20, max_zoom=1.5, max_lighting=0.5, max_warp=0.4, p_affine=1., p_lighting=1.)
data = (PointsItemList.from_folder(path)
.split_by_valid_func(lambda o: o.parent.name=='13')
.label_from_func(get_ctr)
.transform(tfms, tfm_y=True, size=(120,160))
.databunch().normalize(imagenet_stats)
)
def _plot(i,j,ax):
x,y = data.train_ds[0]
x.show(ax, y=y)
plot_multi(_plot, 3, 3, figsize=(8,6))
"""
Explanation: Data augmentation
数据增强
End of explanation
"""
|
MatteusDeloge/opengrid
|
notebooks/DemoTmpo.ipynb
|
apache-2.0
|
import sys
import os
import inspect
import tmpo
import pandas as pd
import datetime as dt
import matplotlib.pyplot as plt
import pytz
from opengrid.library import houseprint
from opengrid import config
c=config.Config()
%matplotlib inline
plt.rcParams['figure.figsize'] = 14,8
"""
Explanation: Quick Tmpo demo
To get started, clone the tmpo-py repository from the opengrid github page. Then specify the path to this repo on your hard drive in your opengrid.cfg file.
End of explanation
"""
s = tmpo.Session()
s.debug = True
"""
Explanation: Create a tmpo session, and enter debug mode to get more output.
End of explanation
"""
s.add('d209e2bbb35b82b83cc0de5e8b84a4ff','e16d9c9543572906a11649d92f902226')
"""
Explanation: Add a sensor and token to start tracking the data for this given sensor. You only have to do this once for each sensor.
End of explanation
"""
s.sync()
"""
Explanation: Sync all available data to your hard drive. All sensors previously added will be synced.
End of explanation
"""
ts = s.series('d209e2bbb35b82b83cc0de5e8b84a4ff')
print(ts)
"""
Explanation: Now you can create a pandas timeseries with all data from a given sensor.
End of explanation
"""
ts.plot()
plt.show()
"""
Explanation: When plotting the data, you'll notice that this ts contains cumulative data, and the time axis (= pandas index) contains seconds since the epoch. Not very practical.
End of explanation
"""
tsmin = ts.resample(rule='60s')
tsmin=tsmin.interpolate(method='linear')
tsmin=tsmin.diff()*3600/60
tsmin.plot()
"""
Explanation: To show differential data (eg instantaneous power), we first have to resample this cumulative data to the interval we want to obtain. We use linear interpolation to approximate the cumulative value between two datapoints. In the example below, we resample to minute values. Then, we take the difference between the cumulative values at minute intervals in order to get the average power (per minute). As the original data is in Wh, we have to convert it to W.
End of explanation
"""
tsmin.ix['20141016':'20141018'].plot()
ts.name
"""
Explanation: If we want to plot only a specific period, we can slice the data with the .ix[from:to] method.
End of explanation
"""
|
dataDogma/Computer-Science
|
.ipynb_checkpoints/DAT208x - Week 1 - Python Basics-checkpoint.ipynb
|
gpl-3.0
|
# working with print function
print(5 / 8)
# Add another print function on new line
print(7 + 10)
"""
Explanation: Lecture : Hello Python!
[RQ-1] : Which of the following statements is correct?
Ans: The Ipython Shell is typically used to work with Python interactively.
[RQ-2] : Which file extension is used for Python script files?**
Ans: .py
[RQ-3] : You need to print the result of adding 3 and 4 inside a script. Which line of code should you write in the script?
Ans: print(int x + int y)
Lab : Hello Python!
Objective :
How to work with Ipython shell.
Writing python scripts.
The Python Interface -- 100xp, Status : Earned
End of explanation
"""
# Just testing division
print(5 / 8)
# Additon works too ( added comment here )
print(7 + 10)
"""
Explanation: When to use python? -- 50xp, Status : Earned
Python is a pretty versatile language. For what applications can you use Python?
Ans: All of the above
Any comments? -- 100xp, Status : Earned
We can add comments to Python scripts.
Comments are short snippets of plain English, to help you and others understand what the code is about.
To add a comment, use the '#' tag and insert it at the front of the text.
Comments are inert, i.e. they don't affect the code's results.
Comments are ignored by the Python interpreter.
End of explanation
"""
"""Suppose you have $100, which you can invest with a 10% return each year. After one year, it's
100 x 1.1 = 110 dollars, and after two years it's 100 x 1.1 x 1.1 = 121.
Add code to calculate how much money you end up with after 7 years"""
print(5 + 5)
print(5 - 5)
# Multiplication and division
print(3 * 5)
print(10 / 2)
# Exponentiation
print(4 ** 2)
# Modulo
print(18 % 7)
# How much is your $100 worth after 7 years?
# first try was unsuccessful, so used only the * and ** operators.
print ( 100 * ( 1.1 ** 7 ) )
"""
Explanation: Python as a calculator -- 100xp, Status : Earned
Python is perfectly suited to do basic calculations. Apart from addition, subtraction, multiplication and division, there is also support for more advanced operations such as:
Exponentiation: **. This operator raises the number to its left to the power of the number to its right: for example 4**2 will give 16.
Modulo: %. It returns the remainder of the division of the number to the left by the number on its right, for example 18 % 7 equals 4.
End of explanation
"""
|
KECB/learn
|
BAMM.101x/Datetime_Example.ipynb
|
mit
|
#Unfortunately, this won't work on Windows.
!head sample_data.csv
"""
Explanation: <h1>Bucketing time</h1>
<h4>The file "sample_data.csv" contains start times and processing times for all complaints registered with New York City's 311 complaint hotline on 01/01/2016. Our goal is to compute the average processing time for each hourly bucket.
<h4>Let's take a quick look at the data
End of explanation
"""
data_tuples = list()
with open('sample_data.csv','r') as f:
for line in f:
data_tuples.append(line.strip().split(','))
"""
Explanation: <h3>Step 1: Read the data</h3>
End of explanation
"""
data_tuples[0:10]
"""
Explanation: <h4>Let's look at the first 10 lines</h4>
End of explanation
"""
#Figure out the format string
# http://pubs.opengroup.org/onlinepubs/009695399/functions/strptime.html
import datetime
x='2016-01-01 00:00:09'
format_str = "%Y-%m-%d %H:%M:%S"
datetime.datetime.strptime(x,format_str)
data_tuples = list()
with open('sample_data.csv','r') as f:
for line in f:
data_tuples.append(line.strip().split(','))
import datetime
for i in range(0,len(data_tuples)):
data_tuples[i][0] = datetime.datetime.strptime(data_tuples[i][0],format_str)
data_tuples[i][1] = float(data_tuples[i][1])
#Let's see if this worked
data_tuples[0:10]
"""
Explanation: <li><b>Element 1 of the tuple is a date inside a string
<li>Element 2 is a double inside a string
<li>Let's convert them
End of explanation
"""
#Extract the hour from a datetime object
x=data_tuples[0][0]
x.hour
"""
Explanation: <h4>We can replace the datetime by hourly buckets</h4>
End of explanation
"""
data_tuples = [(x[0].hour,x[1]) for x in data_tuples]
data_tuples[0:10]
data_tuples = list()
with open('sample_data.csv','r') as f:
for line in f:
data_tuples.append(line.strip().split(','))
import datetime
for i in range(0,len(data_tuples)):
data_tuples[i][0] = datetime.datetime.strptime(data_tuples[i][0],format_str)
data_tuples[i][1] = float(data_tuples[i][1])
"""
Explanation: <h4>Use list comprehension to bucket the data</h4>
End of explanation
"""
def get_data(filename):
data_tuples = list()
with open(filename,'r') as f:
for line in f:
data_tuples.append(line.strip().split(','))
import datetime
format_str = "%Y-%m-%d %H:%M:%S"
data_tuples = [(datetime.datetime.strptime(x[0],format_str).hour,float(x[1])) for x in data_tuples]
return data_tuples
get_data('sample_data.csv')
"""
Explanation: <h3>Create a function that returns the data</h3>
End of explanation
"""
buckets = dict()
for item in get_data('sample_data.csv'):
if item[0] in buckets:
buckets[item[0]][0] += 1
buckets[item[0]][1] += item[1]
else:
buckets[item[0]] = [1,item[1]]
buckets
"""
Explanation: <h3>Step 2: Accumulate counts and sums for each bucket
End of explanation
"""
for key,value in buckets.items():
print("Hour:",key,"\tAverage:",value[1]/value[0])
"""
Explanation: <h3>Let's print them to see what sort of pattern is there in the data</h3>
<h4>Bear in mind that this is just one day's data!
End of explanation
"""
def get_hour_bucket_averages(filename):
def get_data(filename):
data_tuples = list()
with open(filename,'r') as f:
for line in f:
data_tuples.append(line.strip().split(','))
import datetime
format_str = "%Y-%m-%d %H:%M:%S"
data_tuples = [(datetime.datetime.strptime(x[0],format_str).hour,float(x[1])) for x in data_tuples]
return data_tuples
buckets = dict()
for item in get_data(filename):
if item[0] in buckets:
buckets[item[0]][0] += 1
buckets[item[0]][1] += item[1]
else:
buckets[item[0]] = [1,item[1]]
return [(key,value[1]/value[0]) for key,value in buckets.items()]
get_hour_bucket_averages('sample_data.csv')
"""
Explanation: <h3>Put everything into a function</h3>
<h4>This way, we can easily test other similar datasets
End of explanation
"""
get_hour_bucket_averages('all_data.csv')
def remove_punctuation(word):
    punctuations = ['.', '!', '?', ',', '(', ')']
    for punctuation in punctuations:
        if punctuation in word:
            print(punctuation)
            # str.replace returns a new string, so the result must be reassigned
            word = word.replace(punctuation, '')
    return word
remove_punctuation("sis!")
"""
Explanation: <h3>The file all_data.csv contains data from January to September 2016</h3>
<h4>We can test whether our one day result is generally true or not</h4>
End of explanation
"""
|
ga7g08/ga7g08.github.io
|
_notebooks/2015-04-24-Gaussian-mixture-model-for-pulsar-population.ipynb
|
mit
|
%%writefile Makefile
DOWNLOADED = psrcat_pkg.tar
ATNF_DATABASE = psrcat_tar
DATA_FILE = ATNF_data_file.txt
PSRCAT_FILE_PATH = ./psrcat_tar/psrcat.db
all: $(DATA_FILE) $(ATNF_DATABASE)
.PHONY: clean
$(ATNF_DATABASE):
wget http://www.atnf.csiro.au/people/pulsar/psrcat/downloads/psrcat_pkg.tar.gz
gunzip psrcat_pkg.tar.gz
tar -xvf psrcat_pkg.tar
cd psrcat_tar && ls && . makeit
$(DATA_FILE):
export PSRCAT_FILE=${PSRCAT_FILE_PATH}; \
./psrcat_tar/psrcat -c "name F0 F1 F2 BINARY TYPE W10" -nonumber -o short > ${DATA_FILE}
clean:
rm -f ${DOWNLOADED}
"""
Explanation: Modelling the pulsar population with a Gaussian mixture model
In this script I will describe some work I did to cluster the pulsar population using Gaussian mixture models.
Motivation
Pulsars come in a range of types which we can imagine as subpopulations of the whole population. Two of the largest are normal radio pulsars and millisecond pulsars, the latter characterised by much larger spin frequencies. To see an overview of all the types have a look here.
In this work I wanted to come up with a method to distinguish between these populations.
Setup
Firstly we need to get hold of the data, which comes courtesy of the ATNF pulsar catalogue. Here is a makefile to download and extract the data we need:
End of explanation
"""
! head ATNF_data_file.txt
"""
Explanation: Unfortunately using a makefile in the notebook is kind of buggy so it is best to run this from the command line:
$ make
This will have created a file ATNF_data_file.txt in the local directory which looks like:
End of explanation
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
%matplotlib inline
DATA_FILE = "ATNF_data_file.txt"
data = np.genfromtxt(DATA_FILE, skip_header=4, skip_footer=1, dtype=None)
F0 = np.genfromtxt(data[:, 1])
F1 = np.genfromtxt(data[:, 2])
# keep only the rows where both F0 and F1 are finite; a single joint mask
# avoids the length mismatch caused by filtering the two arrays one after the other
finite = np.isfinite(F0) & np.isfinite(F1)
F0 = F0[finite]
F1 = F1[finite]
data = np.log10(F0)
out = plt.hist(data, bins=50)
plt.xlabel("$\log_{10}(f)$")
plt.ylabel("Count")
plt.show()
"""
Explanation: For the time being, we can just import the data which we are interested in, namely the frequency 'F0'.
We now import it, sanitize it and plot a histogram to motivate the rest of the work
End of explanation
"""
from sklearn.mixture import GMM
model = GMM(2)
model.fit(data)
"""
Explanation: As you can appreciate there are two distinct populations of pulsars: the larger regular population and the smaller
millisecond pulsars which have significantly larger spin frequencies. We will now apply a Gaussian mixture model to
learn about the underlying populations.
Gaussian Mixture model
With scikit-learn this is an incredibly simple problem. We simply create a model which we restrict to having two components, then fit the model to the data; this uses the expectation-maximisation method as described in the docs.
End of explanation
"""
vals = np.linspace(-1.5, 3, 100)
logprobs, responsibilities = model.score_samples(vals)
pdf = np.exp(logprobs)
plt.hist(data, bins=50, normed=True)
plt.plot(vals, pdf, lw=3)
plt.show()
"""
Explanation: Checking the fit
To check the fit, let us plot the pdf of the model with the data
End of explanation
"""
from scipy.stats import norm
[muA, muB] = model.means_
[sigmaA, sigmaB] = np.sqrt(model.covars_)
[wA, wB] = model.weights_
plt.hist(data, bins=50, normed=True)
plt.plot(vals, wA*norm.pdf(vals, loc=muA, scale=sigmaA), lw=3)
plt.plot(vals, wB*norm.pdf(vals, loc=muB, scale=sigmaB), lw=3)
plt.show()
"""
Explanation: This looks very promising; we can also extract the means and standard deviations of the two Gaussian components in the mixture:
End of explanation
"""
def SmallerThanNeighbours(y):
    " Return a boolean array for the entries in y smaller than both neighbours"
    return np.r_[True, y[1:] < y[:-1]] & np.r_[y[:-1] < y[1:], True]
split = SmallerThanNeighbours(pdf)[1:-1]
min_val = vals[1:-1][split][0]  # trim the endpoints so the mask and the values line up
plt.axvline(min_val)
plt.plot(vals, pdf, lw=3)
print "The min_val of F0 = {:2.2f} Hz".format(10**(min_val))
"""
Explanation: Getting an approximate cut-off
We can approximate the distinction between the two populations by the minimum in the joint density. This can
easily be found by a smaller-than-neighbours check:
End of explanation
"""
labels = model.predict(data)
ax = plt.subplot(111)
ax.scatter(np.log10(F0[labels==0]), np.log10(np.abs(F1[labels==0])), c="r")
ax.scatter(np.log10(F0[labels==1]), np.log10(np.abs(F1[labels==1])), c="b")
ax.set_xlabel("$\log_{10}(f)$")
ax.set_ylabel("$\log_{10}(\dot{f})$")
plt.show()
"""
Explanation: Classifying the points
Finally we will plot the frequency and first derivative values in a so-called "f-fdot" plot.
End of explanation
"""
|