| repo_name | path | license | cells | types |
|---|---|---|---|---|
wtbarnes/loops-workshop-2017-talk
|
notebooks/time_average_em.ipynb
|
mit
|
[
"Time-average EM Cubes\nCalculate the time-averaged emission measure distributions from the exact thermodynamic results and save them to be easily reloaded and used later.",
"import os\nimport io\nimport copy\nimport glob\nimport urllib\n\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nimport seaborn as sns\nimport astropy.units as u\nimport astropy.constants as const\nfrom scipy.ndimage import gaussian_filter\nfrom sunpy.map import Map,GenericMap\n\nimport synthesizAR\nfrom synthesizAR.util import EMCube\nfrom synthesizAR.instruments import InstrumentHinodeEIS\n\n%matplotlib inline\n\nbase1 = '/data/datadrive1/ar_forward_modeling/systematic_ar_study/noaa1109_tn{}'\nbase2 = '/data/datadrive2/ar_viz/systematic_ar_study/noaa1109_tn{}/'\n\neis = InstrumentHinodeEIS([7.5e3,1.25e4]*u.s)\n\nfrequencies = [250,750,'750-ion',2500,5000]\n\ntemperature_bin_edges = 10.**(np.arange(5.6, 7.0, 0.05))*u.K",
"Iterate over all \"true\" emission measure distributions and time-average them over the given interval.",
"time_averaged_ems = {'{}'.format(freq):None for freq in frequencies}\n\nfor freq in frequencies:\n print('tn = {} s'.format(freq))\n if type(freq) == int:\n base = base1\n else:\n base = base2\n # setup field and observer objects\n field = synthesizAR.Skeleton.restore(os.path.join(base.format(freq),'field_checkpoint'))\n observer = synthesizAR.Observer(field,[eis],ds=field._convert_angle_to_length(0.4*u.arcsec))\n observer.build_detector_files(base.format(freq))\n # iterate over time\n for time in eis.observing_time:\n print('t = {}'.format(time))\n emcube = observer.make_emission_measure_map(time,eis,temperature_bin_edges=temperature_bin_edges)\n if time_averaged_ems['{}'.format(freq)] is None:\n time_averaged_ems['{}'.format(freq)] = emcube\n for m in time_averaged_ems['{}'.format(freq)]:\n m.data /= eis.observing_time.shape[0]\n else:\n for m1,m2 in zip(time_averaged_ems['{}'.format(freq)],emcube):\n m1.data += m2.data/eis.observing_time.shape[0]",
"Visualize the results to make sure we've averaged correctly.",
"fig = plt.figure(figsize=(20,15))\nplt.subplots_adjust(right=0.87)\ncax = fig.add_axes([0.88, 0.12, 0.025, 0.75])\nplt.subplots_adjust(hspace=0.1)\nfor i in range(time_averaged_ems['250'].temperature_bin_edges.shape[0]-1):\n # apply a filter to the \n tmp = time_averaged_ems['250'][i].submap(u.Quantity([250,500],u.arcsec),u.Quantity([150,400],u.arcsec))\n tmp.data = gaussian_filter(tmp.data,\n eis.channels[0]['gaussian_width']['x'].value\n )\n # set up axes properly and add plot\n ax = fig.add_subplot(6,5,i+1,projection=tmp)\n im = tmp.plot(axes=ax,\n annotate=False,\n cmap=matplotlib.cm.get_cmap('magma'),\n norm=matplotlib.colors.SymLogNorm(1, vmin=1e25, vmax=1e29)\n )\n # set title and labels\n ax.set_title(r'${t0:.2f}-{t1:.2f}$ {uni}'.format(t0=np.log10(tmp.meta['temp_a']),\n t1=np.log10(tmp.meta['temp_b']),uni='K'))\n if i<25:\n ax.coords[0].set_ticklabel_visible(False)\n else:\n ax.set_xlabel(r'$x$ ({})'.format(u.Unit(tmp.meta['cunit1'])))\n if i%5==0:\n ax.set_ylabel(r'$y$ ({})'.format(u.Unit(tmp.meta['cunit2'])))\n else:\n ax.coords[1].set_ticklabel_visible(False)\ncbar = fig.colorbar(im,cax=cax)",
"Now save the results to our local temporary data folder.",
"for key in time_averaged_ems:\n time_averaged_ems[key].save('../data/em_cubes_true_tn{}_t7500-12500.h5'.format(key))\n\nfoo = EMCube.restore('../data/em_cubes_tn250_t7500-12500.h5')\n\nfig = plt.figure(figsize=(20,15))\nplt.subplots_adjust(right=0.87)\ncax = fig.add_axes([0.88, 0.12, 0.025, 0.75])\nplt.subplots_adjust(hspace=0.1)\nfor i in range(foo.temperature_bin_edges.shape[0]-1):\n # apply a filter to the \n tmp = foo[i].submap(u.Quantity([250,500],u.arcsec),u.Quantity([150,400],u.arcsec))\n tmp.data = gaussian_filter(tmp.data,\n eis.channels[0]['gaussian_width']['x'].value\n )\n # set up axes properly and add plot\n ax = fig.add_subplot(6,5,i+1,projection=tmp)\n im = tmp.plot(axes=ax,\n annotate=False,\n cmap=matplotlib.cm.get_cmap('magma'),\n norm=matplotlib.colors.SymLogNorm(1, vmin=1e25, vmax=1e29)\n )\n # set title and labels\n ax.set_title(r'${t0:.2f}-{t1:.2f}$ {uni}'.format(t0=np.log10(tmp.meta['temp_a']),\n t1=np.log10(tmp.meta['temp_b']),uni='K'))\n if i<25:\n ax.coords[0].set_ticklabel_visible(False)\n else:\n ax.set_xlabel(r'$x$ ({})'.format(u.Unit(tmp.meta['cunit1'])))\n if i%5==0:\n ax.set_ylabel(r'$y$ ({})'.format(u.Unit(tmp.meta['cunit2'])))\n else:\n ax.coords[1].set_ticklabel_visible(False)\ncbar = fig.colorbar(im,cax=cax)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
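The time-averaging loop in the cells above builds a running mean by adding each emission measure map already divided by the number of observing times. A minimal NumPy sketch of that pattern, using random 2D arrays as stand-ins for the synthesizAR `EMCube` maps (the shapes and values here are illustrative only):

```python
import numpy as np

# Stand-ins for the per-time-step emission measure maps (shapes and values are illustrative).
n_times, shape = 5, (64, 64)
rng = np.random.default_rng(0)
em_maps = [rng.random(shape) for _ in range(n_times)]

# Accumulate each map already divided by the number of time steps,
# mirroring the m.data /= ... and m1.data += m2.data / ... pattern above.
time_average = np.zeros(shape)
for em in em_maps:
    time_average += em / n_times

# The running average equals the straightforward mean over the time axis.
assert np.allclose(time_average, np.mean(em_maps, axis=0))
```

Accumulating the pre-divided terms keeps only one map per instrument in memory at a time, which matters when each map is a full-resolution detector image.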
wei-Z/Python-Machine-Learning
|
code/bonus/logistic_regression.ipynb
|
mit
|
[
"Sebastian Raschka, 2015\nhttps://github.com/rasbt/python-machine-learning-book\nPython Machine Learning Essentials - Code Examples\nBonus Material - A Simple Logistic Regression Implementation\nNote that the optional watermark extension is a small IPython notebook plugin that I developed to make the code reproducible. You can just skip the following line(s).",
"%load_ext watermark\n%watermark -a 'Sebastian Raschka' -u -d -v -p numpy,pandas,matplotlib\n\n# to install watermark just uncomment the following line:\n#%install_ext https://raw.githubusercontent.com/rasbt/watermark/master/watermark.py",
"Overview\nPlease see Chapter 3 for more details on logistic regression.\n\nImplementing logistic regression in Python\nThe following implementation is similar to the Adaline implementation in Chapter 2 except that we replace the sum of squared errors cost function with the logistic cost function\n$$J(\\mathbf{w}) = \\sum_{i=1}^{m} - y^{(i)} log \\bigg( \\phi\\big(z^{(i)}\\big) \\bigg) - \\big(1 - y^{(i)}\\big) log\\bigg(1-\\phi\\big(z^{(i)}\\big)\\bigg).$$",
"class LogisticRegression(object):\n \"\"\"LogisticRegression classifier.\n\n Parameters\n ------------\n eta : float\n Learning rate (between 0.0 and 1.0)\n n_iter : int\n Passes over the training dataset.\n\n Attributes\n -----------\n w_ : 1d-array\n Weights after fitting.\n cost_ : list\n Cost in every epoch.\n\n \"\"\"\n def __init__(self, eta=0.01, n_iter=50):\n self.eta = eta\n self.n_iter = n_iter\n\n def fit(self, X, y):\n \"\"\" Fit training data.\n\n Parameters\n ----------\n X : {array-like}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n y : array-like, shape = [n_samples]\n Target values.\n\n Returns\n -------\n self : object\n\n \"\"\"\n self.w_ = np.zeros(1 + X.shape[1])\n self.cost_ = [] \n for i in range(self.n_iter):\n y_val = self.activation(X)\n errors = (y - y_val)\n neg_grad = X.T.dot(errors)\n self.w_[1:] += self.eta * neg_grad\n self.w_[0] += self.eta * errors.sum()\n self.cost_.append(self._logit_cost(y, self.activation(X)))\n return self\n\n def _logit_cost(self, y, y_val):\n logit = -y.dot(np.log(y_val)) - ((1 - y).dot(np.log(1 - y_val)))\n return logit\n \n def _sigmoid(self, z):\n return 1.0 / (1.0 + np.exp(-z))\n \n def net_input(self, X):\n \"\"\"Calculate net input\"\"\"\n return np.dot(X, self.w_[1:]) + self.w_[0]\n\n def activation(self, X):\n \"\"\" Activate the logistic neuron\"\"\"\n z = self.net_input(X)\n return self._sigmoid(z)\n \n def predict_proba(self, X):\n \"\"\"\n Predict class probabilities for X.\n \n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n \n Returns\n ----------\n Class 1 probability : float\n \n \"\"\"\n return activation(X)\n\n def predict(self, X):\n \"\"\"\n Predict class labels for X.\n \n Parameters\n ----------\n X : {array-like, sparse matrix}, shape = [n_samples, n_features]\n Training vectors, where n_samples is the number of samples and\n n_features is the number of features.\n \n Returns\n ----------\n class : int\n Predicted class label.\n \n \"\"\"\n # equivalent to np.where(self.activation(X) >= 0.5, 1, 0)\n return np.where(self.net_input(X) >= 0.0, 1, 0)",
"Reading-in the Iris data",
"import pandas as pd\n\ndf = pd.read_csv('https://archive.ics.uci.edu/ml/'\n 'machine-learning-databases/iris/iris.data', header=None)\ndf.tail()\n\nimport numpy as np\n\n# select setosa and versicolor\ny = df.iloc[0:100, 4].values\ny = np.where(y == 'Iris-setosa', 1, 0)\n\n# extract sepal length and petal length\nX = df.iloc[0:100, [0, 2]].values\n\n# standardize features\nX_std = np.copy(X)\nX_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()\nX_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()",
"A function for plotting decision regions",
"from matplotlib.colors import ListedColormap\n\ndef plot_decision_regions(X, y, classifier, resolution=0.02):\n\n # setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n # plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],\n alpha=0.8, c=cmap(idx),\n marker=markers[idx], label=cl)\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\nlr = LogisticRegression(n_iter=500, eta=0.2).fit(X_std, y)\nplt.plot(range(1, len(lr.cost_) + 1), np.log10(lr.cost_))\nplt.xlabel('Epochs')\nplt.ylabel('Cost')\nplt.title('Logistic Regression - Learning rate 0.01')\n\nplt.tight_layout()\nplt.show()\n\nplot_decision_regions(X_std, y, classifier=lr)\nplt.title('Logistic Regression - Gradient Descent')\nplt.xlabel('sepal length [standardized]')\nplt.ylabel('petal length [standardized]')\nplt.legend(loc='upper left')\nplt.tight_layout()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
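As a sanity check on the update rule used in the `fit()` method of the `LogisticRegression` class above, here is a self-contained sketch of the same batch gradient-descent step for the logistic cost on a synthetic two-class dataset (the data, learning rate, and iteration count are made up for illustration):

```python
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Synthetic, roughly separable two-class data (illustrative only).
rng = np.random.RandomState(1)
X = np.vstack([rng.randn(50, 2) + 2.0, rng.randn(50, 2) - 2.0])
y = np.array([1] * 50 + [0] * 50)

eta, n_iter = 0.01, 50
w, b = np.zeros(X.shape[1]), 0.0
costs = []
for _ in range(n_iter):
    phi = sigmoid(X.dot(w) + b)
    errors = y - phi
    # Same batch update as fit(): w += eta * X^T (y - phi), bias += eta * sum(y - phi)
    w += eta * X.T.dot(errors)
    b += eta * errors.sum()
    # Logistic cost from the formula in the markdown cell, clipped for numerical safety
    phi = np.clip(phi, 1e-12, 1 - 1e-12)
    costs.append(-y.dot(np.log(phi)) - (1 - y).dot(np.log(1 - phi)))

print(costs[0], costs[-1])  # the cost should decrease on this toy problem
```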
benkoo/fast_ai_coursenotes
|
kerasTutorial/FirstMNISTProgram.ipynb
|
apache-2.0
|
[
"%matplotlib inline\nimport keras.backend as K\nK.set_image_dim_ordering('th')\n\nimport numpy as np\nnp.random.seed(123)",
"Next, we'll import the Sequential model type from Keras. This is simply a linear stack of neural network layers, and it's perfect for the type of feed-forward CNN we're building in this tutorial.",
"from keras.models import Sequential\n\nfrom keras.layers import Dense, Dropout, Activation, Flatten\n\nfrom keras.layers import Convolution2D, MaxPooling2D\n\nfrom keras.utils import np_utils\n\nfrom keras.datasets import mnist\n#load pre-shffuled MNIST data into train and test sets\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nprint(X_train.shape)\n\nfrom matplotlib import pyplot as plt\nplt.imshow(X_train[0])\n\nX_train = X_train.reshape(X_train.shape[0],1,28,28)\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28)\n\nprint(X_train.shape)\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\nprint(y_train.shape)\n\nprint(y_train[:10])\n\nY_train=np_utils.to_categorical(y_train, 10)\nY_test = np_utils.to_categorical(y_test, 10)\n\nprint(Y_train.shape)",
"Step 7: Define model architecture.\nNow we're ready to define our model architecture. In actual R&D work, researchers will spend a considerable amount of time studying model architectures.\nTo keep this tutorial moving along, we're not going to discuss the theory or math here. This alone is a rich and meaty field, and we recommend the CS231n class mentioned earlier for those who want to learn more.\nPlus, when you're just starting out, you can just replicate proven architectures from academic papers or use existing examples. Here's a list of example implementations in Keras.\nLet's start by declaring a sequential model format:",
"model = Sequential()\n\nmodel.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(1,28,28)))\n\nprint(model.output_shape)\n\nmodel.add(Convolution2D(32, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\nmodel = Sequential()\n \nmodel.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(1,28,28)))\nmodel.add(Convolution2D(32, 3, 3, activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.25))\n \nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nmodel.fit(X_train, Y_train, \n batch_size=32, nb_epoch=10, verbose=1)\n\nscore = model.evaluate(X_test, Y_test, verbose=0)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
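The call to `np_utils.to_categorical(y_train, 10)` in the cells above turns integer class labels into one-hot rows. A small plain-NumPy sketch of that encoding, with a few made-up labels, may make the resulting `(n_samples, 10)` shape easier to see:

```python
import numpy as np

def to_one_hot(labels, num_classes):
    """One-hot encode integer labels, mirroring what to_categorical produces here."""
    one_hot = np.zeros((len(labels), num_classes), dtype='float32')
    one_hot[np.arange(len(labels)), labels] = 1.0
    return one_hot

y = np.array([5, 0, 4, 1, 9])   # a few made-up digit labels
Y = to_one_hot(y, 10)
print(Y.shape)                  # (5, 10)
print(Y.argmax(axis=1))         # recovers [5 0 4 1 9]
```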
metpy/MetPy
|
v0.9/_downloads/52c3e3d710569bed83f26e14e23bb356/Inverse_Distance_Verification.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Inverse Distance Verification: Cressman and Barnes\nCompare inverse distance interpolation methods\nTwo popular interpolation schemes that use inverse distance weighting of observations are the\nBarnes and Cressman analyses. The Cressman analysis is relatively straightforward and uses\nthe ratio between distance of an observation from a grid cell and the maximum allowable\ndistance to calculate the relative importance of an observation for calculating an\ninterpolation value. Barnes uses the inverse exponential ratio of each distance between\nan observation and a grid cell and the average spacing of the observations over the domain.\nAlgorithmically:\n\nA KDTree data structure is built using the locations of each observation.\nAll observations within a maximum allowable distance of a particular grid cell are found in\n O(log n) time.\nUsing the weighting rules for Cressman or Barnes analyses, the observations are given a\n proportional value, primarily based on their distance from the grid cell.\nThe sum of these proportional values is calculated and this value is used as the\n interpolated value.\nSteps 2 through 4 are repeated for each grid cell.",
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.spatial import cKDTree\nfrom scipy.spatial.distance import cdist\n\nfrom metpy.interpolate.geometry import dist_2\nfrom metpy.interpolate.points import barnes_point, cressman_point\nfrom metpy.interpolate.tools import calc_kappa\n\n\ndef draw_circle(ax, x, y, r, m, label):\n th = np.linspace(0, 2 * np.pi, 100)\n nx = x + r * np.cos(th)\n ny = y + r * np.sin(th)\n ax.plot(nx, ny, m, label=label)",
"Generate random x and y coordinates, and observation values proportional to x * y.\nSet up two test grid locations at (30, 30) and (60, 60).",
"np.random.seed(100)\n\npts = np.random.randint(0, 100, (10, 2))\nxp = pts[:, 0]\nyp = pts[:, 1]\nzp = xp * xp / 1000\n\nsim_gridx = [30, 60]\nsim_gridy = [30, 60]",
"Set up a cKDTree object and query all of the observations within \"radius\" of each grid point.\nThe variable indices represents the index of each matched coordinate within the\ncKDTree's data list.",
"grid_points = np.array(list(zip(sim_gridx, sim_gridy)))\n\nradius = 40\nobs_tree = cKDTree(list(zip(xp, yp)))\nindices = obs_tree.query_ball_point(grid_points, r=radius)",
"For grid 0, we will use Cressman to interpolate its value.",
"x1, y1 = obs_tree.data[indices[0]].T\ncress_dist = dist_2(sim_gridx[0], sim_gridy[0], x1, y1)\ncress_obs = zp[indices[0]]\n\ncress_val = cressman_point(cress_dist, cress_obs, radius)",
"For grid 1, we will use barnes to interpolate its value.\nWe need to calculate kappa--the average distance between observations over the domain.",
"x2, y2 = obs_tree.data[indices[1]].T\nbarnes_dist = dist_2(sim_gridx[1], sim_gridy[1], x2, y2)\nbarnes_obs = zp[indices[1]]\n\nave_spacing = np.mean((cdist(list(zip(xp, yp)), list(zip(xp, yp)))))\nkappa = calc_kappa(ave_spacing)\n\nbarnes_val = barnes_point(barnes_dist, barnes_obs, kappa)",
"Plot all of the affiliated information and interpolation values.",
"fig, ax = plt.subplots(1, 1, figsize=(15, 10))\nfor i, zval in enumerate(zp):\n ax.plot(pts[i, 0], pts[i, 1], '.')\n ax.annotate(str(zval) + ' F', xy=(pts[i, 0] + 2, pts[i, 1]))\n\nax.plot(sim_gridx, sim_gridy, '+', markersize=10)\n\nax.plot(x1, y1, 'ko', fillstyle='none', markersize=10, label='grid 0 matches')\nax.plot(x2, y2, 'ks', fillstyle='none', markersize=10, label='grid 1 matches')\n\ndraw_circle(ax, sim_gridx[0], sim_gridy[0], m='k-', r=radius, label='grid 0 radius')\ndraw_circle(ax, sim_gridx[1], sim_gridy[1], m='b-', r=radius, label='grid 1 radius')\n\nax.annotate('grid 0: cressman {:.3f}'.format(cress_val), xy=(sim_gridx[0] + 2, sim_gridy[0]))\nax.annotate('grid 1: barnes {:.3f}'.format(barnes_val), xy=(sim_gridx[1] + 2, sim_gridy[1]))\n\nax.set_aspect('equal', 'datalim')\nax.legend()",
"For each point, we will do a manual check of the interpolation values by doing a step by\nstep and visual breakdown.\nPlot the grid point, observations within radius of the grid point, their locations, and\ntheir distances from the grid point.",
"fig, ax = plt.subplots(1, 1, figsize=(15, 10))\nax.annotate('grid 0: ({}, {})'.format(sim_gridx[0], sim_gridy[0]),\n xy=(sim_gridx[0] + 2, sim_gridy[0]))\nax.plot(sim_gridx[0], sim_gridy[0], '+', markersize=10)\n\nmx, my = obs_tree.data[indices[0]].T\nmz = zp[indices[0]]\n\nfor x, y, z in zip(mx, my, mz):\n d = np.sqrt((sim_gridx[0] - x)**2 + (y - sim_gridy[0])**2)\n ax.plot([sim_gridx[0], x], [sim_gridy[0], y], '--')\n\n xave = np.mean([sim_gridx[0], x])\n yave = np.mean([sim_gridy[0], y])\n\n ax.annotate('distance: {}'.format(d), xy=(xave, yave))\n ax.annotate('({}, {}) : {} F'.format(x, y, z), xy=(x, y))\n\nax.set_xlim(0, 80)\nax.set_ylim(0, 80)\nax.set_aspect('equal', 'datalim')",
"Step through the cressman calculations.",
"dists = np.array([22.803508502, 7.21110255093, 31.304951685, 33.5410196625])\nvalues = np.array([0.064, 1.156, 3.364, 0.225])\n\ncres_weights = (radius * radius - dists * dists) / (radius * radius + dists * dists)\ntotal_weights = np.sum(cres_weights)\nproportion = cres_weights / total_weights\nvalue = values * proportion\n\nval = cressman_point(cress_dist, cress_obs, radius)\n\nprint('Manual cressman value for grid 1:\\t', np.sum(value))\nprint('Metpy cressman value for grid 1:\\t', val)",
"Now repeat for grid 1, except use barnes interpolation.",
"fig, ax = plt.subplots(1, 1, figsize=(15, 10))\nax.annotate('grid 1: ({}, {})'.format(sim_gridx[1], sim_gridy[1]),\n xy=(sim_gridx[1] + 2, sim_gridy[1]))\nax.plot(sim_gridx[1], sim_gridy[1], '+', markersize=10)\n\nmx, my = obs_tree.data[indices[1]].T\nmz = zp[indices[1]]\n\nfor x, y, z in zip(mx, my, mz):\n d = np.sqrt((sim_gridx[1] - x)**2 + (y - sim_gridy[1])**2)\n ax.plot([sim_gridx[1], x], [sim_gridy[1], y], '--')\n\n xave = np.mean([sim_gridx[1], x])\n yave = np.mean([sim_gridy[1], y])\n\n ax.annotate('distance: {}'.format(d), xy=(xave, yave))\n ax.annotate('({}, {}) : {} F'.format(x, y, z), xy=(x, y))\n\nax.set_xlim(40, 80)\nax.set_ylim(40, 100)\nax.set_aspect('equal', 'datalim')",
"Step through barnes calculations.",
"dists = np.array([9.21954445729, 22.4722050542, 27.892651362, 38.8329756779])\nvalues = np.array([2.809, 6.241, 4.489, 2.704])\n\nweights = np.exp(-dists**2 / kappa)\ntotal_weights = np.sum(weights)\nvalue = np.sum(values * (weights / total_weights))\n\nprint('Manual barnes value:\\t', value)\nprint('Metpy barnes value:\\t', barnes_point(barnes_dist, barnes_obs, kappa))\n\nplt.show()"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
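The two manual step-through cells above apply the Cressman weight $(R^2 - d^2)/(R^2 + d^2)$ and the Barnes weight $\exp(-d^2/\kappa)$, then normalize by the sum of the weights. A compact sketch that wraps those formulas as functions and reproduces the manual Cressman value (the distances, observation values, and `radius=40` are copied from the notebook; the function names are my own):

```python
import numpy as np

def cressman_weights(dists, radius):
    """Cressman weights (R^2 - d^2) / (R^2 + d^2), as in the manual step-through."""
    return (radius**2 - dists**2) / (radius**2 + dists**2)

def barnes_weights(dists, kappa):
    """Barnes weights exp(-d^2 / kappa), as in the manual step-through."""
    return np.exp(-dists**2 / kappa)

def weighted_value(values, weights):
    """Interpolated value: weight-normalized sum of the observations."""
    return np.sum(values * weights) / np.sum(weights)

# Distances and observation values copied from the Cressman step-through cell.
dists = np.array([22.803508502, 7.21110255093, 31.304951685, 33.5410196625])
values = np.array([0.064, 1.156, 3.364, 0.225])
print(weighted_value(values, cressman_weights(dists, radius=40)))
```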
datactive/bigbang
|
examples/datatracker/Working Group Affiliations.ipynb
|
mit
|
[
"from ietfdata.datatracker import *\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport pandas as pd\n\nimport bigbang.analysis.process as process",
"Get draft data\nFirst, get the sample drafts for a range of dates.",
"window = {\n 'since' : \"2018-01-01T00:00:00\",\n 'until' : \"2018-01-10T00:00:00\"\n}\n\ndt = DataTracker()\n\ndef extract_data(doc):\n data = {}\n ## TODO: Add document UID?\n data['title'] = doc.title\n data['time'] = doc.time\n data['group-acronym'] = dt.group(doc.group).acronym\n data['type'] = doc.type.uri\n\n affiliations = [\n doc_author.affiliation\n for doc_author\n in dt.document_authors(doc)\n ]\n \n return [\n {**data,\n 'affiliation' : affiliation}\n for affiliation\n in affiliations]\n\ndrafts = dt.documents(doctype = dt.document_type(\n DocumentTypeURI(\"/api/v1/name/doctypename/draft\")),\n **window\n )\n\ndata = [item for sublist in [extract_data(x) for x in drafts] for item in sublist]\n\ndraft_df = pd.DataFrame(data=data)\n\ndraft_df.head()",
"Create a table for the group and affiliation links in particular.",
"link_df = draft_df[['group-acronym', 'affiliation','time']]\nlink_df[:5]",
"Entity resolution on the affiliations\nUsing containment distance, collapse the entities in the affilation column and removed suspected duplicates.",
"all_affiliations = link_df.groupby('affiliation').size()\n\nents = process.resolve_entities(all_affiliations,\n process.containment_distance,\n threshold=.25)\n\nreplacements = {}\nfor r in [{name: ent for name in ents[ent]} for ent in ents]:\n replacements.update(r)\n \nlink_df = link_df.replace(to_replace=replacements)",
"Plot the network links between working groups and affiliations",
"edges = [\n (row[1]['group-acronym'], row[1]['affiliation'])\n for row\n in link_df[['group-acronym','affiliation']].iterrows()\n]\n\nG = nx.Graph()\n\nG.add_nodes_from([x[0]\n for x\n in link_df[['group-acronym']].drop_duplicates().values],\n category=0)\nG.add_nodes_from(all_affiliations.index,\n category=1)\n\nG.add_edges_from(edges)\n\n## Clean the graph\n\nG.remove_node('none')\n\nfor c in list(nx.connected_components(G)):\n if len(c) <= 1:\n for n in c:\n G.remove_node(n)\n\ncolors = ['r' if x[1]['category'] else 'g' for x in list(G.nodes(data=True))]\n\nnx.draw(\n G,\n node_color = colors,\n with_labels = True\n)\n\nnx.write_gexf(G,\"group-affiliations.gexf\")",
"Look at categories of affiliations",
"import bigbang.datasets.organizations as organizations\ncat = organizations.load_data()\n\nreplacements = {x[1]['name'] : x[1]['category']\n for x\n in cat.iterrows()\n if not pd.isna(x[1]['category'])}\n\nlink_cat_df = link_df.replace(to_replace=replacements)\n\nlink_cat_df.groupby('affiliation').size().sort_values(ascending=False)[:40]\n\nedges = [\n (row[1]['group-acronym'], row[1]['affiliation'])\n for row\n in link_cat_df[['group-acronym','affiliation']].iterrows()\n]\n\naffils = link_cat_df.groupby('affiliation').size().index\n\nG = nx.Graph()\n\nG.add_nodes_from([x[0]\n for x\n in link_cat_df[['group-acronym']].drop_duplicates().values],\n category=0)\nG.add_nodes_from(affils,\n category=1)\n\nG.add_edges_from(edges)\n\n## Clean the graph\n\nG.remove_node('none')\n\nfor c in list(nx.connected_components(G)):\n if len(c) <= 1:\n for n in c:\n G.remove_node(n)\n\n[x[1] for x in list(G.nodes(data=True)) if 'category' not in x[1]]\n\ncolors = ['r' if x[1]['category'] else 'g' for x in list(G.nodes(data=True))]\n\nnx.draw(\n G,\n node_color = colors,\n with_labels = True\n)\n\nnx.write_gexf(G,\"group-org-categories.gexf\")"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
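The graph-cleaning step in the notebook above removes the placeholder 'none' affiliation and then drops any connected component reduced to a single node. A small networkx sketch of the same pruning on toy group/affiliation pairs (the group acronyms and organization names here are invented):

```python
import networkx as nx

# Toy group/affiliation pairs standing in for the Datatracker rows (names invented).
links = [("httpbis", "ACME Corp"), ("quic", "ACME Corp"),
         ("quic", "Example University"), ("tls", "none"), ("dnsop", "Lonely Org")]

G = nx.Graph()
G.add_nodes_from({g for g, _ in links}, category=0)   # working groups
G.add_nodes_from({a for _, a in links}, category=1)   # affiliations
G.add_edges_from(links)

# Clean the graph the same way the notebook does: drop the placeholder 'none'
# affiliation, then remove any component reduced to a single node.
G.remove_node("none")
for component in list(nx.connected_components(G)):
    if len(component) <= 1:
        G.remove_nodes_from(component)

print(sorted(G.nodes()))   # 'tls' disappears once its only link ('none') is removed
```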
Hexiang-Hu/mmds
|
final/Final-basic.ipynb
|
mit
|
[
"import numpy as np\nimport math\n",
"Q1. Solution\n\n3-shingles for \"hello world\":\nhel, ell, llo, lo_, o_w ,_wo, wor, orl, rld => 9 in total\n\n\n\nQ2. Solution",
"## Q2 Solution.\ndef hash(x):\n return math.fmod(3 * x + 2, 11)\n\nfor i in xrange(1,12):\n print hash(i)",
"Q3.\n\n\nThis question involves three different Bloom-filter-like scenarios. Each scenario involves setting to 1 certain bits of a 10-bit array, each bit of which is initially 0.\n\n\nScenario A: \n\nwe use one hash function that randomly, and with equal probability, selects one of the ten bits of the array. We apply this hash function to four different inputs and set to 1 each of the selected bits.\n\n\n\nScenario B: \n\nWe use two hash functions, each of which randomly, with equal probability, and independently of the other hash function selects one of the of 10 bits of the array. We apply both hash functions to each of two inputs and set to 1 each of the selected bits.\n\n\n\nScenario C: \n\nWe use one hash function that randomly and with equal probability selects two different bits among the ten in the array. We apply this hash function to two inputs and set to 1 each of the selected bits.\n\n\n\nLet a, b, and c be the expected number of bits set to 1 under scenarios A, B, and C, respectively. Which of the following correctly describes the relationships among a, b, and c?",
"## Q3 Solution.\nprob = 1.0 / 10\na = (1 - prob)**4\nprint a\nb = (1 - ( 1 - (1 - prob)**2) )**2\nprint b\nc = (1 - (1.0 /10 * 1.0 / 9))\nprint c",
"Q4.\n\nIn this market-basket problem, there are 99 items, numbered 2 to 100. There is a basket for each prime number between 2 and 100. The basket for p contains all and only the items whose numbers are a multiple of p. For example, the basket for 17 contains the following items: {17, 34, 51, 68, 85}. What is the support of the pair of items {12, 30}?\n\nQ4 Solution.\nsupport = 2 => {2,4,6,8, ...} & {3, 6, 9,...}\nQ5.\n\nTo two decimal places, what is the cosine of the angle between the vectors [2,1,1] and [10,-7,1]?",
"## Q5 Solution.\nvec1 = np.array([2, 1, 1])\nvec2 = np.array([10, -7, 1])\nprint vec1.dot(vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))",
"Q6.\n\nIn this question we use six minhash functions, organized as three bands of two rows each, to identify sets of high Jaccard similarity. If two sets have Jaccard similarity 0.6, what is the probability (to two decimal places) that this pair will become a candidate pair?",
"## Q6 Solution.\n\n# probability that they agree at one particular band\np1 = 0.6**2\nprint (1 - p1)**3",
"Q7.\n\nSuppose we have a (.4, .6, .9, .1)-sensitive family of functions. If we apply a 3-way OR construction to this family, we get a new family of functions whose sensitivity is:",
"## Q7 Solution.\np1 = 1 - (1 - .9)**3\np2 = 1 - (1 - .1)**3\nprint \"new LSH is (.4, .6, {}, {})-sensitive family\".format(p1, p2)",
"Q8.\n\nSuppose we have a database of (Class, Student, Grade) facts, each giving the grade the student got in the class. We want to estimate the fraction of students who have gotten A's in at least 10 classes, but we do not want to examine the entire relation, just a sample of 10% of the tuples. We shall hash tuples to 10 buckets, and take only those tuples in the first bucket. But to get a valid estimate of the fraction of students with at least 10 A's, we need to pick our hash key judiciously. To which Attribute(s) of the relation should we apply the hash function?\n\nQ8 Solution.\n\nWe will need to hash it to with regard to class and students\n\nQ9\n\n\nSuppose the Web consists of four pages A, B, C, and D, that form a chain\n A-->B-->C-->D\n\n\nWe wish to compute the PageRank of each of these pages, but since D is a \"dead end,\" we will \"teleport\" from D with probability 1 to one of the four pages, each with equal probability. We do not teleport from pages A, B, or C. Assuming the sum of the PageRanks of the four pages is 1, what is the PageRank of page B, correct to two decimal places?",
"## Q9 Solution.\nM = np.array([[0, 0, 0, .25],\n [1, 0, 0, .25],\n [0, 1, 0, .25],\n [0, 0, 1, .25]])\nr = np.array([.25, .25, .25, .25])\n\nfor i in xrange(30):\n r = M.dot(r)\n\nprint r",
"Q10.\n\nSuppose in the AGM model we have four individuals (A,B,C,D} and two communities. Community 1 consists of {A,B,C} and Community 2 consists of {B,C,D}. For Community 1 there is a 30% chance- it will cause an edge between any two of its members. For Community 2 there is a 40% chance it will cause an edge between any two of its members. To the nearest two decimal places, what is the probability that there is an edge between B and C?",
"## Q10 Solution.\nprint 1 - (1 - .3)*(1 - .4)",
"Q11.\n\nX is a dataset of n columns for which we train a supervised Machine Learning algorithm. e is the error of the model measured against a validation dataset. Unfortunately, e is too high because model has overfitted on the training data X and it doesn't generalize well. We now decide to reduce the model variance by reducing the dimensionality of X, using a Singular Value Decomposition, and using the resulting dataset to train our model. If i is the number of singular values used in the SVD reduction, how does e change as a function of i, for i ∈ {1, 2,...,n}?\n\nSolution.\n\nA Convex Function starts low",
"##Q12\nL = np.array([[-.25, -.5, -.76, -.29, -.03, -.07, -.01],\n [-.05, -.1, -.15, .20, .26, .51, .77 ]]).T\nprint L\n\nV = np.array([[6.74, 0],[0, 5.44]])\nprint V\n\nR = np.array([[-.57, -.11, -.57, -.11, -.57],\n [-.09, 0.70, -.09, .7, -.09]])\nprint R\nprint L.dot(V).dot(R)",
"Q13.\n\n\nRecall that the power iteration does r=X·r until converging, where X is a nxn matrix and n is the number of nodes in the graph. \n\n\nUsing the power iteration notation above, what is matrix X value when solving topic sensitive Pagerank with teleport set {0,1} for the following graph? Use beta=0.8. (Recall that the teleport set contains the destination nodes used when teleporting).",
"X = 0.8 * np.array([[1.0/3, 0, 0],\n [1.0/3, 0, 0],\n [1.0/3, 1, 0]])\nX += 0.2 * np.array([[.5, .5, .5],\n [.5, .5, .5],\n [ 0, 0, 0]])\nprint X",
"Q14.\n\nHere are two sets of integers S = {1,2,3,4} and T = {1,2,5,6,x}, where x stands for some integer. For how many different integer values of x are the Jaccard similarity and the Jaccard distance of S and T the same? (Note: x can be one of 1, 2, 5, or 6, but in that case T, being a set, will contain x only once and thus have four members, not five.)\n\nSolution.\n\nx = 3 or x = 4\n\nQ15.\n\nWhich of the following are advantages of using decision trees? (check all correct options)\n\nSolution.\n\nMy Answer\nIt can handle categorical input data without any special preprocessing\nThe resulting model is easy to interpret\nThe training is easy to parallelize\n\n\n\nQ16.\n\nConsider a dataset of points xi,....xn with labels yi,....,yi ∈ {-1, 1}, such that the data is separable. We run a soft-margin SVM and a hard-margin SVM, and in each case we obtain parameters w and b. Check the option that is true:\nThe resulting w and b can be different, and the boundaries can be different.\nThe resulting w and b are the same in the two cases, hence boundaries are the same.\nThe resulting w and b can be different in the two cases, but the boundaries are the same.\nNone of the above.\n\n\n\nQ17.\n\nConsider the following MapReduce algorithm. The input is a collection of positive integers. Given integer X, the Map function produces a tuple with key Y and value X for each prime divisor Y of X. For example, if X = 20, there are two key-value pairs: (2,20) and (5,20). The Reduce function, given a key K and list L of values, produces a tuple with key K and value sum(L) i.e., the sum of the values in the list. Given the input 9, 15, 16, 23, 25, 27, 28, 56 which of the following tuples appears in the final output?\n\nSolution.\n\n{2, 16 + 28 + 56 } => {2, 100}\n{3, 9 + 15 + 27 } => {3, 51}\n{5, 15 + 25} => {5, 40}\n{7, 28 + 56} => {7, 84}\n\nQ18.\n\n\nSuppose we run K-means clustering over the following set of points in 2-d space using the L1 distance metric: \n\n(1,1), (2,1) (2,2), (3,3), (4,2), (2,4), (4,4). \nWe pick k=2 and the initial centroids are (1,1) and (4,4). \nWhich of these is the centroid of the cluster containing the point (3,3) when the algorithm terminates? \n\n\n\nRecall that the L1 distance between two points is the sum of their distances along each dimension, \n\ne.g. the L1 distance between (1, 2) and (-1, 3) is 3.",
"from scipy.cluster.vq import kmeans,vq\n\ndef L1(p1, p2):\n return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])\n\nc1 = [(1,1), (4,4)]\npoints = np.array([[1, 1], [2, 1], [2, 2], [3, 3], [4, 2], [2, 4], [4, 4]]\n)\n# for point in points:\n# minDist = 9999\n# minIdx = None\n# for point in points:\n# print points",
"Q20.\n\nConsider an execution of the BALANCE algorithm with 4 advertisers, A1, A2, A3, A4, and 4 kinds of queries, Q1, Q2, Q3, Q4. \nAdvertiser A1 bids on queries Q1 and Q2; \nA2 bids on queries Q2 and Q3; \nA3 on queries Q3 and Q4; \nand A4 on queries Q1 and Q4. \nAll bids are equal to 1, and all clickthrough rates are equal. \n\n\n\nAll advertisers have a budget of 3, and ties are broken in favor of the advertiser with the lower index (e.g., A1 beats A2). Queries appear in the following order: \n Q1, Q2, Q3, Q3, Q1, Q2, Q3, Q1, Q4, Q1 \n\n\nWhich advertiser’s budget is exhausted first?\n\n\nSolution\n\nA1 will exhausted first\n\nQ21.\n\n\nConsider the bipartite graph with the following edges (you might want to draw a picture): \n (a,1), (a,3), (b,1), (b,2), (b,4), (c,2), (d,1), (d,4) \n\n\nWhich of the following edges appears in NO perfect matching?\n\n\nSolution\n\nperfect match 1: a-3, b-4, c-2, d-1\nperfect match 2: a-3, b-1, c-2, d-4\n\nQ22.\n\nThe Utility Matrix below captures the ratings of 5 users (A,B,C,D,E) for 5 movies (P,Q,R,S,T). Each known rating is a number between 1 and 5, and blanks represent unknown ratings. What is the Pearson Correlation (also known as the Centered Cosine) between users B and D?\n\n<pre>\n P Q R S T\n A 2 4 \n B 3 1 2 \n C 5 5 \n D 4 3 2\n E 4 5 1\n\n</pre>",
"## Solution\nvec1 = np.array([0, 1, -1, 0, 0])\nvec2 = np.array([0, 1, 0, 0, -1])\nprint vec1.dot(vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))",
"Q23.\n\nThe Utility Matrix below captures the ratings of 5 users (A,B,C,D,E) for 5 movies (P,Q,R,S,T). Each known rating is a number between 1 and 5, and blanks represent unknown ratings. Let (U,M) denote the rating of movie M by user U. We evaluate a Recommender System by withholding the ratings (A,P), (B,Q), and (C,S). The recommender system estimates (A,P)=1, (B,Q)=4, and (C,S)=5. What is the RMSE of the Recommender System, rounded to 2 decimal places?\n\n<pre>\n\n P Q R S T\n A 2 4 \n B 3 1 2 \n C 5 5 \n D 4 3 2\n E 4 5 1\n\n</pre>",
"## Solution\nprint \"RMSE = {}\".format(math.sqrt(2.0 / 3))",
"Q24.\n\nWe are going to perform a hierarchical (agglomerative) clustering on the four strings {he, she, her, their}, using edit distance (just insertions and deletions; no mutations of characters). \nInitially, each string is in a cluster by itself. \nThe distance between two clusters is the minimum edit distance between two strings, one chosen from each of the two clusters.\nWhen we complete the hierarchical clustering, there is one cluster containing all four strings, and we performed three mergers of clusters to get to that point. \nFor each of the three mergers there was a distance between the merged clusters. What is the sum of those three distances?"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
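Q24 in the notebook above has no accompanying code. Under the stated rules (insertions and deletions only), the edit distance between two strings equals len(a) + len(b) - 2·LCS(a, b), so a short sketch that prints the pairwise distances for {he, she, her, their} makes the three merge distances easy to read off (plain Python, not part of the original solution set):

```python
from itertools import combinations

def lcs(a, b):
    """Length of the longest common subsequence of a and b."""
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i, ca in enumerate(a, 1):
        for j, cb in enumerate(b, 1):
            dp[i][j] = dp[i - 1][j - 1] + 1 if ca == cb else max(dp[i - 1][j], dp[i][j - 1])
    return dp[len(a)][len(b)]

def edit_distance(a, b):
    """Edit distance with insertions and deletions only (no substitutions)."""
    return len(a) + len(b) - 2 * lcs(a, b)

strings = ['he', 'she', 'her', 'their']
for a, b in combinations(strings, 2):
    print('{} - {}: {}'.format(a, b, edit_distance(a, b)))
```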
1x0r/pspis
|
labs/demos-1.ipynb
|
mit
|
[
"from sklearn import datasets\ndigits = datasets.load_digits()\nprint(\"Набор данных для обучения (2D NumPy Array): \\n\", digits.data, '\\n')\nprint(\"Набор целей для данных (1D NumPy Array): \\n\", digits.target)\n\nprint(\"Форма массива данных для обучения: \\n\", digits.data.shape)\nprint(\"Форма массива целей: \\n\", digits.target.shape)",
"Какую задачу можно поставить для этого набора данных?",
"%matplotlib inline\nimport matplotlib.pyplot as plt\nn = 19\nprint(\"Каждая цифра представлена матрицей формы \", digits.data[n, :].shape)",
"Чтобы отобразить её на экране, нужно применить метод reshape. Целевая форма — $8 \\times 8$.",
"digit = 255 - digits.data[n, :].reshape(8, 8)\nplt.imshow(digit, cmap='gray', interpolation='none')\nplt.title(\"This is \" + str(digits.target[n]))\nplt.show()",
"Возьмем один из методов прошлой лекции. Например, метод классификации, основанный на деревьях (CART).",
"from sklearn.tree import DecisionTreeClassifier",
"Почти у всех классов, отвечающих за методы классификации в scikit-learn, есть следующие методы:\n- fit — обучение модели;\n- predict — классификация примера обученным классификатором;\n- score —оценка качества классификации в соответствии с некоторым критерием.\nЧтобы создать дерево-классификатор, достаточно создать объект класса DecisionTreeClassifier",
"clf = DecisionTreeClassifier(random_state=0)",
"Обучим классификатор на всех цифрах, кроме последних 10.",
"clf.fit(digits.data[:-10], digits.target[:-10])",
"Теперь попробуем классифицировать оставшиеся 10 картинок.",
"errors = 0\nfor i in range(1, 11):\n k = clf.predict(digits.data[-i].reshape(1, -1))\n print(\"Классификатор предсказал число {}, на самом деле это {}. Числа {}совпали.\"\n .format(k[0], digits.target[-i], \n \"\" if k[0] == digits.target[-i] else \"не \"))\n \n if k[0] != digits.target[-i]:\n errors += 1",
"Давайте посмотрим на \"проблемные\" числа:",
"fig = plt.figure(figsize=(12, 4))\nframe = 1\nfor i in range(1, 11):\n k = clf.predict(digits.data[-i].reshape(1, -1))\n if k[0] != digits.target[-i]:\n digit = 255 - digits.data[-i, :].reshape(8, 8)\n \n ax = fig.add_subplot(1, errors, frame) \n ax.imshow(digit, cmap='gray', interpolation='none')\n ax.set_title(\"This is {}, recognized as {}\".format(digits.target[-i], k[0]))\n frame += 1",
"Можно согласиться, что по крайней мере в двух из этих чисел могут ошибиться и люди."
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
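Holding out only the last 10 digits, as the notebook above does, gives a very noisy error estimate. A minimal scikit-learn sketch of the same decision tree evaluated on a random 20% test split instead (the split fraction and random_state are arbitrary choices):

```python
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

digits = datasets.load_digits()

# Hold out a random 20% of the digits instead of just the last 10.
X_train, X_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.2, random_state=0)

clf = DecisionTreeClassifier(random_state=0).fit(X_train, y_train)
print("Test accuracy: {:.3f}".format(clf.score(X_test, y_test)))
```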
ES-DOC/esdoc-jupyterhub
|
notebooks/inm/cmip6/models/inm-cm4-8/atmos.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Atmos\nMIP Era: CMIP6\nInstitute: INM\nSource ID: INM-CM4-8\nTopic: Atmos\nSub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos. \nProperties: 156 (127 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:54:04\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'inm', 'inm-cm4-8', 'atmos')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties --> Overview\n2. Key Properties --> Resolution\n3. Key Properties --> Timestepping\n4. Key Properties --> Orography\n5. Grid --> Discretisation\n6. Grid --> Discretisation --> Horizontal\n7. Grid --> Discretisation --> Vertical\n8. Dynamical Core\n9. Dynamical Core --> Top Boundary\n10. Dynamical Core --> Lateral Boundary\n11. Dynamical Core --> Diffusion Horizontal\n12. Dynamical Core --> Advection Tracers\n13. Dynamical Core --> Advection Momentum\n14. Radiation\n15. Radiation --> Shortwave Radiation\n16. Radiation --> Shortwave GHG\n17. Radiation --> Shortwave Cloud Ice\n18. Radiation --> Shortwave Cloud Liquid\n19. Radiation --> Shortwave Cloud Inhomogeneity\n20. Radiation --> Shortwave Aerosols\n21. Radiation --> Shortwave Gases\n22. Radiation --> Longwave Radiation\n23. Radiation --> Longwave GHG\n24. Radiation --> Longwave Cloud Ice\n25. Radiation --> Longwave Cloud Liquid\n26. Radiation --> Longwave Cloud Inhomogeneity\n27. Radiation --> Longwave Aerosols\n28. Radiation --> Longwave Gases\n29. Turbulence Convection\n30. Turbulence Convection --> Boundary Layer Turbulence\n31. Turbulence Convection --> Deep Convection\n32. Turbulence Convection --> Shallow Convection\n33. Microphysics Precipitation\n34. Microphysics Precipitation --> Large Scale Precipitation\n35. Microphysics Precipitation --> Large Scale Cloud Microphysics\n36. Cloud Scheme\n37. Cloud Scheme --> Optical Cloud Properties\n38. Cloud Scheme --> Sub Grid Scale Water Distribution\n39. Cloud Scheme --> Sub Grid Scale Ice Distribution\n40. Observation Simulation\n41. Observation Simulation --> Isscp Attributes\n42. Observation Simulation --> Cosp Attributes\n43. Observation Simulation --> Radar Inputs\n44. Observation Simulation --> Lidar Inputs\n45. Gravity Waves\n46. Gravity Waves --> Orographic Gravity Waves\n47. Gravity Waves --> Non Orographic Gravity Waves\n48. Solar\n49. Solar --> Solar Pathways\n50. Solar --> Solar Constant\n51. Solar --> Orbital Parameters\n52. Solar --> Insolation Ozone\n53. Volcanos\n54. Volcanos --> Volcanoes Treatment \n1. Key Properties --> Overview\nTop level key properties\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.3. Model Family\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of atmospheric model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"AGCM\" \n# \"ARCM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.4. Basic Approximations\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nBasic approximations made in the atmosphere.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"primitive equations\" \n# \"non-hydrostatic\" \n# \"anelastic\" \n# \"Boussinesq\" \n# \"hydrostatic\" \n# \"quasi-hydrostatic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2. Key Properties --> Resolution\nCharacteristics of the model resolution\n2.1. Horizontal Resolution Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.2. Canonical Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.3. Range Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"2.4. Number Of Vertical Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of vertical levels resolved on the computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"2.5. High Top\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.resolution.high_top') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"3. Key Properties --> Timestepping\nCharacteristics of the atmosphere model time stepping\n3.1. Timestep Dynamics\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTimestep for the dynamics, e.g. 30 min.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.2. Timestep Shortwave Radiative Transfer\nIs Required: FALSE Type: STRING Cardinality: 0.1\nTimestep for the shortwave radiative transfer, e.g. 1.5 hours.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.3. Timestep Longwave Radiative Transfer\nIs Required: FALSE Type: STRING Cardinality: 0.1\nTimestep for the longwave radiative transfer, e.g. 3 hours.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Key Properties --> Orography\nCharacteristics of the model orography\n4.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of the orography.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"present day\" \n# \"modified\" \n# TODO - please enter value(s)\n",
"4.2. Changes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nIf the orography type is modified describe the time adaptation changes.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.key_properties.orography.changes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"related to ice sheets\" \n# \"related to tectonics\" \n# \"modified mean\" \n# \"modified variance if taken into account in model (cf gravity waves)\" \n# TODO - please enter value(s)\n",
"5. Grid --> Discretisation\nAtmosphere grid discretisation\n5.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of grid discretisation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6. Grid --> Discretisation --> Horizontal\nAtmosphere discretisation in the horizontal\n6.1. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spectral\" \n# \"fixed grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.2. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"finite elements\" \n# \"finite volumes\" \n# \"finite difference\" \n# \"centered finite difference\" \n# TODO - please enter value(s)\n",
"6.3. Scheme Order\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation function order",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"second\" \n# \"third\" \n# \"fourth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.4. Horizontal Pole\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nHorizontal discretisation pole singularity treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"filter\" \n# \"pole rotation\" \n# \"artificial island\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"6.5. Grid Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal grid type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Gaussian\" \n# \"Latitude-Longitude\" \n# \"Cubed-Sphere\" \n# \"Icosahedral\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"7. Grid --> Discretisation --> Vertical\nAtmosphere discretisation in the vertical\n7.1. Coordinate Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nType of vertical coordinate system",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"isobaric\" \n# \"sigma\" \n# \"hybrid sigma-pressure\" \n# \"hybrid pressure\" \n# \"vertically lagrangian\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8. Dynamical Core\nCharacteristics of the dynamical core\n8.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of atmosphere dynamical core",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the dynamical core of the model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.3. Timestepping Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTimestepping framework type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.timestepping_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Adams-Bashforth\" \n# \"explicit\" \n# \"implicit\" \n# \"semi-implicit\" \n# \"leap frog\" \n# \"multi-step\" \n# \"Runge Kutta fifth order\" \n# \"Runge Kutta second order\" \n# \"Runge Kutta third order\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.4. Prognostic Variables\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of the model prognostic variables",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface pressure\" \n# \"wind components\" \n# \"divergence/curl\" \n# \"temperature\" \n# \"potential temperature\" \n# \"total water\" \n# \"water vapour\" \n# \"water liquid\" \n# \"water ice\" \n# \"total water moments\" \n# \"clouds\" \n# \"radiation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9. Dynamical Core --> Top Boundary\nType of boundary layer at the top of the model\n9.1. Top Boundary Condition\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTop boundary condition",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"9.2. Top Heat\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop boundary heat treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"9.3. Top Wind\nIs Required: TRUE Type: STRING Cardinality: 1.1\nTop boundary wind treatment",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Dynamical Core --> Lateral Boundary\nType of lateral boundary condition (if the model is a regional model)\n10.1. Condition\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nType of lateral boundary condition",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sponge layer\" \n# \"radiation boundary condition\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11. Dynamical Core --> Diffusion Horizontal\nHorizontal diffusion scheme\n11.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nHorizontal diffusion scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"11.2. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal diffusion scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"iterated Laplacian\" \n# \"bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12. Dynamical Core --> Advection Tracers\nTracer advection scheme\n12.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nTracer advection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Heun\" \n# \"Roe and VanLeer\" \n# \"Roe and Superbee\" \n# \"Prather\" \n# \"UTOPIA\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.2. Scheme Characteristics\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nTracer advection scheme characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Eulerian\" \n# \"modified Euler\" \n# \"Lagrangian\" \n# \"semi-Lagrangian\" \n# \"cubic semi-Lagrangian\" \n# \"quintic semi-Lagrangian\" \n# \"mass-conserving\" \n# \"finite volume\" \n# \"flux-corrected\" \n# \"linear\" \n# \"quadratic\" \n# \"quartic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.3. Conserved Quantities\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nTracer advection scheme conserved quantities",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"dry mass\" \n# \"tracer mass\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12.4. Conservation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTracer advection scheme conservation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Priestley algorithm\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13. Dynamical Core --> Advection Momentum\nMomentum advection scheme\n13.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nMomentum advection schemes name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"VanLeer\" \n# \"Janjic\" \n# \"SUPG (Streamline Upwind Petrov-Galerkin)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Scheme Characteristics\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMomentum advection scheme characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"2nd order\" \n# \"4th order\" \n# \"cell-centred\" \n# \"staggered grid\" \n# \"semi-staggered grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.3. Scheme Staggering Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMomentum advection scheme staggering type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa D-grid\" \n# \"Arakawa E-grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.4. Conserved Quantities\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nMomentum advection scheme conserved quantities",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Angular momentum\" \n# \"Horizontal momentum\" \n# \"Enstrophy\" \n# \"Mass\" \n# \"Total energy\" \n# \"Vorticity\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.5. Conservation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMomentum advection scheme conservation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"conservation fixer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14. Radiation\nCharacteristics of the atmosphere radiation process\n14.1. Aerosols\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nAerosols whose radiative effect is taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.aerosols') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"sulphate\" \n# \"nitrate\" \n# \"sea salt\" \n# \"dust\" \n# \"ice\" \n# \"organic\" \n# \"BC (black carbon / soot)\" \n# \"SOA (secondary organic aerosols)\" \n# \"POM (particulate organic matter)\" \n# \"polar stratospheric ice\" \n# \"NAT (nitric acid trihydrate)\" \n# \"NAD (nitric acid dihydrate)\" \n# \"STS (supercooled ternary solution aerosol particle)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15. Radiation --> Shortwave Radiation\nProperties of the shortwave radiation scheme\n15.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of shortwave radiation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"15.3. Spectral Integration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nShortwave radiation scheme spectral integration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.4. Transport Calculation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nShortwave radiation transport calculation methods",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.5. Spectral Intervals\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nShortwave radiation scheme number of spectral intervals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"16. Radiation --> Shortwave GHG\nRepresentation of greenhouse gases in the shortwave radiation scheme\n16.1. Greenhouse Gas Complexity\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nComplexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.2. ODS\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOzone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"16.3. Other Flourinated Gases\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOther flourinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17. Radiation --> Shortwave Cloud Ice\nShortwave radiative properties of ice crystals in clouds\n17.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud ice crystals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud ice crystals in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"17.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18. Radiation --> Shortwave Cloud Liquid\nShortwave radiative properties of liquid droplets in clouds\n18.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with cloud liquid droplets",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"18.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"19. Radiation --> Shortwave Cloud Inhomogeneity\nCloud inhomogeneity in the shortwave radiation scheme\n19.1. Cloud Inhomogeneity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20. Radiation --> Shortwave Aerosols\nShortwave radiative properties of aerosols\n20.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with aerosols",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of aerosols in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"20.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to aerosols in the shortwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"21. Radiation --> Shortwave Gases\nShortwave radiative properties of gases\n21.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral shortwave radiative interactions with gases",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22. Radiation --> Longwave Radiation\nProperties of the longwave radiation scheme\n22.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of longwave radiation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the longwave radiation scheme.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"22.3. Spectral Integration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nLongwave radiation scheme spectral integration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"wide-band model\" \n# \"correlated-k\" \n# \"exponential sum fitting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.4. Transport Calculation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nLongwave radiation transport calculation methods",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"two-stream\" \n# \"layer interaction\" \n# \"bulk\" \n# \"adaptive\" \n# \"multi-stream\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.5. Spectral Intervals\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nLongwave radiation scheme number of spectral intervals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"23. Radiation --> Longwave GHG\nRepresentation of greenhouse gases in the longwave radiation scheme\n23.1. Greenhouse Gas Complexity\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nComplexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CO2\" \n# \"CH4\" \n# \"N2O\" \n# \"CFC-11 eq\" \n# \"CFC-12 eq\" \n# \"HFC-134a eq\" \n# \"Explicit ODSs\" \n# \"Explicit other fluorinated gases\" \n# \"O3\" \n# \"H2O\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. ODS\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOzone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CFC-12\" \n# \"CFC-11\" \n# \"CFC-113\" \n# \"CFC-114\" \n# \"CFC-115\" \n# \"HCFC-22\" \n# \"HCFC-141b\" \n# \"HCFC-142b\" \n# \"Halon-1211\" \n# \"Halon-1301\" \n# \"Halon-2402\" \n# \"methyl chloroform\" \n# \"carbon tetrachloride\" \n# \"methyl chloride\" \n# \"methylene chloride\" \n# \"chloroform\" \n# \"methyl bromide\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.3. Other Flourinated Gases\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nOther flourinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"HFC-134a\" \n# \"HFC-23\" \n# \"HFC-32\" \n# \"HFC-125\" \n# \"HFC-143a\" \n# \"HFC-152a\" \n# \"HFC-227ea\" \n# \"HFC-236fa\" \n# \"HFC-245fa\" \n# \"HFC-365mfc\" \n# \"HFC-43-10mee\" \n# \"CF4\" \n# \"C2F6\" \n# \"C3F8\" \n# \"C4F10\" \n# \"C5F12\" \n# \"C6F14\" \n# \"C7F16\" \n# \"C8F18\" \n# \"c-C4F8\" \n# \"NF3\" \n# \"SF6\" \n# \"SO2F2\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24. Radiation --> Longwave Cloud Ice\nLongwave radiative properties of ice crystals in clouds\n24.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with cloud ice crystals",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.2. Physical Reprenstation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud ice crystals in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"bi-modal size distribution\" \n# \"ensemble of ice crystals\" \n# \"mean projected area\" \n# \"ice water path\" \n# \"crystal asymmetry\" \n# \"crystal aspect ratio\" \n# \"effective crystal radius\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"24.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud ice crystals in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25. Radiation --> Longwave Cloud Liquid\nLongwave radiative properties of liquid droplets in clouds\n25.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with cloud liquid droplets",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of cloud liquid droplets in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud droplet number concentration\" \n# \"effective cloud droplet radii\" \n# \"droplet size distribution\" \n# \"liquid water path\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to cloud liquid droplets in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"geometric optics\" \n# \"Mie theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26. Radiation --> Longwave Cloud Inhomogeneity\nCloud inhomogeneity in the longwave radiation scheme\n26.1. Cloud Inhomogeneity\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod for taking into account horizontal cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Monte Carlo Independent Column Approximation\" \n# \"Triplecloud\" \n# \"analytic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27. Radiation --> Longwave Aerosols\nLongwave radiative properties of aerosols\n27.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with aerosols",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.2. Physical Representation\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical representation of aerosols in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"number concentration\" \n# \"effective radii\" \n# \"size distribution\" \n# \"asymmetry\" \n# \"aspect ratio\" \n# \"mixing state\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.3. Optical Methods\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOptical methods applicable to aerosols in the longwave radiation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"T-matrix\" \n# \"geometric optics\" \n# \"finite difference time domain (FDTD)\" \n# \"Mie theory\" \n# \"anomalous diffraction approximation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"28. Radiation --> Longwave Gases\nLongwave radiative properties of gases\n28.1. General Interactions\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nGeneral longwave radiative interactions with gases",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"scattering\" \n# \"emission/absorption\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"29. Turbulence Convection\nAtmosphere Convective Turbulence and Clouds\n29.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of atmosphere convection and turbulence",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"30. Turbulence Convection --> Boundary Layer Turbulence\nProperties of the boundary layer turbulence scheme\n30.1. Scheme Name\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nBoundary layer turbulence scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Mellor-Yamada\" \n# \"Holtslag-Boville\" \n# \"EDMF\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"30.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nBoundary layer turbulence scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TKE prognostic\" \n# \"TKE diagnostic\" \n# \"TKE coupled with water\" \n# \"vertical profile of Kz\" \n# \"non-local diffusion\" \n# \"Monin-Obukhov similarity\" \n# \"Coastal Buddy Scheme\" \n# \"Coupled with convection\" \n# \"Coupled with gravity waves\" \n# \"Depth capped at cloud base\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"30.3. Closure Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nBoundary layer turbulence scheme closure order",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"30.4. Counter Gradient\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nUses boundary layer turbulence scheme counter gradient",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"31. Turbulence Convection --> Deep Convection\nProperties of the deep convection scheme\n31.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDeep convection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"31.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nDeep convection scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"adjustment\" \n# \"plume ensemble\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.3. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nDeep convection scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"CAPE\" \n# \"bulk\" \n# \"ensemble\" \n# \"CAPE/WFN based\" \n# \"TKE/CIN based\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.4. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of deep convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"vertical momentum transport\" \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"updrafts\" \n# \"downdrafts\" \n# \"radiative effect of anvils\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.5. Microphysics\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nMicrophysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32. Turbulence Convection --> Shallow Convection\nProperties of the shallow convection scheme\n32.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nShallow convection scheme name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"32.2. Scheme Type\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nshallow convection scheme type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mass-flux\" \n# \"cumulus-capped boundary layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32.3. Scheme Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nshallow convection scheme method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"same as deep (unified)\" \n# \"included in boundary layer turbulence\" \n# \"separate diagnosis\" \n# TODO - please enter value(s)\n",
"32.4. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPhysical processes taken into account in the parameterisation of shallow convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convective momentum transport\" \n# \"entrainment\" \n# \"detrainment\" \n# \"penetrative convection\" \n# \"re-evaporation of convective precipitation\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32.5. Microphysics\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nMicrophysics scheme for shallow convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"tuning parameter based\" \n# \"single moment\" \n# \"two moment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"33. Microphysics Precipitation\nLarge Scale Cloud Microphysics and Precipitation\n33.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of large scale cloud microphysics and precipitation",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34. Microphysics Precipitation --> Large Scale Precipitation\nProperties of the large scale precipitation scheme\n34.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name of the large scale precipitation parameterisation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34.2. Hydrometeors\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPrecipitating hydrometeors taken into account in the large scale precipitation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"liquid rain\" \n# \"snow\" \n# \"hail\" \n# \"graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"35. Microphysics Precipitation --> Large Scale Cloud Microphysics\nProperties of the large scale cloud microphysics scheme\n35.1. Scheme Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name of the microphysics parameterisation scheme used for large scale clouds.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"35.2. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nLarge scale cloud microphysics processes",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"mixed phase\" \n# \"cloud droplets\" \n# \"cloud ice\" \n# \"ice nucleation\" \n# \"water vapour deposition\" \n# \"effect of raindrops\" \n# \"effect of snow\" \n# \"effect of graupel\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"36. Cloud Scheme\nCharacteristics of the cloud scheme\n36.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of the atmosphere cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"36.2. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"36.3. Atmos Coupling\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nAtmosphere components that are linked to the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"atmosphere_radiation\" \n# \"atmosphere_microphysics_precipitation\" \n# \"atmosphere_turbulence_convection\" \n# \"atmosphere_gravity_waves\" \n# \"atmosphere_solar\" \n# \"atmosphere_volcano\" \n# \"atmosphere_cloud_simulator\" \n# TODO - please enter value(s)\n",
"36.4. Uses Separate Treatment\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDifferent cloud schemes for the different types of clouds (convective, stratiform and boundary layer)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.5. Processes\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nProcesses included in the cloud scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.processes') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"entrainment\" \n# \"detrainment\" \n# \"bulk cloud\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"36.6. Prognostic Scheme\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the cloud scheme a prognostic scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.7. Diagnostic Scheme\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the cloud scheme a diagnostic scheme?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36.8. Prognostic Variables\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nList the prognostic variables used by the cloud scheme, if applicable.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"cloud amount\" \n# \"liquid\" \n# \"ice\" \n# \"rain\" \n# \"snow\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"37. Cloud Scheme --> Optical Cloud Properties\nOptical cloud properties\n37.1. Cloud Overlap Method\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nMethod for taking into account overlapping of cloud layers",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"random\" \n# \"maximum\" \n# \"maximum-random\" \n# \"exponential\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"37.2. Cloud Inhomogeneity\nIs Required: FALSE Type: STRING Cardinality: 0.1\nMethod for taking into account cloud inhomogeneity",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"38. Cloud Scheme --> Sub Grid Scale Water Distribution\nSub-grid scale water distribution\n38.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSub-grid scale water distribution type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n",
"38.2. Function Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nSub-grid scale water distribution function name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"38.3. Function Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nSub-grid scale water distribution function type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"38.4. Convection Coupling\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSub-grid scale water distribution coupling with convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n",
"39. Cloud Scheme --> Sub Grid Scale Ice Distribution\nSub-grid scale ice distribution\n39.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSub-grid scale ice distribution type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"prognostic\" \n# \"diagnostic\" \n# TODO - please enter value(s)\n",
"39.2. Function Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nSub-grid scale ice distribution function name",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"39.3. Function Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nSub-grid scale ice distribution function type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"39.4. Convection Coupling\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSub-grid scale ice distribution coupling with convection",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"coupled with deep\" \n# \"coupled with shallow\" \n# \"not coupled with convection\" \n# TODO - please enter value(s)\n",
"40. Observation Simulation\nCharacteristics of observation simulation\n40.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of observation simulator characteristics",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"41. Observation Simulation --> Isscp Attributes\nISSCP Characteristics\n41.1. Top Height Estimation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nCloud simulator ISSCP top height estimation methodUo",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"no adjustment\" \n# \"IR brightness\" \n# \"visible optical depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"41.2. Top Height Direction\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator ISSCP top height direction",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"lowest altitude level\" \n# \"highest altitude level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"42. Observation Simulation --> Cosp Attributes\nCFMIP Observational Simulator Package attributes\n42.1. Run Configuration\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator COSP run configuration",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Inline\" \n# \"Offline\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"42.2. Number Of Grid Points\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of grid points",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"42.3. Number Of Sub Columns\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of sub-cloumns used to simulate sub-grid variability",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"42.4. Number Of Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nCloud simulator COSP number of levels",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"43. Observation Simulation --> Radar Inputs\nCharacteristics of the cloud radar simulator\n43.1. Frequency\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nCloud simulator radar frequency (Hz)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"43.2. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator radar type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"surface\" \n# \"space borne\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"43.3. Gas Absorption\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nCloud simulator radar uses gas absorption",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"43.4. Effective Radius\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nCloud simulator radar uses effective radius",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"44. Observation Simulation --> Lidar Inputs\nCharacteristics of the cloud lidar simulator\n44.1. Ice Types\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nCloud simulator lidar ice type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"ice spheres\" \n# \"ice non-spherical\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"44.2. Overlap\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nCloud simulator lidar overlap",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"max\" \n# \"random\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45. Gravity Waves\nCharacteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.\n45.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of gravity wave parameterisation in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"45.2. Sponge Layer\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSponge layer in the upper levels in order to avoid gravity wave reflection at the top.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.sponge_layer') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Rayleigh friction\" \n# \"Diffusive sponge layer\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45.3. Background\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nBackground wave distribution",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"continuous spectrum\" \n# \"discrete spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"45.4. Subgrid Scale Orography\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nSubgrid scale orography effects taken into account.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"effect on drag\" \n# \"effect on lifting\" \n# \"enhanced topography\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46. Gravity Waves --> Orographic Gravity Waves\nGravity waves generated due to the presence of orography\n46.1. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the orographic gravity wave scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"46.2. Source Mechanisms\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOrographic gravity wave source mechanisms",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear mountain waves\" \n# \"hydraulic jump\" \n# \"envelope orography\" \n# \"low level flow blocking\" \n# \"statistical sub-grid scale variance\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.3. Calculation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nOrographic gravity wave calculation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"non-linear calculation\" \n# \"more than two cardinal directions\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.4. Propagation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrographic gravity wave propogation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"includes boundary layer ducting\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"46.5. Dissipation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrographic gravity wave dissipation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47. Gravity Waves --> Non Orographic Gravity Waves\nGravity waves generated by non-orographic processes.\n47.1. Name\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCommonly used name for the non-orographic gravity wave scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"47.2. Source Mechanisms\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nNon-orographic gravity wave source mechanisms",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"convection\" \n# \"precipitation\" \n# \"background spectrum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47.3. Calculation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nNon-orographic gravity wave calculation method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"spatially dependent\" \n# \"temporally dependent\" \n# TODO - please enter value(s)\n",
"47.4. Propagation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nNon-orographic gravity wave propogation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"linear theory\" \n# \"non-linear theory\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"47.5. Dissipation Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nNon-orographic gravity wave dissipation scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"total wave\" \n# \"single wave\" \n# \"spectral\" \n# \"linear\" \n# \"wave saturation vs Richardson number\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"48. Solar\nTop of atmosphere solar insolation characteristics\n48.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of solar insolation of the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"49. Solar --> Solar Pathways\nPathways for solar forcing of the atmosphere\n49.1. Pathways\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nPathways for the solar forcing of the atmosphere model domain",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_pathways.pathways') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"SW radiation\" \n# \"precipitating energetic particles\" \n# \"cosmic rays\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"50. Solar --> Solar Constant\nSolar constant and top of atmosphere insolation characteristics\n50.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of the solar constant.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n",
"50.2. Fixed Value\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf the solar constant is fixed, enter the value of the solar constant (W m-2).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"50.3. Transient Characteristics\nIs Required: TRUE Type: STRING Cardinality: 1.1\nsolar constant transient characteristics (W m-2)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"51. Solar --> Orbital Parameters\nOrbital parameters and top of atmosphere insolation characteristics\n51.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime adaptation of orbital parameters",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"fixed\" \n# \"transient\" \n# TODO - please enter value(s)\n",
"51.2. Fixed Reference Date\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nReference date for fixed orbital parameters (yyyy)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"51.3. Transient Method\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescription of transient orbital parameters",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"51.4. Computation Method\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nMethod used for computing orbital parameters.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Berger 1978\" \n# \"Laskar 2004\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"52. Solar --> Insolation Ozone\nImpact of solar insolation on stratospheric ozone\n52.1. Solar Ozone Impact\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDoes top of atmosphere insolation impact on stratospheric ozone?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"53. Volcanos\nCharacteristics of the implementation of volcanoes\n53.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview description of the implementation of volcanic effects in the atmosphere",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"54. Volcanos --> Volcanoes Treatment\nTreatment of volcanoes in the atmosphere\n54.1. Volcanoes Implementation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHow volcanic effects are modeled in the atmosphere.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"high frequency solar constant anomaly\" \n# \"stratospheric aerosols optical thickness\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
faneshion/MatchZoo
|
tutorials/quick_start.ipynb
|
apache-2.0
|
[
"<img src=\"../artworks/matchzoo-logo.png\" alt=\"logo\" style=\"width:600px;float: center\"/>\nMatchZoo Quick Start",
"import matchzoo as mz\nprint(mz.__version__)",
"Define Task\nThere are two types of tasks available in MatchZoo. mz.tasks.Ranking and mz.tasks.Classification. We will use a ranking task for this demo.",
"task = mz.tasks.Ranking()\nprint(task)",
"Prepare Data",
"train_raw = mz.datasets.toy.load_data(stage='train', task=task)\ntest_raw = mz.datasets.toy.load_data(stage='test', task=task)\n\ntype(train_raw)",
"DataPack is a MatchZoo native data structure that most MatchZoo data handling processes build upon. A DataPack is consists of three pandas.DataFrame:",
"train_raw.left.head()\n\ntrain_raw.right.head()\n\ntrain_raw.relation.head()",
"It is also possible to convert a DataPack into a single pandas.DataFrame that holds all information.",
"train_raw.frame().head()",
"However, using such pandas.DataFrame consumes much more memory if there are many duplicates in the texts, and that is the exact reason why we use DataPack. For more details about data handling, consult matchzoo/tutorials/data_handling.ipynb.\nPreprocessing\nMatchZoo preprocessors are used to convert a raw DataPack into a DataPack that ready to be fed into a model.",
"preprocessor = mz.preprocessors.BasicPreprocessor()",
"There are two steps to use a preprocessor. First, fit. Then, transform. fit will only changes the preprocessor's inner state but not the input DataPack.",
"preprocessor.fit(train_raw)",
"fit will gather useful information into its context, which will be used later in a transform or used to set hyper-parameters of a model.",
"preprocessor.context",
"Once fit, the preprocessor has enough information to transform. transform will not change the preprocessor's inner state and the input DataPack, but return a transformed DataPack.",
"train_processed = preprocessor.transform(train_raw)\ntest_processed = preprocessor.transform(test_raw)\n\ntrain_processed.left.head()",
"As we can see, text_left is already in sequence form that nerual networks love.\nJust to make sure we have the correct sequence:",
"vocab_unit = preprocessor.context['vocab_unit']\nprint('Orig Text:', train_processed.left.loc['Q1']['text_left'])\nsequence = train_processed.left.loc['Q1']['text_left']\nprint('Transformed Indices:', sequence)\nprint('Transformed Indices Meaning:',\n '_'.join([vocab_unit.state['index_term'][i] for i in sequence]))",
"For more details about preprocessing, consult matchzoo/tutorials/data_handling.ipynb.\nBuild Model\nMatchZoo provides many built-in text matching models.",
"mz.models.list_available()",
"Let's use mz.models.DenseBaseline for our demo.",
"model = mz.models.DenseBaseline()",
"The model is initialized with a hyper parameter table, in which values are partially filled. To view parameters and their values, use print.",
"print(model.params)",
"to_frame gives you more informartion in addition to just names and values.",
"model.params.to_frame()[['Name', 'Description', 'Value']]",
"To set a hyper-parameter:",
"model.params['task'] = task\nmodel.params['mlp_num_units'] = 3\nprint(model.params)",
"Notice that we are still missing input_shapes, and that information is store in the preprocessor.",
"print(preprocessor.context['input_shapes'])",
"We may use update to load a preprocessor's context into a model's hyper-parameter table.",
"model.params.update(preprocessor.context)",
"Now we have a completed hyper-parameter table.",
"model.params.completed()",
"With all parameters filled in, we can now build and compile the model.",
"model.build()\nmodel.compile()",
"MatchZoo models are wrapped over keras models, and the backend property of a model gives you the actual keras model built.",
"model.backend.summary()",
"For more details about models, consult matchzoo/tutorials/models.ipynb.\nTrain, Evaluate, Predict\nA DataPack can unpack itself into data that can be directly used to train a MatchZoo model.",
"x, y = train_processed.unpack()\ntest_x, test_y = test_processed.unpack()\n\nmodel.fit(x, y, batch_size=32, epochs=5)",
"An alternative to train a model is to use a DataGenerator. This is useful for delaying expensive preprocessing steps or doing real-time data augmentation. For some models that needs dynamic batch-wise information, using a DataGenerator is required. For more details about DataGenerator, consult matchzoo/tutorials/data_handling.ipynb.",
"data_generator = mz.DataGenerator(train_processed, batch_size=32)\n\nmodel.fit_generator(data_generator, epochs=5, use_multiprocessing=True, workers=4)\n\nmodel.evaluate(test_x, test_y)\n\nmodel.predict(test_x)",
"A Shortcut to Preprocessing and Model Building\nSince data preprocessing and model building are laborious and special setups of some models makes this even worse, MatchZoo provides prepare, a unified interface that handles interaction among data, model, and preprocessor automatically.\nMore specifically, prepare does these following things:\n - create a default preprocessor of the model class (if not given one)\n - fit the preprocessor using the raw data\n - create an embedding matrix\n - instantiate a model and fill in hype-parameters\n - build the model\n - instantiate a DataGeneratorBuilder that will build a correctly formed DataGenerator given a DataPack\nIt also does many special handling for specific models, but we will not go into the details of that here.",
"for model_class in mz.models.list_available():\n print(model_class)\n model, preprocessor, data_generator_builder, embedding_matrix = mz.auto.prepare(\n task=task,\n model_class=model_class,\n data_pack=train_raw,\n )\n train_processed = preprocessor.transform(train_raw, verbose=0)\n test_processed = preprocessor.transform(test_raw, verbose=0)\n train_gen = data_generator_builder.build(train_processed)\n test_gen = data_generator_builder.build(test_processed)\n model.fit_generator(train_gen, epochs=1)\n model.evaluate_generator(test_gen)\n print()",
"Save and Load the Model",
"model.save('my-model')\nloaded_model = mz.load_model('my-model')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
marius311/cosmoslik
|
cosmoslik_plugins/likelihoods/planck/clik.ipynb
|
gpl-3.0
|
[
"Planck (via clik)\nThis plugin is an interface between the Planck likelihood code clik and CosmoSlik. You need clik already installed on your machine, which you can get from here. \nYou also need to download the \"clik files\" for whichever likelihoods you would like to use. You can find these here under \"Likelihoods\" / \"Notes\". \nQuickstart\nCosmoSlik provides several plugins which wrap clik and have all the necessary nuisance parameters set up for particular data files. You can use them in your script by adding something like the following to your __init__, \n```python\nset up cosmological params and solver\nself.cosmo = models.cosmology(\"lcdm\")\nself.cmb = models.classy()\nload Planck clik file and set up nuisance parameters\nself.clik = likelihoods.planck.planck_2015_highl_TT(\n clik_file=\"plik_dx11dr2_HM_v18_TT.clik/\",\n)\n```\nthen compute the likelihood in __call__ by calling clik with a parameter cmb of the kind returned by CAMB or CLASS,\n```python\ncompute likelihood\nself.clik(self.cmb(**self.cosmo))\n```\nThe generic clik wrapper\nUsing the SlikPlugin named clik, we can load up any generic clik file. Supposing we've downloaded the file plik_lite_v18_TT.clik, we can load it in via,",
"%pylab inline\nsys.path = sys.path[1:]\nfrom cosmoslik import *\n\nclik = likelihoods.planck.clik(\n clik_file=\"plik_lite_v18_TT.clik/\",\n A_Planck=1\n)\nclik",
"Note that we gave it a parameter A_Planck. Most clik files have extra nuisance parameters, which you can list (for a given file) with,",
"clik.clik.get_extra_parameter_names()",
"You should attach parametes with these names to the clik object as we have done above (usually in a script these will be sampled parameters). \nWith the clik object created, we can call it to compute the likelihood. The function expects a parameter cmb of the kind returned by CAMB or CLASS.",
"cmb = models.classy(lmax=3000)()\ncmb",
"Here's the negative log likelihood:",
"clik(cmb)",
"Putting it all together, a simple script which runs this likelihood would look like:",
"class planck(SlikPlugin):\n\n def __init__(self, **kwargs):\n super().__init__()\n \n # load Planck clik file and set up nuisance parameters\n self.clik = likelihoods.planck.clik(\n clik_file=\"plik_lite_v18_TT.clik/\",\n \n # sample over nuisance parameter\n A_Planck=param(start=1, scale=0.0025, gaussian_prior=(1,0.0025))\n )\n \n # set up cosmological params and solver\n self.cosmo = models.cosmology(\"lcdm\")\n self.cmb = models.classy(lmax=3000)\n \n self.sampler = samplers.metropolis_hastings(self)\n\n def __call__(self):\n # compute likelihood\n return self.clik(self.cmb(**self.cosmo))\n\ns = Slik(planck())\nlnl, e = s.evaluate(**s.get_start())\nlnl",
"Ready-to-go wrappers for specific clik files\nThe previous example was easy because there was one single nuisance parameter, A_Planck. Other clik files have many more nuisance parameters, which must all be sampled over and in some cases have the right priors applied (which you can read about here), otherwise you will not get the right answer. \nThis is, of course, a huge pain. \nFor this reason, CosmoSlik comes with several SlikPlugins already containing the correct sampled nuisance parameters for many of these clik files, making writing a script extremely easy. For example, here is the source code for one such plugin, planck_2015_highl_TT:\n```python\nparam = param_shortcut('start','scale')\nclass planck_2015_highl_TT(clik):\ndef __init__(\n self,\n clik_file,\n A_cib_217 = param(60, 10, range=(0,200)),\n A_planck = param(1, 0.0025, range=(0.9,1.1), gaussian_prior=(1,0.0025)),\n A_sz = param(5, 3, range=(0,10)),\n calib_100T = param(1, 0.001, range=(0,3), gaussian_prior=(0.999,0.001)),\n calib_217T = param(1, 0.002, range=(0,3), gaussian_prior=(0.995,0.002)),\n cib_index = -1.3, \n gal545_A_100 = param(7, 2, range=(0,50), gaussian_prior=(7,2)),\n gal545_A_143 = param(9, 2, range=(0,50), gaussian_prior=(9,2)),\n gal545_A_143_217 = param(21, 8.5, range=(0,100), gaussian_prior=(21,8.5)),\n gal545_A_217 = param(80, 20, range=(0,400), gaussian_prior=(80,20)),\n ksz_norm = param(2, 3, range=(0,10)),\n ps_A_100_100 = param(250, 30, range=(0,4000)),\n ps_A_143_143 = param(45, 10, range=(0,4000)),\n ps_A_143_217 = param(40, 10, range=(0,4000)),\n ps_A_217_217 = param(90, 15, range=(0,4000)),\n xi_sz_cib = param(0.5, 0.3, range=(0,1)),\n):\n super().__init__(**arguments())\n\n```\nAs you can see, all the sampled parameters as automatically set, including ranges and priors. The script to use this likelihood is then extremely simple:",
"class planck(SlikPlugin):\n\n def __init__(self):\n super().__init__()\n \n # load Planck clik file and set up nuisance parameters\n self.clik = likelihoods.planck.planck_2015_highl_TT(\n clik_file=\"plik_dx11dr2_HM_v18_TT.clik/\",\n )\n \n # set up cosmological params and solver\n self.cosmo = models.cosmology(\"lcdm\")\n self.cmb = models.classy(lmax=3000)\n \n self.sampler = samplers.metropolis_hastings(self)\n\n def __call__(self):\n # compute likelihood\n return self.clik(self.cmb(**self.cosmo))\n\ns = Slik(planck())\nlnl, e = s.evaluate(**s.get_start())\nlnl",
"Common calibration parameters\nDespite that the Planck likelihood is broken up into different pieces, they sometimes share the same calibration parameters. To apply this correctly in your script, just define one single sampled calibration parameter, then in your __call__, set it across all the different likelihoods.",
"class planck(SlikPlugin):\n\n def __init__(self):\n super().__init__()\n \n # set up low and high L likelihood\n self.highl = likelihoods.planck.planck_2015_highl_TT(\n clik_file=\"plik_dx11dr2_HM_v18_TT.clik/\",\n )\n self.lowl = likelihoods.planck.planck_2015_lowl_TT(\n clik_file=\"commander_rc2_v1.1_l2_29_B.clik/\",\n A_planck=None, #turn off this cal parameter, use the one from self.highl\n )\n \n # set up cosmological params and solver\n self.cosmo = models.cosmology(\"lcdm\")\n self.cmb = models.classy(lmax=3000)\n \n self.sampler = samplers.metropolis_hastings(self)\n\n def __call__(self):\n # set the calibration parameters the same\n self.lowl.A_planck = self.highl.A_planck \n \n # compute likelihood\n cmb = self.cmb(**self.cosmo)\n return self.lowl(cmb) + self.highl(cmb)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
SJSlavin/phys202-2015-work
|
assignments/assignment07/AlgorithmsEx01.ipynb
|
mit
|
[
"Algorithms Exercise 1\nImports",
"%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport numpy as np",
"Word counting\nWrite a function tokenize that takes a string of English text returns a list of words. It should also remove stop words, which are common short words that are often removed before natural language processing. Your function should have the following logic:\n\nSplit the string into lines using splitlines.\nSplit each line into a list of words and merge the lists for each line.\nUse Python's builtin filter function to remove all punctuation.\nIf stop_words is a list, remove all occurences of the words in the list.\nIf stop_words is a space delimeted string of words, split them and remove them.\nRemove any remaining empty words.\nMake all words lowercase.",
"file = open(\"mobydick_chapter1.txt\")\nmobydick = file.read()\n\nmobydick = mobydick.splitlines()\nmobydick = \" \".join(mobydick)\n\npunctuation = [\"-\", \",\", \".\"]\n\nmobydick = list(mobydick)\nmobydick_f = list(filter(lambda c: c not in punctuation, mobydick))\n\nmobydick_f = \"\".join(mobydick_f)\n\nstop_words = [\"of\", \"or\", \"in\"]\n\nmobydick_fs = mobydick_f.split()\nmobydick_fs = list(filter(lambda w: w not in stop_words, mobydick_fs))\n\n\n\n \n\n\n \nprint(mobydick_fs)\n\n\n\n\nphrase = ['the cat', 'ran away']\n' '.join(phrase).split(' ')\n\ndef tokenize(s, stop_words=None, punctuation='`~!@#$%^&*()_-+={[}]|\\:;\"<,>.?/}\\t'):\n s = s.splitlines()\n s = \" \".join(s)\n \n punctuation_l = list(punctuation)\n\n s = list(s)\n s_f = list(filter(lambda c: c not in punctuation, s))\n\n s_f = \"\".join(s_f)\n \n stop_words_l = []\n\n #http://stackoverflow.com/questions/402504/how-to-determine-the-variable-type-in-python\n if type(stop_words) is str:\n stop_words_l = stop_words.split(\" \")\n elif type(stop_words) is list:\n stop_words_l = stop_words\n else:\n stop_words_l = []\n\n s_fs = s_f.split()\n s_fs = list(filter(lambda w: w not in stop_words_l, s_fs))\n s_fs = [w.lower() for w in s_fs]\n \n return s_fs\n\nprint()\n\nassert tokenize(\"This, is the way; that things will end\", stop_words=['the', 'is']) == \\\n ['this', 'way', 'that', 'things', 'will', 'end']\nwasteland = \"\"\"\nAPRIL is the cruellest month, breeding\nLilacs out of the dead land, mixing\nMemory and desire, stirring\nDull roots with spring rain.\n\"\"\"\n\nassert tokenize(wasteland, stop_words='is the of and') == \\\n ['april','cruellest','month','breeding','lilacs','out','dead','land',\n 'mixing','memory','desire','stirring','dull','roots','with','spring',\n 'rain']",
"Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts.",
"def count_words(data):\n count = {}\n for w in range(0, len(data)):\n if data[w] in count:\n count[data[w]] += 1\n else:\n count[data[w]] = 1\n \n #this does not sort correctly, and from what I can tell, dictionaries can't be sorted anyway\n return(count)\n \n\nassert count_words(tokenize('this and the this from and a a a')) == \\\n {'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2}",
"Write a function sort_word_counts that return a list of sorted word counts:\n\nEach element of the list should be a (word, count) tuple.\nThe list should be sorted by the word counts, with the higest counts coming first.\nTo perform this sort, look at using the sorted function with a custom key and reverse\n argument.",
"def sort_word_counts(wc):\n \"\"\"Return a list of 2-tuples of (word, count), sorted by count descending.\"\"\"\n wordlist = []\n n = 0\n for w in wc:\n wordlist.append((w, wc[w]))\n \n #http://stackoverflow.com/questions/3121979/how-to-sort-list-tuple-of-lists-tuples\n wordlist_s = sorted(wordlist, key=lambda tup: tup[1], reverse=True)\n print(wordlist_s)\n return(wordlist_s)\n \n\nassert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \\\n [('a', 4), ('this', 3), ('and', 2), ('the', 1)]",
"Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt:\n\nRead the file into a string.\nTokenize with stop words of 'the of and a to in is it that as'.\nPerform a word count, the sort and save the result in a variable named swc.",
"# YOUR CODE HERE\nfile = open(\"mobydick_chapter1.txt\")\nmobydick = file.read()\n\nmobydick_t = tokenize(mobydick, stop_words = \"the of and a to in is it that as\")\n\nmobydick_wc = count_words(mobydick_t)\n\nswc = sort_word_counts(mobydick_wc)\n\nassert swc[0]==('i',43)\nassert len(swc)==848",
"Create a \"Cleveland Style\" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research...",
"# YOUR CODE HERE\nwords, freq = zip(*swc) \nplt.bar(np.arange(len(words)), freq, linestyle=\"dotted\")\n\nplt.title(\"Word Frequency\")\nplt.xlabel(\"Word\")\nplt.ylabel(\"Frequency\")\n\n#plt.xticks(words)\n#couldn't figure out how to format the plot correctly\n\n\n# YOUR CODE HERE\nwords, freq = zip(*swc) \nplt.scatter(freq, np.arange(0, len(words), -1))\n\nplt.title(\"Word Frequency\")\nplt.xlabel(\"Word\")\nplt.ylabel(\"Frequency\")\n\n#plt.xticks(words)\n#couldn't figure out how to format the plot correctly\n\n\nassert True # use this for grading the dotplot"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
amniskin/amniskin.github.io
|
assets/notebooks/2017/04/.ipynb_checkpoints/18_mcmc-checkpoint.ipynb
|
mit
|
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.pylab as plb\nimport seaborn\nimport pandas as pd\n\nnp.random.seed(1234)\n\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"last_expr\"\n\n%%bash\nif [ ! -d tmp ]\nthen\nmkdir tmp\nfi",
"$$\\text{???} = (MC)^2$$\nBackground\nWhat if we know the relative likelihood, but want the probability distribution?\n$$\\mathbb{P}(X=x) = \\frac{f(x)}{\\int_{-\\infty}^\\infty f(x)dx}$$\nBut what if $\\int f(x)dx$ is hard, or you can't sample from $f$ directly?\nThis is the problem we will be trying to solve.\nFirst approach\nIf space if bounded (integral is between $a,b$) we can use Monte Carlo to estimate $\\int\\limits_a^b f(x)dx$\n\nPick $\\alpha \\in (a,b)$\nCompute $f(\\alpha)/(b-a)$\nRepeat as necessary\nCompute the expected value of the computed values\n\nWhat if hard?\nWhat if we can't sample from $f(x)$ but can only determine likelihood ratios?\n$$\\frac{f(x)}{f(y)}$$\nMarkov Chain Monte Carlo\nThe obligatory basics\nA Markov Chain is a stochastic process (a collection of indexed random variables) such that $\\mathbb{P}(X_n=x|X_0,X_1,...,X_{n-1}) = \\mathbb{P}(X_n=x|X_{n-1})$.\nIn other words, the conditional probabilities only depend on the last state, not on any deeper history.\nWe call the set of all possible values of $X_i$ the state space and denote it by, $\\chi$.\nTransition Matrix\nLet $p_{ij} = \\mathbb{P}(X_1 = j | X_0 = i)$. We call the matrix $(p_{ij})$ the Transition Matrix of $X$ and denote it $P$.\nLet $\\mu_n = \\left(\\mathbb{P}(X_n=0), \\mathbb{P}(X_n=1),..., \\mathbb{P}(X_n=l)\\right)$ be the row vector corresponding to the \"probabilities\" of being at each state at the $n$th point in time (iteration).\nClaim: $\\mu_{i+1} = \\mu_0P^{i+1}$\n$$\\begin{align}\n(\\mu_0 P)j =& \\sum\\limits{i=1}^l\\mu_0(i)p_{ij}\\\n=&\\sum\\limits_{i=1}^l\\mathbb{P}(X_0=i)\\mathbb{P}(X_1=j | X_0=i)\\\n=&\\mathbb{P}(X_1 = j)\\\n=&\\mu_1(j)\n\\end{align}$$\nSo: $\\mu_1 = \\mu_0P$.\nStationary Distributions\nWe say that a distribution $\\pi$ is stationary if\n$$\\begin{gather}\\pi P = \\pi\\end{gather}$$\nLimiting Distribution?\nMain Theorem:\nAn irreducible, ergotic Markov Chain ${X_n}$ has a unique stationary distribution, $\\pi$. The limiting distribution exists and is equal to $\\pi$. And furthermore, if $g$ is any bounded function, then with probability 1:\n$$\\lim\\limits_{N\\to\\infty}\\frac{1}{N}\\sum\\limits_{n=1}^Ng(X_n) \\rightarrow E_\\pi(g)$$\nRandom-Walk-Metropolis - Hastings:\nLet $f(x)$ be the relative likelihood function of our desired distribution.\n$q(y|x_i)$ known distribution easily sampled from (generally taken to be $N(x_i,b^2)$)\n1) Given $X_0,X_1,...,X_i$, pick $Y \\sim q(y|X_i)$\n2) Compute $r(X_i,Y) = \\min\\left(\\frac{f(Y)q(X_i|Y)}{f(X_i)q(Y|X_i)}, 1\\right)$\n3) Pick $a \\sim U(0,1)$\n4) Set $X_{i+1} = \\begin{cases} Y & \\text{if } a < r \\\nX_i & \\text{otherwise}\\end{cases}$\nThe Confidence Builder:\nWe would like to sample from and obtain a histogram of the Cauchy distribution:\n$$f(x) = \\frac{1}{\\pi}\\frac{1}{1+x^2}$$\n$$\\begin{align}\nq_{01} \\sim& N(x,0.1)\\\nq_1 \\sim& N(x,1)\\\nq_{10} \\sim& N(x,10)\\end{align}$$\nNote: Since $q$ is symmetric, $q(x|y) = q(y|x)$!",
"def metropolis_hastings(f, q, initial_state, num_iters):\n \"\"\"\n Generate a Markov Chain Monte Carlo using\n the Metropolis-Hastings algorithm.\n \n Parameters\n ----------\n f : function\n the [relative] likelood function for\n the distribution we would like to\n approximate\n q : function\n The conditional distribution to be\n sampled from (given an X_i, sample\n from q(X_i) to get potential X_i+1)\n initial_state : type accepted by f,q\n The initial state. This state will\n not be included as part of the \n Markov Chain.\n num_iters : int or float\n the number of desired iterations\n float is included to facilitate\n 1e5 type use\n \n Returns\n -------\n out : Python Array\n Array where out[i] = X_{i-1} because\n X_0 (the initial state) is not included\n \"\"\"\n MC = []\n X_i = initial_state\n for i in range(int(num_iters)):\n Y = q(X_i)\n r = min(f(Y)/f(X_i), 1)\n a = np.random.uniform()\n if a < r:\n X_i = Y\n MC.append(X_i)\n return MC\n\ndef metropolis_hastings(f, q, initial_state, num_iters):\n MC = []\n X_i = initial_state\n for i in range(int(num_iters)):\n Y = q(X_i)\n r = min(f(Y)/f(X_i), 1)\n a = np.random.uniform()\n if a < r:\n X_i = Y\n MC.append(X_i)\n return MC\n\ndef cauchy_dist(x):\n return 1/(np.pi*(1 + x**2))\ndef q(scale):\n return lambda x: np.random.normal(loc=x, scale=scale)\n\nfrom scipy.stats import cauchy\nCauchyInterval = np.linspace(cauchy.ppf(0.01),\n cauchy.ppf(0.99),\n 100);\n\nstd01 = metropolis_hastings(cauchy_dist, q(0.1), 0, 1000)\ntmpHist = plt.hist(std01, bins=20, normed=True);\ntmpLnSp = np.linspace(min(tmpHist[1]),\n max(tmpHist[1]),100)\nplt.plot(tmpLnSp, cauchy.pdf(tmpLnSp), 'r-')\nplt.show();\n\nstd1 = metropolis_hastings(cauchy_dist, q(1), 0, 1000)\ntmpHist = plt.hist(std1, bins=20, normed=True);\ntmpLnSp = np.linspace(min(tmpHist[1]),\n max(tmpHist[1]),100)\nplt.plot(tmpLnSp, cauchy.pdf(tmpLnSp), 'r-')\nplt.show();\n\nstd10 = metropolis_hastings(cauchy_dist, q(10), 0, 1000)\nplt.subplots()\ntmpHist = plt.hist(std10, bins=20, normed=True);\ntmpLnSp = np.linspace(min(tmpHist[1]),\n max(tmpHist[1]),100)\nplt.plot(tmpLnSp, cauchy.pdf(tmpLnSp), 'r-')\nplt.show()\n\nplt.figure(1);\nplt.subplot(3,1,1);\nplt.title(\"STD = 0.1\")\ntmpHist = plt.hist(std01, bins=20, normed=True);\ntmpLnSp = np.linspace(min(tmpHist[1]),\n max(tmpHist[1]),100)\nplt.plot(tmpLnSp, cauchy.pdf(tmpLnSp), 'r-')\nplt.subplot(3,1,2)\nplt.title(\"STD = 1\")\ntmpHist = plt.hist(std1, bins=20, normed=True);\ntmpLnSp = np.linspace(min(tmpHist[1]),\n max(tmpHist[1]),100)\nplt.plot(tmpLnSp, cauchy.pdf(tmpLnSp), 'r-')\nplt.subplot(3,1,3)\nplt.title(\"STD = 10\")\ntmpHist = plt.hist(std10, bins=20, normed=True);\ntmpLnSp = np.linspace(min(tmpHist[1]),\n max(tmpHist[1]),100)\nplt.plot(tmpLnSp, cauchy.pdf(tmpLnSp), 'r-')\nplt.tight_layout()\nplb.savefig(\"tmp/18-MCMC-Cauchy-Estimation.png\")\nplb.savefig(\"../../../pics/2017/04/18-MCMC-Cauchy-Estimation.png\");\nplt.show()",
"Estimation:\n$$q(y|x) \\sim N(0,b^2)$$",
"plt.figure(2)\nplt.subplot(3,1,1)\nplt.title(\"STD = 0.1\")\nplt.plot(std01)\nplt.subplot(3,1,2)\nplt.title(\"STD = 1\")\nplt.plot(std1)\nplt.subplot(3,1,3)\nplt.title(\"STD = 10\")\nplt.plot(std10)\nplt.tight_layout()\nplb.savefig(\"tmp/18-MCMC-Cauchy-Estimation_TS.png\")\nplb.savefig(\"../../../pics/2017/04/18-MCMC-Cauchy-Estimation_TS.png\")\nplt.show()",
"Another Aspect\n\nHow is this happening???\nA property called detailed balance, which means,\n$$\\pi_ip_{ij} = p_{ji}\\pi_j$$\nor in the continuous case:\n$$f(x)P_{xy} = f(y)P_{yx}$$\nBut we don't need to go over that... Unless you wanna...\nProof?\nLet $f$ be the desired distribution (in our example, it was the Cauchy Distribution), and let $q(y|x)$ be the distribution we draw from.\nFirst we'll show that detailed balance implies $\\pi$ (or $f$ if continuous) is the stable distribution!\nLet $\\pi_ip_{ij} = \\pi_jp_{ji}$ for discrete or for continuous $f(i)P_{ij}=P_{ji}f(j)$\n$$\\begin{align}\n(\\pi P)i =& \\sum\\limits_j\\pi_jP{ji} & (fP)(x)=&\\int f(y)p(x|y)dy\\\n=&\\sum\\limits_j\\pi_iP_{ij} & =& \\int f(x)p(y|x)dy\\\n=&\\pi_i\\sum\\limits_jP_{ij} & =& f(x)\\int p(y|x)dy\\\n=&\\pi_i & =& f(x)\n\\end{align}$$\nNow we'll show that our MCMC has the detailed balance property.\nWLOG: Assume $f(x)q(y|x) > f(y)q(x|y)$.\nNote: $r(x,y) = \\frac{f(y)q(x|y)}{f(x)q(y|x)}$, and $r(y,x) = 1$.\nThen,\n$$\\begin{align}\n\\pi_xp_{xy} =& f(x)\\mathbb{P}(X_1=y|X_0=x) & \\pi_yp_{yx} =& f(y)\\mathbb{P}(X_1=x|X_0=y)\\\n=& f(x)\\left[q(y|x)\\cdot r(x,y)\\right] & =&f(y)\\left[q(x|y)\\cdot r(y,x)\\right]\\\n=& f(x)\\left[q(y|x)\\cdot \\frac{f(y)q(x|y)}{f(x)q(y|x)}\\right] & =&f(y)\\left[q(x|y)\\cdot 1\\right]\\\n=& f(y)q(x|y) & =&f(y)q(x|y)\n\\end{align}$$\nThe intuition\nWe want equal hopportunity:\n$$f(x)p(y|x) = f(y)p(x|y)$$\n$$r(x,y) = \\min\\left(\\frac{f(y)q(x|y)}{f(x)q(y|x)},1\\right)$$\nModeling Change Point Models in Astrostatistics.\n$$\n\\begin{align}\nf(k,\\theta,\\lambda,b_1,b_2 | Y) \\alpha& \\prod\\limits_{i=1}^k\\frac{\\theta^{Y_i}e^{-\\theta}}{Y_i!} \\prod\\limits_{i=k+1}^n\\frac{\\lambda^{Y_i}e^{-\\lambda}}{Y_i!} \\\n&\\times\\frac{1}{\\Gamma(0.5)b_1^{0.5}}\\theta^{-0.5}e^{-\\theta/b_1} \\times\\frac{1}{\\Gamma(0.5)b_2^{0.5}}\\theta^{-0.5}e^{-\\theta/b_2}\\\n&\\times\\frac{e^{-1/b_1}}{b_1}\\frac{e^{-1/b_2}}{b_2}\\times \\frac{1}{n}\n\\end{align}\n$$",
"def psu_mcmc(X, q, numIters=10000):\n theta, lambd, k, b1, b2 = 1, 1, 20, 1, 1\n thetas, lambds, ks, b1s, b2s = [], [], [], [], []\n n = len(X)\n def f_k(theta, lambd, k, b1, b2):\n if 0 <= k and k <= n:\n return theta**sum(X[:k])*lambd**sum(X[k:])*np.exp(-k*theta-(n-k)*lambd)\n elif k < 0:\n return lambd**sum(X)*np.exp(-n*lambd)\n elif k > n:\n return theta**sum(X)*np.exp(-n*theta)\n def f_t(theta, k, b1):\n return theta**(sum(X[:k])+0.5)*np.exp(-theta*(k+1.0)/b1)\n def f_l(lambd, k, b2):\n return lambd**(sum(X[k:])+0.5)*np.exp(-lambd*((n-k)+1.0)/b2)\n def f_b(b, par):\n return np.exp(-(1 + par) / b) / (b*np.sqrt(b))\n for i in range(numIters):\n tmp = q(theta)\n if tmp < np.infty:\n r = min(1, f_t(tmp,k,b1)/f_t(theta,k,b1))\n if np.random.uniform(0,1) < r:\n theta = tmp\n tmp = q(lambd)\n if tmp < np.infty:\n r = min(1, f_l(tmp,k,b2)/f_l(lambd,k,b2))\n if np.random.uniform(0,1) < r:\n lambd = tmp\n tmp = q(b1)\n if tmp < np.infty:\n r = min(1, f_b(tmp, theta)/f_b(b1, theta))\n if np.random.uniform(0,1) < r:\n b1 = tmp\n tmp = q(b2)\n if tmp < np.infty:\n r = min(1, f_b(tmp, lambd)/f_b(b2, lambd))\n if np.random.uniform(0,1) < r:\n b2 = tmp\n tmp = q(k)\n if tmp < np.infty:\n r = min(1, f_k(theta, lambd, tmp, b1, b2) /\n f_k(theta, lambd, k, b1,b2))\n if np.random.uniform(0,1) < r:\n k = tmp\n thetas.append(theta)\n lambds.append(lambd)\n b1s.append(b1)\n b2s.append(b2)\n ks.append(k)\n return np.array([thetas,lambds,ks,b1s,b2s])\n\n%%bash\nif [ ! -f tmp/psu_data.tsv ]\nthen\nwget http://sites.stat.psu.edu/~mharan/MCMCtut/COUP551_rates.dat -O tmp/psu_data.tsv\nfi\n\npsu_data = []\nwith open(\"tmp/psu_data.tsv\", \"r\") as f:\n title = f.readline()\n for line in f:\n tmpArr = [x.strip() for x in line.split(\" \")]\n psu_data.append([int(x) for x in tmpArr if x != \"\"][1])\npsu_data = np.array(psu_data)\npsu_data\n\nmcmc2 = psu_mcmc(psu_data, q(1), 1000)\nplt.figure()\nplt.subplot(2,1,1)\nplt.hist(mcmc2[2] % len(psu_data), normed=True)\nplt.subplot(2,1,2)\nplt.plot(mcmc2[2])\nplt.show();\n\nfig = plt.figure()\nfig.suptitle(\"MCMC values for Change Point\")\nplt.subplot(2,1,1)\nplt.hist(mcmc2[2] % len(psu_data), normed=True)\nplt.subplot(2,1,2)\nplt.plot(mcmc2[2])\nplb.savefig(\"tmp/psu_graphs1.png\")\nplt.show()",
"",
"plt.plot(psu_data)\nplt.title(\"PSU Data\")\nplb.savefig(\"tmp/psu_ts.png\")\nplt.show()",
"stat.psu.edu\nPotential Issues with RWMetropolis-Hastings:\n\nRequires $f$ to be defined on all of $\\mathbb{R}$\nSo transform as needed\n\n\nCurse of dimensionality in tuning parameters\n\nOther forms\n\n\nGibbs Sampling\n\nTurn high dimensional sampling into iterative one-dimensional sampling\n\n\n\nGibbs with Metropolis-Hastings\n\n\nBibliography\nSummer School in Astrostatistics"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
evanmiltenburg/python-for-text-analysis
|
Chapters/Chapter 11 - Functions and scope.ipynb
|
apache-2.0
|
[
"Chapter 11: Functions and scope\nWe use an example from this website to show you some of the basics of writing a function. \nWe use some materials from this other Python course.\nWe have seen that Python has several built-in functions (e.g. print() or max()). But you can also create a function. A function is a reusable block of code that performs a specific task. Once you have defined a function, you can use it at any place in your Python script. You can even import a function from an external module (as we will see in the next chapter). Therefore, they are beneficial for tasks that you will perform more often. Plus, functions are a convenient way to order your code and make it more readable!\nAt the end of this chapter, you will be able to:\n\nwrite a function\nwork with function inputs\nunderstand the difference between (keyword and positional) arguments and parameters\nreturn zero, one, or multiple values\nwrite function docstrings\nunderstand the scope of variables\nstore your function in a Python module and call it\ndebug your functions\n\nIf you want to learn more about these topics, you might find the following link useful:\n\nTutorial: Defining Functions of your Own\nThe docstrings main formats\nPEP 287 -- reStructured Docstring Format\nIntroduction to assert\n\nNow let's get started!\nIf you have questions about this chapter, please contact us (cltl.python.course@gmail.com).\n1. Writing a function\nA function is an isolated chunk of code that has a name, gets zero or more parameters, and returns a value. In general, a function will do something for you based on the input parameters you pass it, and it will typically return a result. You are not limited to using functions available in the standard library or the ones provided by external parties. You can also write your own functions!\nWhenever you are writing a function, you need to think of the following things:\n* What is the purpose of the function?\n* How should I name the function?\n* What input does the function need?\n* What output should the function generate?\n1.1. Why use a function?\nThere are several good reasons why functions are a vital component of any non-ridiculous programmer:\n\nencapsulation: wrapping a piece of useful code into a function so that it can be used without knowledge of the specifics\ngeneralization: making a piece of code useful in varied circumstances through parameters\nmanageability: Dividing a complex program up into easy-to-manage chunks\nmaintainability: using meaningful names to make the program better readable and understandable\nreusability: a good function may be useful in multiple programs\nrecursion!\n\n1.2. How to define a function\nLet's say we want to sing a birthday song to Emily. Then we print the following lines:",
"print(\"Happy Birthday to you!\")\nprint(\"Happy Birthday to you!\")\nprint(\"Happy Birthday, dear Emily.\")\nprint(\"Happy Birthday to you!\")",
"This could be the purpose of a function: to print the lines of a birthday song for Emily. \nNow, we define a function to do this. Here is how you define a function:\n\nwrite def;\nthe name you would like to call your function;\na set of parentheses containing the parameter(s) of your function;\na colon;\na docstring describing what your function does;\nthe function definition;\nending with a return statement\n\nStatements must be indented so that Python knows what belongs in the function and what not. Functions are only executed when you call them. It is good practice to define your functions at the top of your program or in another Python module.\nWe give the function a clear name, happy_birthday_to_emily, and we define the function as shown below. Note that we specify what it does in the docstring at the beginning of the function:",
"def happy_birthday_to_emily(): # Function definition\n \"\"\"\n Print a birthday song to Emily.\n \"\"\"\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday, dear Emily.\")\n print(\"Happy Birthday to you!\")",
"If we execute the code above, we don't get any output. That's because we only told Python: \"Here's a function to do this, please remember it.\" If we actually want Python to execute everything inside this function, we have to call it:\n1.3 How to call a function\nIt is important to distinguish between a function definition and a function call. We illustrate this in 1.3.1. You can also call functions from within other functions. This will become useful when you split up your code into small chunks that can be combined to solve a larger problem. This is illustrated in 1.3.2. \n1.3.1) A simple function call\nA function is defined once. After the definition, Python has remembered what this function does in its memory.\nA function is executed/called as many times as we like. When calling a function, you should always use parenthesis.",
"# function definition:\n\ndef happy_birthday_to_emily(): # Function definition\n \"\"\"\n Print a birthday song to Emily.\n \"\"\"\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday, dear Emily.\")\n print(\"Happy Birthday to you!\")\n \n# function call:\n\nprint('Function call 1')\n\nhappy_birthday_to_emily()\n\nprint()\n# We can call the function as many times as we want (but we define it only once)\nprint('Function call 2')\n\nhappy_birthday_to_emily()\n\nprint()\n\nprint('Function call 3')\n\nhappy_birthday_to_emily()\n\nprint()\n# This will not call the function \n\nprint('This is not a function call')\nhappy_birthday_to_emily",
"1.3.2 Calling a function from within another function\nWe can also define functions that call other functions, which is very helpful if we want to split our task into smaller, more manageable subtasks:",
"def new_line():\n \"\"\"Print a new line.\"\"\"\n print()\n\ndef two_new_lines():\n \"\"\"Print two new lines.\"\"\"\n new_line()\n new_line()\n\nprint(\"Printing a single line...\")\nnew_line()\nprint(\"Printing two lines...\")\ntwo_new_lines()\nprint(\"Printed two lines\")",
"You can do the same tricks that we learnt to apply on the built-in functions, like asking for help or for a function type:",
"help(happy_birthday_to_emily)\n\ntype(happy_birthday_to_emily)",
"The help we get on a function will become more interesting once we learn about function inputs and outputs ;-)\n1.4 Working with function input\n1.4.1 Parameters and arguments\nWe use parameters and arguments to make a function execute a task depending on the input we provide. For instance, we can change the function above to input the name of a person and print a birthday song using this name. This results in a more generic function.\nTo understand how we use parameters and arguments, keep in mind the distinction between function definition and function call.\nParameter: The variable name in the function definition below is a parameter. Variables used in function definitions are called parameters. \nArgument: The variable my_name in the function call below is a value for the parameter name at the time when the function is called. We refer to such variables as arguments. We use arguments so we can direct the function to do different kinds of work when we call it at different times.",
"# function definition with using the parameter `name'\ndef happy_birthday(name): \n \"\"\"\n Print a birthday song with the \"name\" of the person inserted.\n \"\"\"\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday to you!\")\n print(f\"Happy Birthday, dear {name}.\")\n print(\"Happy Birthday to you!\")\n\n# function call using specifying the value of the argument\nhappy_birthday(\"James\")",
"We can also store the name in a variable:",
"my_name=\"James\"\nhappy_birthday(my_name)",
"If we forgot to specify the name, we get an error:",
"happy_birthday()",
"Functions can have multiple parameters. We can for example multiply two numbers in a function (using the two parameters x and y) and then call the function by giving it two arguments:",
"def multiply(x, y):\n \"\"\"Multiply two numeric values.\"\"\"\n result = x * y\n print(result)\n \nmultiply(2020,5278238)\nmultiply(2,3)",
"1.4.2 Positional vs keyword parameters and arguments\nThe function definition tells Python which parameters are positional and which are keyword. As you might remember, positional means that you have to give an argument for that parameter; keyword means that you can give an argument value, but this is not necessary because there is a default value.\nSo, to summarize these two notes, we distinguish between:\n1) positional parameters: (we indicate these when defining a function, and they are compulsory when calling the function)\n2) keyword parameters: (we indicate these when defining a function, but they have a default value - and are optional when calling the function)\nFor example, if we want to have a function that can either multiply two or three numbers, we can make the third parameter a keyword parameter with a default of 1 (remember that any number multiplied with 1 results in that number):",
"def multiply(x, y, third_number=1): # x and y are positional parameters, third_number is a keyword parameter\n \"\"\"Multiply two or three numbers and print the result.\"\"\"\n result=x*y*third_number\n print(result)\n\nmultiply(2,3) # We only specify values for the positional parameters\nmultiply(2,3,third_number=4) # We specify values for both the positional parameters, and the keyword parameter",
"If we do not specify a value for a positional parameter, the function call will fail (with a very helpful error message):",
"multiply(3)",
"1.5 Output: the return statement\nFunctions can have a return statement. The return statement returns a value back to the caller and always ends the execution of the function. This also allows us to use the result of a function outside of that function by assigning it to a variable:",
"def multiply(x, y):\n \"\"\"Multiply two numbers and return the result.\"\"\"\n multiplied = x * y\n return multiplied\n\n#here we assign the returned value to variable z\nresult = multiply(2, 5)\n\nprint(result)",
"We can also print the result directly (without assigning it to a variable), which gives us the same effect as using the print statements we used before:",
"print(multiply(30,20))",
"If we assign the result to a variable, but do not use the return statement, the function cannot return it. Instead, it returns None (as you can try out below).\nThis is important to realize: even functions without a return statement do return a value, albeit a rather boring one. This value is called None (it’s a built-in name). You have seen this already with list methods - for example list.append(val) adds a value to a list, but does not return anything explicitly.",
"def multiply_no_return(x, y):\n \"\"\"Multiply two numbers and does not return the result.\"\"\"\n result = x * y\n \nis_this_a_result = multiply_no_return(2,3)\nprint(is_this_a_result)",
"Returning multiple values\nSimilarly as the input, a function can also return multiple values as output. We call such a collection of values a tuple (does this term sound familiar ;-)?).",
"def calculate(x,y):\n \"\"\"Calculate product and sum of two numbers.\"\"\"\n product = x * y\n summed = x + y\n \n #we return a tuple of values\n return product, summed\n\n# the function returned a tuple and we unpack it to var1 and var2\nvar1, var2 = calculate(10,5)\n\nprint(\"product:\",var1,\"sum:\",var2)",
"Make sure you actually save your 2 values into 2 variables, or else you end up with errors or unexpected behavior:",
"#this will assign `var` to a tuple:\nvar = calculate(10,5)\nprint(var)\n\n#this will generate an error\nvar1, var2, var3 = calculate(10,5)",
"Saving the resulting values in different variables can be useful when you want to use them in different places in your code:",
"def sum_and_diff_len_strings(string1, string2):\n \"\"\"\n Return the sum of and difference between the lengths of two strings.\n \"\"\"\n sum_strings = len(string1) + len(string2)\n diff_strings = len(string1) - len(string2)\n return sum_strings, diff_strings\n\nsum_strings, diff_strings = sum_and_diff_len_strings(\"horse\", \"dog\")\nprint(\"Sum:\", sum_strings)\nprint(\"Difference:\", diff_strings)",
"1.6 Documenting your functions with docstrings\nDocstring is a string that occurs as the first statement in a function definition.\nFor consistency, always use \"\"\"triple double quotes\"\"\" around docstrings. Triple quotes are used even though the string fits on one line. This makes it easy to expand it later.\nThere's no blank line either before or after the docstring.\nThe docstring is a phrase ending in a period. It prescribes the function or method's effect as a command (\"Do this\", \"Return that\"), not as a description; e.g., don't write \"Returns the pathname ...\".\nIn practice, there are several formats for writing docstrings, and all of them contain more information than the single sentence description we mention here. Probably the most well-known format is reStructured Text. Here is an example of a function description in reStructured Text (reST):",
"def my_function(param1, param2):\n \"\"\"\n This is a reST style.\n\n :param param1: this is a first param\n :param param2: this is a second param\n :returns: this is a description of what is returned\n \"\"\"\n return ",
"You can see that this docstring describes the function goal, its parameters, its outputs, and the errors it raises.\nIt is a good practice to write a docstring for your functions, so we will always do this! For now we will stick with single-sentence docstrings\nYou can read more about this topic here, here, and here.\n1.7 Debugging a function\nSometimes, it can hard to write a function that works perfectly. A common practice in programming is to check whether the function performs as you expect it to do. The assert statement is one way of debugging your function. The syntax is as follows:\nassert code == your expected output,message to show when code does not work as you'd expected\nLet's try this on our simple function.",
"def is_even(p):\n \"\"\"Check whether a number is even.\"\"\"\n if p % 2 == 1:\n return False\n else:\n return True",
"If the function output is what you expect, Python will show nothing.",
"input_value = 2\nexpected_output = True\nactual_output = is_even(input_value)\nassert actual_output == expected_output, f'expected {expected_output}, got {actual_output}'",
"However, when the actual output is different from what we expected, we got an error. Let's say we made a mistake in writing the function.",
"def is_even(p):\n \"\"\"Check whether a number is even.\"\"\"\n if p % 2 == 1:\n return False\n else:\n return False\n\ninput_value = 2\nexpected_output = True\nactual_output = is_even(input_value)\nassert actual_output == expected_output, f'expected {expected_output}, got {actual_output}'",
"1.8 Storing a function in a Python module\nSince Python functions are nice blocks of code with a clear focus, wouldn't it be nice if we can store them in a file? By doing this, we make our code visually very appealing since we are only left with functions calls instead of function definitions.\nPlease open the file utils_chapter11.py (is in the same folder as the notebook you are now reading). In it, you will find three of the functions that we've shown so far in this notebook. So, how can we use those functions? We can import the function using the following syntax:\nfrom NAME OF FILE WITHOUT .PY import function name",
"from utils_chapter11 import happy_birthday\n\nhappy_birthday('George')\n\nfrom utils_chapter11 import multiply\n\nmultiply(1,2)\n\nfrom utils_chapter11 import is_even\n\nis_it_even = is_even(5)\nprint(is_it_even)",
"2. Variable scope\nPlease note: scope is a hard concept to grasp, but we think it is important to introduce it here. We will do our best to repeat it during the course.\nAny variables you declare in a function, as well as the arguments that are passed to a function will only exist within the scope of that function, i.e., inside the function itself. The following code will produce an error, because the variable x does not exist outside of the function:",
"def setx():\n \"\"\"Set the value of a variable to 1.\"\"\"\n x = 1\n \n\nsetx()\nprint(x)",
"Even when we return x, it does not exist outside of the function:",
"def setx():\n \"\"\"Set the value of a variable to 1.\"\"\"\n x = 1\n return x\n \nsetx()\nprint(x)",
"Also consider this:",
"x = 0\ndef setx():\n \"\"\"Set the value of a variable to 1.\"\"\"\n x = 1\nsetx()\nprint(x)",
"In fact, this code has produced two completely unrelated x's!\nSo, you can not read a local variable outside of the local context. Nevertheless, it is possible to read a global variable from within a function, in a strictly read-only fashion.",
"x = 1\ndef getx():\n \"\"\"Print the value of a variable x.\"\"\"\n print(x)\n \ngetx()",
"You can use two built-in functions in Python when you are unsure whether a variable is local or global. The function locals() returns a list of all local variables, and the function globals() - a list of all global variables. Note that there are many non-interesting system variables that these functions return, so in practice it is best to check for membership with the in operator. For example:",
"a=3\nb=2\n\ndef setb():\n \"\"\"Set the value of a variable b to 11.\"\"\"\n b=11\n c=20\n print(\"Is 'a' defined locally in the function:\", 'a' in locals())\n print(\"Is 'b' defined locally in the function:\", 'b' in locals())\n print(\"Is 'b' defined globally:\", 'b' in globals())\n \nsetb()\n\nprint(\"Is 'a' defined globally:\", 'a' in globals())\nprint(\"Is 'b' defined globally:\", 'b' in globals())\n\nprint(\"Is 'c' defined globally:\", 'c' in globals())",
"Finally, note that the local context stays local to the function, and is not shared even with other functions called within a function, for example:",
"def setb_again():\n \"\"\"Set the value of a variable to 3.\"\"\"\n b=3\n print(\"in 'setb_again' b =\", b)\n\ndef setb():\n \"\"\"Set the value of a variable b to 2.\"\"\"\n b=2\n setb_again()\n print(\"in 'setb' b =\", b)\nb=1\nsetb()\nprint(\"global b =\", b)",
"We call the function setb() from the global context, and we call the function setb_again() from the context of the function setb(). The variable b in the function setb_again() is set to 3, but this does not affect the value of this variable in the function setb() which is still 2. And as we saw before, the changes in setb() do not influence the value of the global variable (b=1).\nExercises\nExercise 1: \nWrite a function that converts meters to centimeters and prints the resulting value.",
"# you code here",
"Exercise 2: \nAdd another keyword parameter message to the multiply function, which will allow a user to print a message. The default value of this keyword parameter should be an empty string. Test this with 2 messages of your choice. Also test it without specifying a value for the keyword argument when calling a function.",
"# function to modify:\n\ndef multiply(x, y, third_number=1): \n \"\"\"Multiply two or three numbers and print the result.\"\"\"\n result=x*y*third_number\n print(result)",
"Exercise 3: \nWrite a function called multiple_new_lines which takes as argument an integer and prints that many newlines by calling the function newLine.",
"def new_line():\n \"\"\"Print a new line.\"\"\"\n print()\n \n# you code here",
"Exercise 4: \nLet's refactor the happy birthday function to have no repetition. Note that previously we print \"Happy birthday to you!\" three times. Make another function happy_birthday_to_you() that only prints this line and call it inside the function happy_birthday(name).",
"def happy_birthday_to_you():\n # your code here\n\n# original function - replace the print statements by the happy_birthday_to_you() function:\ndef happy_birthday(name): \n \"\"\"\n Print a birthday song with the \"name\" of the person inserted.\n \"\"\"\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday to you!\")\n print(\"Happy Birthday, dear \" + name + \".\")\n print(\"Happy Birthday to you!\")",
"Exercise 5: \nTry to figure out what is going on in the following examples. How does Python deal with the order of calling functions?",
"def multiply(x, y, third_number=1): \n \"\"\"Multiply two or three numbers and print the result.\"\"\"\n result=x*y*third_number\n \n return result\n \nprint(multiply(1+1,6-2))\nprint(multiply(multiply(4,2),multiply(2,5)))\nprint(len(str(multiply(10,100))))\n",
"Exercise 6: \nComplete this code to switch the values of two variables:",
"def switch_two_values(x,y):\n# your code here\n \na='orange'\nb='apple'\n\na,b = switch_two_values(a,b) # `a` should contain \"apple\" after this call, and `b` should contain \"orange\"\n\nprint(a,b)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
MilanUngerer/BEM_microwire
|
Code_instructions.ipynb
|
mit
|
[
"Explanation of ''ejecutor.py'' code\nThis notebook is an explanation to understand the functionality of the code that compute the field\nThe system of equations to solve this notebook consist in a matrix with two wires inserted represented with the matrix showed below . For more information on how to form this system please see https://github.com/MilanUngerer/BEM_microwire/blob/master/documentation/Tesis.pdf\n\\begin{equation}\n \\begin{bmatrix}\n -D_{ext}^{mm} - D_{int}^{mm} & S_{int}^{mm} + \\frac{1}{\\alpha}S_{ext}^{mm} & D_{ext}^{1m} & -S_{ext}^{1m} & D_{ext}^{2m} & -S_{ext}^{2m}\\\n \\\n -D_{ext}^{'mm} - D_{int}^{'mm} & (\\frac{\\alpha - 1}{2\\alpha})+ S_{int}^{'mm} + \\frac{1}{\\alpha}S_{ext}^{'mm} & D_{ext}^{'1m} & -S_{ext}^{'1m} & D_{ext}^{'2m} & -S_{ext}^{'2m}\\\n \\\n -D_{int}^{m1} & S_{int}^{m1} & -D_{ext}^{11} - D_{int}^{11} & \\alpha S_{int}^{11} + S_{ext}^{11} & D_{ext}^{21} & -S_{ext}^{21}\\\n \\\n -D_{int}^{'m1} & S_{int}^{'m1} & -D_{ext}^{'11} - D_{int}^{'11} & (\\frac{\\alpha - 1}{2})+\\alpha S_{int}^{'11} + S_{ext}^{'11} & D_{ext}^{'21} & -S_{ext}^{'21}\\\n \\\n -D_{int}^{m2} & S_{int}^{m2} & D_{ext}^{12} & -S_{ext}^{12} & -D_{ext}^{22} - D_{int}^{22} & \\alpha S_{int}^{22} + S_{ext}^{22}\\\n \\\n -D_{int}^{'m2} & S_{int}^{'m2} & D_{ext}^{'12} & -S_{ext}^{'12} & -D_{ext}^{'22} - D_{int}^{'22} & (\\frac{\\alpha - 1}{2})+\\alpha S_{int}^{'22} + S_{ext}^{'22}\\\n \\end{bmatrix}\n \\begin{bmatrix}\n u^{int}m\\\n \\\n \\frac{\\partial u^{int}_m}{\\partial n}\\\n \\\n u^{ext}_1\\\n \\\n \\frac{\\partial u^{ext}_1}{\\partial n}\\\n \\\n u^{ext}_2\\\n \\\n \\frac{\\partial u^{ext}_2}{\\partial n}\\\n \\end{bmatrix}\n =\n \\begin{bmatrix}\n u{inc}^1\\\n \\\n \\frac{\\partial u_{inc}^1}{\\partial n}\\\n \\\n 0\\\n \\\n 0\\\n \\\n 0\\\n \\\n 0\\\n \\\n \\end{bmatrix}\n\\end{equation} \nFirst of all we have to define some constants:",
"#Preambulo\nimport numpy as np\nimport bempp.api\nomega = 2.*np.pi*10.e9\ne0 = 8.854*1e-12*1e-18\nmu0 = 4.*np.pi*1e-7*1e6\nmue = (1.)*mu0\nee = (16.)*e0\nmui = (-2.9214+0.5895j)*mu0\nei = (82629.2677-200138.2211j)*e0\nk = omega*np.sqrt(e0*mu0)\nlam = 2*np.pi/k\nnm = np.sqrt((ee*mue)/(e0*mu0))\nnc = np.sqrt((ei*mui)/(e0*mu0))\nalfa_m = mue/mu0\nalfa_c = mui/mue\nantena = np.array([[1e4],[0.],[0.]])\nprint \"Numero de onda exterior:\", k\nprint \"Indice de refraccion matriz:\", nm\nprint \"Indice de refraccion conductor:\", nc\nprint \"Numero de onda interior matriz:\", nm*k\nprint \"Numero de onda interior conductor:\", nm*nc*k\nprint \"Indice de transmision matriz:\", alfa_m\nprint \"Indice de transmision conductor:\", alfa_c\nprint \"Longitud de onda:\", lam, \"micras\"",
"In the following lines are the codes to import the meshes that we will use",
"#Importando mallas\nmatriz = bempp.api.import_grid('/home/milan/matriz_12x12x300_E16772.msh')\ngrid_0 = bempp.api.import_grid('/home/milan/PH1_a5_l10_E5550_D2.msh')\ngrid_1 = bempp.api.import_grid('/home/milan/PH2_a5_l10_E5550_D2.msh')",
"Also, we have to define the boundary functions that we will use to apply the boundary conditions. In this case an armonic wave for Dirichlet and his derivate for Neumann",
"#Funciones de dirichlet y neumann\ndef dirichlet_fun(x, n, domain_index, result):\n result[0] = 1.*np.exp(1j*k*x[0])\ndef neumann_fun(x, n, domain_index, result):\n result[0] = 1.*1j*k*n[0]*np.exp(1j*k*x[0])",
"Now it's time to define the multitrace operators that represent the diagonal of the matrix. This operators have the information of the transmision between the geometries. The definition of the multitrace (A) is posible to see below:\n$$\nA =\n\\begin{bmatrix}\n-K & S\\\nD & K'\n\\end{bmatrix}\n$$\nwhere K represent the double layer boundary operator, S the single layer, D the hypersingular and K' the adjoint double layer bounday operator",
"#Operadores multitrazo\nAi_m = bempp.api.operators.boundary.helmholtz.multitrace_operator(matriz, nm*k)\nAe_m = bempp.api.operators.boundary.helmholtz.multitrace_operator(matriz, k)\nAi_0 = bempp.api.operators.boundary.helmholtz.multitrace_operator(grid_0,nm*nc*k)\nAe_0 = bempp.api.operators.boundary.helmholtz.multitrace_operator(grid_0,nm*k)\nAi_1 = bempp.api.operators.boundary.helmholtz.multitrace_operator(grid_1,nm*nc*k)\nAe_1 = bempp.api.operators.boundary.helmholtz.multitrace_operator(grid_1,nm*k)\n\n#Transmision en Multitrazo\nAe_m[0,1] = Ae_m[0,1]*(1./alfa_m)\nAe_m[1,1] = Ae_m[1,1]*(1./alfa_m)\nAi_0[0,1] = Ai_0[0,1]*alfa_c\nAi_0[1,1] = Ai_0[1,1]*alfa_c\nAi_1[0,1] = Ai_1[0,1]*alfa_c\nAi_1[1,1] = Ai_1[1,1]*alfa_c\n\n#Acople interior y exterior\nop_m = (Ai_m + Ae_m)\nop_0 = (Ai_0 + Ae_0)\nop_1 = (Ai_1 + Ae_1)",
"In order to obtain the spaces created with the multitrace opertaor it's posible to do the following:",
"#Espacios\ndirichlet_space_m = Ai_m[0,0].domain\nneumann_space_m = Ai_m[0,1].domain\ndirichlet_space_0 = Ai_0[0,0].domain\nneumann_space_0 = Ai_0[0,1].domain\ndirichlet_space_1 = Ai_1[0,0].domain\nneumann_space_1 = Ai_1[0,1].domain",
"To make the complete diagonal of the main matrix showed at beggining is necessary to define the identity operators:",
"#Operadores identidad\nident_m = bempp.api.operators.boundary.sparse.identity(neumann_space_m, neumann_space_m, neumann_space_m)\nident_0 = bempp.api.operators.boundary.sparse.identity(neumann_space_0, neumann_space_0, neumann_space_0)\nident_1 = bempp.api.operators.boundary.sparse.identity(neumann_space_1, neumann_space_1, neumann_space_1)",
"And now assembly with the multitrace operators,",
"#Operadores diagonales\nop_m[1,1] = op_m[1,1] + 0.5 * ident_m * ((alfa_m -1)/alfa_m)\nop_0[1,1] = op_0[1,1] + 0.5 * ident_0* (alfa_c - 1)\nop_1[1,1] = op_1[1,1] + 0.5 * ident_1* (alfa_c - 1)",
"The contribution between the different geometries are represented via the operators between the meshes, below are showed the codes to create the operator between the meshes:",
"#Operadores entre mallas\nSLP_m_0 = bempp.api.operators.boundary.helmholtz.single_layer(neumann_space_m, dirichlet_space_0, dirichlet_space_0, nm*k)\nSLP_0_m = bempp.api.operators.boundary.helmholtz.single_layer(neumann_space_0, dirichlet_space_m, dirichlet_space_m, nm*k)\nDLP_m_0 = bempp.api.operators.boundary.helmholtz.double_layer(dirichlet_space_m, dirichlet_space_0, dirichlet_space_0, nm*k)\nDLP_0_m = bempp.api.operators.boundary.helmholtz.double_layer(dirichlet_space_0, dirichlet_space_m, dirichlet_space_m, nm*k)\nADLP_m_0 = bempp.api.operators.boundary.helmholtz.adjoint_double_layer(neumann_space_m, neumann_space_0, neumann_space_0, nm*k)\nADLP_0_m = bempp.api.operators.boundary.helmholtz.adjoint_double_layer(neumann_space_0, neumann_space_m, neumann_space_m, nm*k)\nHYP_m_0 = bempp.api.operators.boundary.helmholtz.hypersingular(dirichlet_space_m, neumann_space_0, neumann_space_0, nm*k)\nHYP_0_m = bempp.api.operators.boundary.helmholtz.hypersingular(dirichlet_space_0, neumann_space_m, neumann_space_m, nm*k)\nSLP_0_1 = bempp.api.operators.boundary.helmholtz.single_layer(neumann_space_0, dirichlet_space_1, dirichlet_space_1, nm*k)\nDLP_0_1 = bempp.api.operators.boundary.helmholtz.double_layer(dirichlet_space_0, dirichlet_space_1, dirichlet_space_1, nm*k)\nADLP_0_1 = bempp.api.operators.boundary.helmholtz.adjoint_double_layer(neumann_space_0, neumann_space_1, neumann_space_1, nm*k)\nHYP_0_1 = bempp.api.operators.boundary.helmholtz.hypersingular(dirichlet_space_0, neumann_space_1, neumann_space_1, nm*k)\nSLP_m_1 = bempp.api.operators.boundary.helmholtz.single_layer(neumann_space_m, dirichlet_space_1, dirichlet_space_1, nm*k)\nSLP_1_m = bempp.api.operators.boundary.helmholtz.single_layer(neumann_space_1, dirichlet_space_m, dirichlet_space_m, nm*k)\nDLP_m_1 = bempp.api.operators.boundary.helmholtz.double_layer(dirichlet_space_m, dirichlet_space_1, dirichlet_space_1, nm*k)\nDLP_1_m = bempp.api.operators.boundary.helmholtz.double_layer(dirichlet_space_1, dirichlet_space_m, dirichlet_space_m, nm*k)\nADLP_m_1 = bempp.api.operators.boundary.helmholtz.adjoint_double_layer(neumann_space_m, neumann_space_1, neumann_space_1, nm*k)\nADLP_1_m = bempp.api.operators.boundary.helmholtz.adjoint_double_layer(neumann_space_1, neumann_space_m, neumann_space_m, nm*k)\nHYP_m_1 = bempp.api.operators.boundary.helmholtz.hypersingular(dirichlet_space_m, neumann_space_1, neumann_space_1, nm*k)\nHYP_1_m = bempp.api.operators.boundary.helmholtz.hypersingular(dirichlet_space_1, neumann_space_m, neumann_space_m, nm*k)\nSLP_1_0 = bempp.api.operators.boundary.helmholtz.single_layer(neumann_space_1, dirichlet_space_0, dirichlet_space_0, nm*k)\nDLP_1_0 = bempp.api.operators.boundary.helmholtz.double_layer(dirichlet_space_1, dirichlet_space_0, dirichlet_space_0, nm*k)\nADLP_1_0 = bempp.api.operators.boundary.helmholtz.adjoint_double_layer(neumann_space_1, neumann_space_0, neumann_space_0, nm*k)\nHYP_1_0 = bempp.api.operators.boundary.helmholtz.hypersingular(dirichlet_space_1, neumann_space_0, neumann_space_0, nm*k)",
"The first subinedx corresponds to the domain space, the second one to the range space. Now is time to create the big block that will have all the operators together, in this case the size is 6X6",
"#Matriz de operadores\nblocked = bempp.api.BlockedOperator(6,6)",
"Below are showed the form to assembly all the operators in the big block:",
"#Diagonal\nblocked[0,0] = op_m[0,0]\nblocked[0,1] = op_m[0,1]\nblocked[1,0] = op_m[1,0]\nblocked[1,1] = op_m[1,1]\nblocked[2,2] = op_0[0,0]\nblocked[2,3] = op_0[0,1]\nblocked[3,2] = op_0[1,0]\nblocked[3,3] = op_0[1,1]\nblocked[4,4] = op_1[0,0]\nblocked[4,5] = op_1[0,1]\nblocked[5,4] = op_1[1,0]\nblocked[5,5] = op_1[1,1]\n\n#Contribucion hilos-matriz\nblocked[0,2] = DLP_0_m\nblocked[0,3] = -SLP_0_m\nblocked[1,2] = -HYP_0_m\nblocked[1,3] = -ADLP_0_m\nblocked[0,4] = DLP_1_m\nblocked[0,5] = -SLP_1_m\nblocked[1,4] = -HYP_1_m\nblocked[1,5] = -ADLP_1_m\n\n#Contribucion hilos-hilos\nblocked[2,4] = DLP_1_0\nblocked[2,5] = -SLP_1_0\nblocked[3,4] = -HYP_1_0\nblocked[3,5] = -ADLP_1_0\n\n#Contribucion matriz-hilos\nblocked[2,0] = -DLP_m_0\nblocked[2,1] = SLP_m_0\nblocked[3,0] = HYP_m_0\nblocked[3,1] = ADLP_m_0\n\n#Contribucion hilos-hilos\nblocked[4,2] = DLP_0_1\nblocked[4,3] = -SLP_0_1\nblocked[5,2] = -HYP_0_1\nblocked[5,3] = -ADLP_0_1\n\n#Contribucion matriz-hilos\nblocked[4,0] = -DLP_m_1\nblocked[4,1] = SLP_m_1\nblocked[5,0] = HYP_m_1\nblocked[5,1] = ADLP_m_1",
"The definition of boundary conditions, the discretization of the operators and the discretization of right side are:",
"#Condiciones de borde\ndirichlet_grid_fun_m = bempp.api.GridFunction(dirichlet_space_m, fun=dirichlet_fun)\nneumann_grid_fun_m = bempp.api.GridFunction(neumann_space_m, fun=neumann_fun)\n\n#Discretizacion lado izquierdo\nblocked_discretizado = blocked.strong_form()\n\n#Discretizacion lado derecho\nrhs = np.concatenate([dirichlet_grid_fun_m.coefficients, neumann_grid_fun_m.coefficients,np.zeros(dirichlet_space_0.global_dof_count), np.zeros(neumann_space_0.global_dof_count), np.zeros(dirichlet_space_1.global_dof_count), np.zeros(neumann_space_1.global_dof_count)])",
"Now it's time to solve the system of equations, in this work we used a gmres. Also we save the solution and arrays to plot the convergence later.",
"#Sistema de ecuaciones\nimport inspect\nfrom scipy.sparse.linalg import gmres\narray_it = np.array([])\narray_frame = np.array([])\nit_count = 0\ndef iteration_counter(x):\n global array_it\n global array_frame\n global it_count\n it_count += 1\n frame = inspect.currentframe().f_back\n array_it = np.append(array_it, it_count)\n array_frame = np.append(array_frame, frame.f_locals[\"resid\"])\n print it_count, frame.f_locals[\"resid\"]\nprint(\"Shape of matrix: {0}\".format(blocked_discretizado.shape))\nx,info = gmres(blocked_discretizado, rhs, tol=1e-5, callback = iteration_counter, maxiter = 50000)\nprint(\"El sistema fue resuelto en {0} iteraciones\".format(it_count))\nnp.savetxt(\"Solucion.out\", x, delimiter=\",\")\n ",
"Now we can reorder the solution and use it for calculate the field in some point in the exterior of the matrix:",
"#Campo interior\ninterior_field_dirichlet_m = bempp.api.GridFunction(dirichlet_space_m, coefficients=x[:dirichlet_space_m.global_dof_count])\ninterior_field_neumann_m = bempp.api.GridFunction(neumann_space_m,coefficients=x[dirichlet_space_m.global_dof_count:dirichlet_space_m.global_dof_count + neumann_space_m.global_dof_count])\n\n#Campo exterior\nexterior_field_dirichlet_m = interior_field_dirichlet_m\nexterior_field_neumann_m = interior_field_neumann_m*(1./alfa_m)\n\n#Calculo campo en antena\nslp_pot_ext_m = bempp.api.operators.potential.helmholtz.single_layer(dirichlet_space_m, antena, k)\ndlp_pot_ext_m = bempp.api.operators.potential.helmholtz.double_layer(dirichlet_space_m, antena, k)\nCampo_en_antena = (dlp_pot_ext_m * exterior_field_dirichlet_m - slp_pot_ext_m * exterior_field_neumann_m).ravel() + np.exp(1j*k*antena[0])\nprint \"Valor del campo en receptor:\", Campo_en_antena\n\n",
"Finally we plot the convergence and export it",
"import matplotlib\nmatplotlib.use(\"Agg\")\nfrom matplotlib import pyplot\nfrom matplotlib import rcParams\nrcParams[\"font.family\"] = \"serif\"\nrcParams[\"font.size\"] = 20\npyplot.figure(figsize = (15,10))\npyplot.title(\"Convergence\")\npyplot.plot(array_it, array_frame, lw=2)\npyplot.xlabel(\"iteration\")\npyplot.ylabel(\"residual\")\npyplot.grid()\npyplot.savefig(\"Convergence.pdf\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
campagnucci/api_sof
|
SOF_Execucao_Orcamentaria_SMESP.ipynb
|
gpl-3.0
|
[
"Explorando as despesas da educação na cidade de São Paulo\nUm tutorial de primeiros passos para acessar a execução orçamentária da Secretaria de Educação do município usando Python e a biblioteca de análise de dados Pandas *\nPasso 1. Cadastro na API e token de acesso\n\nAcessar a Vitrine de APIs da Prodam:https://api.prodam.sp.gov.br/store/\nSelecione a API do SOF\nClique em \"Inscrever-se\"\nAcesse o menu \"Minhas assinaturas\"\nGere uma chave de acesso de produção; coloque um valor de validade negativo, para evitar que expire\nCopie o Token de Acesso\n\nPasso 2. Teste na API Console\nA API Console (entre na aba com esse nome neste link aqui) é uma interface que permite testar as diferentes consultas e obter a URL com os parâmetros desejados. Por exemplo, se deseja obter todos os contratos da Secretaria de Educação em 2017, basta entrar no item /consultaContrato e informar \"2017\" no campo anoContrato e \"16\" (código da Educação) no campo codOrgao. A URL resultante dessa consulta e que você vai usar nos passos seguintes é https://gatewayapi.prodam.sp.gov.br:443/financas/orcamento/sof/v2.1.0/consultaContrato?anoContrato=2017&codOrgao=16 \nPasso 3. Mãos ao Pandas!\nEste é o script que consulta a API (para qualquer URL gerada acima) e transforma o arquivo obtido em formato json para um Data Frame do Pandas, a partir do qual será possível fazer as análises. Substitua a constante TOKEN pelo seu código de assinatura!",
"import pandas as pd\nimport requests\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nTOKEN = '198f959a5f39a1c441c7c863423264'\n\nbase_url = \"https://gatewayapi.prodam.sp.gov.br:443/financas/orcamento/sof/v2.1.0\"\n\nheaders={'Authorization' : str('Bearer ' + TOKEN)}\n\nanos = [2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]\n\n#Anos desejados na consulta; é possível consultar informações a partir de 2003",
"Orçamento\nPrimeiro, vamos ter uma visão geral do que foi orçado para a Secretaria Municipal de Educação desde 2011 até o ano corrente, bem como os valores congelados e já executados. Isso é possível com a consulta \"Despesas\"",
"df_lista = []\na = 0 \n\nfor ano in anos:\n \"\"\"consulta todos os anos da lista acima\"\"\"\n \n url_orcado = '{base_url}/consultarDespesas?anoDotacao={ano}&mesDotacao=12&codOrgao=16'.format(base_url=base_url, ano=ano)\n request_orcado = requests.get(url_orcado,\n headers=headers,\n verify=True).json()\n \n df_orcado = pd.DataFrame(request_orcado['lstDespesas'])\n df_lista.append(df_orcado)\n df_lista[a].loc[:,'anoExercicio'] = str(ano)\n #adiciona uma coluna com o ano de exercício, já que esta informação não vem na API\n a += 1\n\ndf_total = pd.concat(df_lista, axis=0, ignore_index=True)\n\ndf_total",
"Uma visão dos valores orçados (valor atualizado no início do ano, após projeção mais adequada das receitas) e o liquidado, nos últimos meses do ano:",
"series = df_total[['anoExercicio', 'valOrcadoAtualizado','valLiquidado']].set_index('anoExercicio')\n\nseries = series[['valOrcadoAtualizado', 'valLiquidado']].divide(1000000000)\n\ngrafico1 = series[['valOrcadoAtualizado','valLiquidado']].plot(kind='bar', title =\"Orçado x Liquidado\", figsize=(15, 7), legend=True, fontsize=12)\n\ngrafico1.set_xlabel(\"Ano\", fontsize=12)\n\ngrafico1.set_ylabel(\"Em bilhões de R$\", fontsize=12)\n\nplt.show(grafico1)",
"Empenhos\nEmpenho é o ato em que autoridade verifica a existência do crédito orçamentário e autoriza a execução da despesa (por exemplo, para realizar uma licitação). A partir daí, os valores vão sendo liquidados e pagos conforme a execução de um contrato. \nA API fornece apenas uma página na consulta. O script abaixo checa a quantidade de páginas nos metadados da consulta e itera o número de vezes necessário para obter todas as páginas.\nOpção 1. Um ano inteiro\nVamos ver quanto a Secretaria Municipal de Educação empenhou de seu orçamento em 2017.",
"pagination = '&numPagina={PAGE}'\nano_empenho = 2017\n\nrequest_empenhos = requests.get('{base_url}/consultaEmpenhos?anoEmpenho={ano}&mesEmpenho=12&codOrgao=16'.format(base_url=base_url, ano=ano_empenho), \n headers=headers, verify=True).json()\n \n number_of_pages = request_empenhos['metadados']['qtdPaginas']\n\n todos_empenhos = []\n \n todos_empenhos = todos_empenhos + request_empenhos['lstEmpenhos']\n \n if number_of_pages>1:\n \n for p in range(2, number_of_pages+1):\n \n request_empenhos = requests.get('{base_url}/consultaEmpenhos?anoEmpenho={ano}&mesEmpenho=12&codOrgao=16'.format(base_url=base_url, ano=ano_empenho) + pagination.format(PAGE=p), \n headers=headers, verify=True).json()\n \n todos_empenhos = todos_empenhos + request_empenhos['lstEmpenhos']\n \ndf_empenhos = pd.DataFrame(todos_empenhos)",
"Opção 2. Série Histórica",
"pagination = '&numPagina={PAGE}'",
"Atenção: as consultas podem demorar horas, a depender da quantidade de anos requerida; verifique se o número de anos acima é realmente necessário; faça apenas isso uma vez, e guarde a base para análises futuras",
"df_empenhos_lista = []\n\nfor ano in anos:\n\n request_empenhos = requests.get('{base_url}/consultaEmpenhos?anoEmpenho={ano}&mesEmpenho=12&codOrgao=16'.format(base_url=base_url, ano=ano), \n headers=headers, verify=True).json()\n \n number_of_pages = request_empenhos['metadados']['qtdPaginas']\n\n todos_empenhos = []\n \n todos_empenhos = todos_empenhos + request_empenhos['lstEmpenhos']\n \n if number_of_pages>1:\n \n for p in range(2, number_of_pages+1):\n \n request_empenhos = requests.get('{base_url}/consultaEmpenhos?anoEmpenho={ano}&mesEmpenho=12&codOrgao=16'.format(base_url=base_url, ano=ano) + pagination.format(PAGE=p), \n headers=headers, verify=True).json()\n \n todos_empenhos = todos_empenhos + request_empenhos['lstEmpenhos']\n \n df_empenhos = pd.DataFrame(todos_empenhos)\n \n df_empenhos_lista.append(df_empenhos)\n\ndf_empenhos_serie = pd.concat(df_empenhos_lista, ignore_index=True)\n\ndf_empenhos_serie.columns",
"Com os passos acima, fizemos a requisição de todas as páginas e convertemos o arquivo formato json em um DataFrame. Agora podemos trabalhar com a análise desses dado no Pandas. Para checar quantos registros existentes, vamos ver o final da lista (aqui havia apenas 2016-2017):",
"df_empenhos_serie.tail()",
"Modalidades de Aplicação\nAqui vemos a quantidade de recursos aplicados na Saúde, a título de exemplo, por Modalidade -- se é aplicação na rede direta ou repasse a organizações sociais. Note que o mesmo poderia ser feito para qualquer órgão, ou mesmo para a Prefeitura como um todo:",
"modalidades = df_empenhos_serie.groupby('txtModalidadeAplicacao')['valTotalEmpenhado', 'valLiquidado'].sum()\n\nmodalidades\n\n# Outra maneira de fazer a mesma operação:\n#pd.pivot_table(df_empenhos, values='valTotalEmpenhado', index=['txtModalidadeAplicacao'], aggfunc=np.sum)",
"Maiores despesas\nAqui vamos produzir a lista das 15 maiores despesas da Educação neste período:",
"despesas = pd.pivot_table(df_empenhos_serie, \n values=['valLiquidado', 'valPagoExercicio'], \n index=['numCpfCnpj', 'txtRazaoSocial', 'txtDescricaoProjetoAtividade'], \n aggfunc=np.sum).sort_values('valPagoExercicio', axis=0, ascending=False, inplace=False, kind='quicksort', na_position='last')\n\ndespesas.head(15)",
"Fontes de recursos\nAgrupamento dos empenhos por fonte de recursos:",
"fonte = pd.pivot_table(df_empenhos_serie, \n values=['valLiquidado', 'valPagoExercicio'], \n index=['txtDescricaoFonteRecurso'], \n aggfunc=np.sum).sort_values('valPagoExercicio', axis=0, ascending=False, inplace=False, kind='quicksort', na_position='last')\n\nfonte",
"Passo 4. Quer salvar um csv?\nO objetivo deste tutorial não era fazer uma análise exaustiva da base, mas apenas mostrar o que é possível a partir do consumo da API. Você também pode salvar toda a base de empenhos num arquivo .csv e trabalhar no seu Excel (super te entendo). O Pandas também ajuda nisso! Assim:",
"df_empenhos_serie.to_csv('serie_empenhos.csv')",
"Pronto, seu arquivo está lá salvo no mesmo diretório deste jupyter notebook!"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
albahnsen/ML_SecurityInformatics
|
exercises/05-IntrusionDetection.ipynb
|
mit
|
[
"Exercise 05\nLogistic regression exercise to detect network intrusions\nSoftware to detect network intrusions protects a computer network from unauthorized users, including perhaps insiders. The intrusion detector learning task is to build a predictive model (i.e. a classifier) capable of distinguishing between bad connections, called intrusions or attacks, and good normal connections.\nThe 1998 DARPA Intrusion Detection Evaluation Program was prepared and managed by MIT Lincoln Labs. The objective was to survey and evaluate research in intrusion detection. A standard set of data to be audited, which includes a wide variety of intrusions simulated in a military network environment, was provided. The 1999 KDD intrusion detection contest uses a version of this dataset.\nLincoln Labs set up an environment to acquire nine weeks of raw TCP dump data for a local-area network (LAN) simulating a typical U.S. Air Force LAN. They operated the LAN as if it were a true Air Force environment, but peppered it with multiple attacks.\nThe raw training data was about four gigabytes of compressed binary TCP dump data from seven weeks of network traffic. This was processed into about five million connection records. Similarly, the two weeks of test data yielded around two million connection records. description\nA connection is a sequence of TCP packets starting and ending at some well defined times, between which data flows to and from a source IP address to a target IP address under some well defined protocol. Each connection is labeled as either normal, or as an attack, with exactly one specific attack type. Each connection record consists of about 100 bytes.\nAttacks fall into four main categories:\n\nDOS: denial-of-service, e.g. syn flood;\nR2L: unauthorized access from a remote machine, e.g. guessing password;\nU2R: unauthorized access to local superuser (root) privileges, e.g., various buffer overflow attacks;\nprobing: surveillance and other probing, e.g., port scanning.\nIt is important to note that the test data is not from the same probability distribution as the training data, and it includes specific attack types not in the training data. This makes the task more realistic. Some intrusion experts believe that most novel attacks are variants of known attacks and the \"signature\" of known attacks can be sufficient to catch novel variants. The datasets contain a total of 24 training attack types, with an additional 14 types in the test data only. \n\nRead the data into Pandas",
"import pandas as pd\npd.set_option('display.max_columns', 500)\nimport zipfile\nwith zipfile.ZipFile('../datasets/UNB_ISCX_NSL_KDD.csv.zip', 'r') as z:\n f = z.open('UNB_ISCX_NSL_KDD.csv')\n data = pd.io.parsers.read_table(f, sep=',')\ndata.head()",
"Create X and y\nUse only same_srv_rate and dst_host_srv_count",
"y = (data['class'] == 'anomaly').astype(int)\n\ny.value_counts()\n\nX = data[['same_srv_rate','dst_host_srv_count']]",
"Exercise 5.1\nSplit the data into training and testing sets\nExercise 5.2\nFit a logistic regression model and examine the coefficients\nExercise 5.3\nMake predictions on the testing set and calculate the accuracy\nExercise 5.4\nConfusion matrix of predictions\nWhat is the percentage of detected anomalies\nExercise 5.5\nIncrease sensitivity by lowering the threshold for predicting anomaly connection\nCreate a new classifier by changing the probability threshold to 0.3\nWhat is the new confusion matrix?\nWhat is the new percentage of detected anomalies?"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
spennihana/h2o-3
|
h2o-py/demos/H2O_tutorial_eeg_eyestate.ipynb
|
apache-2.0
|
[
"H2O Tutorial: EEG Eye State Classification\nAuthor: Erin LeDell\nContact: erin@h2o.ai\nThis tutorial steps through a quick introduction to H2O's Python API. The goal of this tutorial is to introduce through a complete example H2O's capabilities from Python. \nMost of the functionality for a Pandas DataFrame is exactly the same syntax for an H2OFrame, so if you are comfortable with Pandas, data frame manipulation will come naturally to you in H2O. The modeling syntax in the H2O Python API may also remind you of scikit-learn. \nReferences: H2O Python API documentation and H2O general documentation\nInstall H2O in Python\nPrerequisites\nThis tutorial assumes you have Python 2.7 installed. The h2o Python package has a few dependencies which can be installed using pip. The packages that are required are (which also have their own dependencies):\nbash\npip install requests\npip install tabulate\npip install scikit-learn\nIf you have any problems (for example, installing the scikit-learn package), check out this page for tips.\nInstall h2o\nOnce the dependencies are installed, you can install H2O. We will use the latest stable version of the h2o package, which is currently \"Tibshirani-8.\" The installation instructions are on the \"Install in Python\" tab on this page.\n```bash\nThe following command removes the H2O module for Python (if it already exists).\npip uninstall h2o\nNext, use pip to install this version of the H2O Python module.\npip install http://h2o-release.s3.amazonaws.com/h2o/rel-tibshirani/8/Python/h2o-3.6.0.8-py2.py3-none-any.whl\n```\nFor reference, the Python documentation for the latest stable release of H2O is here.\nStart up an H2O cluster\nIn a Python terminal, we can import the h2o package and start up an H2O cluster.",
"import h2o\n\n# Start an H2O Cluster on your local machine\nh2o.init()",
"If you already have an H2O cluster running that you'd like to connect to (for example, in a multi-node Hadoop environment), then you can specify the IP and port of that cluster as follows:",
"# This will not actually do anything since it's a fake IP address\n# h2o.init(ip=\"123.45.67.89\", port=54321)",
"Download EEG Data\nThe following code downloads a copy of the EEG Eye State dataset. All data is from one continuous EEG measurement with the Emotiv EEG Neuroheadset. The duration of the measurement was 117 seconds. The eye state was detected via a camera during the EEG measurement and added later manually to the file after analysing the video frames. '1' indicates the eye-closed and '0' the eye-open state. All values are in chronological order with the first measured value at the top of the data.\n\nWe can import the data directly into H2O using the import_file method in the Python API. The import path can be a URL, a local path, a path to an HDFS file, or a file on Amazon S3.",
"#csv_url = \"http://www.stat.berkeley.edu/~ledell/data/eeg_eyestate_splits.csv\"\ncsv_url = \"https://h2o-public-test-data.s3.amazonaws.com/smalldata/eeg/eeg_eyestate_splits.csv\"\ndata = h2o.import_file(csv_url)",
"Explore Data\nOnce we have loaded the data, let's take a quick look. First the dimension of the frame:",
"data.shape\n",
"Now let's take a look at the top of the frame:",
"data.head()",
"The first 14 columns are numeric values that represent EEG measurements from the headset. The \"eyeDetection\" column is the response. There is an additional column called \"split\" that was added (by me) in order to specify partitions of the data (so we can easily benchmark against other tools outside of H2O using the same splits). I randomly divided the dataset into three partitions: train (60%), valid (%20) and test (20%) and marked which split each row belongs to in the \"split\" column.\nLet's take a look at the column names. The data contains derived features from the medical images of the tumors.",
"data.columns",
"To select a subset of the columns to look at, typical Pandas indexing applies:",
"columns = ['AF3', 'eyeDetection', 'split']\ndata[columns].head()",
"Now let's select a single column, for example -- the response column, and look at the data more closely:",
"y = 'eyeDetection'\ndata[y]",
"It looks like a binary response, but let's validate that assumption:",
"data[y].unique()",
"If you don't specify the column types when you import the file, H2O makes a guess at what your column types are. If there are 0's and 1's in a column, H2O will automatically parse that as numeric by default. \nTherefore, we should convert the response column to a more efficient \"enum\" representation -- in this case it is a categorial variable with two levels, 0 and 1. If the only column in my data that is categorical is the response, I typically don't bother specifying the column type during the parse, and instead use this one-liner to convert it aftewards:",
"data[y] = data[y].asfactor()",
"Now we can check that there are two levels in our response column:",
"data[y].nlevels()",
"We can query the categorical \"levels\" as well ('0' and '1' stand for \"eye open\" and \"eye closed\") to see what they are:",
"data[y].levels()",
"We may want to check if there are any missing values, so let's look for NAs in our dataset. For tree-based methods like GBM and RF, H2O handles missing feature values automatically, so it's not a problem if we are missing certain feature values. However, it is always a good idea to check to make sure that you are not missing any of the training labels. \nTo figure out which, if any, values are missing, we can use the isna method on the diagnosis column. The columns in an H2O Frame are also H2O Frames themselves, so all the methods that apply to a Frame also apply to a single column.",
"data.isna()\n\ndata[y].isna()",
"The isna method doesn't directly answer the question, \"Does the response column contain any NAs?\", rather it returns a 0 if that cell is not missing (Is NA? FALSE == 0) and a 1 if it is missing (Is NA? TRUE == 1). So if there are no missing values, then summing over the whole column should produce a summand equal to 0.0. Let's take a look:",
"data[y].isna().sum()",
"Great, no missing labels. :-)\nOut of curiosity, let's see if there is any missing data in this frame:",
"data.isna().sum()",
"The sum is still zero, so there are no missing values in any of the cells.\nThe next thing I may wonder about in a binary classification problem is the distribution of the response in the training data. Is one of the two outcomes under-represented in the training set? Many real datasets have what's called an \"imbalanace\" problem, where one of the classes has far fewer training examples than the other class. Let's take a look at the distribution:",
"data[y].table()",
"Ok, the data is not exactly evenly distributed between the two classes -- there are more 0's than 1's in the dataset. However, this level of imbalance shouldn't be much of an issue for the machine learning algos. (We will revisit this later in the modeling section below).\nLet's calculate the percentage that each class represents:",
"n = data.shape[0] # Total number of training samples\ndata[y].table()['Count']/n",
"Split H2O Frame into a train and test set\nSo far we have explored the original dataset (all rows). For the machine learning portion of this tutorial, we will break the dataset into three parts: a training set, validation set and a test set.\nIf you want H2O to do the splitting for you, you can use the split_frame method. However, we have explicit splits that we want (for reproducibility reasons), so we can just subset the Frame to get the partitions we want. \nSubset the data H2O Frame on the \"split\" column:",
"train = data[data['split']==\"train\"]\ntrain.shape\n\nvalid = data[data['split']==\"valid\"]\nvalid.shape\n\ntest = data[data['split']==\"test\"]\ntest.shape",
"Machine Learning in H2O\nWe will do a quick demo of the H2O software using a Gradient Boosting Machine (GBM). The goal of this problem is to train a model to predict eye state (open vs closed) from EEG data. \nTrain and Test a GBM model",
"# Import H2O GBM:\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\n",
"We first create a model object of class, \"H2OGradientBoostingEstimator\". This does not actually do any training, it just sets the model up for training by specifying model parameters.",
"model = H2OGradientBoostingEstimator(distribution='bernoulli',\n ntrees=100,\n max_depth=4,\n learn_rate=0.1)",
"Specify the predictor set and response\nThe model object, like all H2O estimator objects, has a train method, which will actually perform model training. At this step we specify the training and (optionally) a validation set, along with the response and predictor variables.\nThe x argument should be a list of predictor names in the training frame, and y specifies the response column. We have already set y = \"eyeDetector\" above, but we still need to specify x.",
"x = list(train.columns)\nx\n\ndel x[12:14] #Remove the 13th and 14th columns, 'eyeDetection' and 'split'\nx",
"Now that we have specified x and y, we can train the model:",
"model.train(x=x, y=y, training_frame=train, validation_frame=valid)",
"Inspect Model\nThe type of results shown when you print a model, are determined by the following:\n- Model class of the estimator (e.g. GBM, RF, GLM, DL)\n- The type of machine learning problem (e.g. binary classification, multiclass classification, regression)\n- The data you specify (e.g. training_frame only, training_frame and validation_frame, or training_frame and nfolds)\nBelow, we see a GBM Model Summary, as well as training and validation metrics since we supplied a validation_frame. Since this a binary classification task, we are shown the relevant performance metrics, which inclues: MSE, R^2, LogLoss, AUC and Gini. Also, we are shown a Confusion Matrix, where the threshold for classification is chosen automatically (by H2O) as the threshold which maximizes the F1 score.\nThe scoring history is also printed, which shows the performance metrics over some increment such as \"number of trees\" in the case of GBM and RF.\nLastly, for tree-based methods (GBM and RF), we also print variable importance.",
"print(model)",
"Model Performance on a Test Set\nOnce a model has been trained, you can also use it to make predictions on a test set. In the case above, we just ran the model once, so our validation set (passed as validation_frame), could have also served as a \"test set.\" We technically have already created test set predictions and evaluated test set performance. \nHowever, when performing model selection over a variety of model parameters, it is common for users to train a variety of models (using different parameters) using the training set, train, and a validation set, valid. Once the user selects the best model (based on validation set performance), the true test of model performance is performed by making a final set of predictions on the held-out (never been used before) test set, test.\nYou can use the model_performance method to generate predictions on a new dataset. The results are stored in an object of class, \"H2OBinomialModelMetrics\".",
"perf = model.model_performance(test)\nprint(perf.__class__)",
"Individual model performance metrics can be extracted using methods like auc and mse. In the case of binary classification, we may be most interested in evaluating test set Area Under the ROC Curve (AUC).",
"perf.auc()\n\nperf.mse()",
"Cross-validated Performance\nTo perform k-fold cross-validation, you use the same code as above, but you specify nfolds as an integer greater than 1, or add a \"fold_column\" to your H2O Frame which indicates a fold ID for each row.\nUnless you have a specific reason to manually assign the observations to folds, you will find it easiest to simply use the nfolds argument.\nWhen performing cross-validation, you can still pass a validation_frame, but you can also choose to use the original dataset that contains all the rows. We will cross-validate a model below using the original H2O Frame which is called data.",
"cvmodel = H2OGradientBoostingEstimator(distribution='bernoulli',\n ntrees=100,\n max_depth=4,\n learn_rate=0.1,\n nfolds=5)\n\ncvmodel.train(x=x, y=y, training_frame=data)\n",
"This time around, we will simply pull the training and cross-validation metrics out of the model. To do so, you use the auc method again, and you can specify train or xval as True to get the correct metric.",
"print(cvmodel.auc(train=True))\nprint(cvmodel.auc(xval=True))",
"Grid Search\nOne way of evaluting models with different parameters is to perform a grid search over a set of parameter values. For example, in GBM, here are three model parameters that may be useful to search over:\n- ntrees: Number of trees\n- max_depth: Maximum depth of a tree\n- learn_rate: Learning rate in the GBM\nWe will define a grid as follows:",
"ntrees_opt = [5,50,100]\nmax_depth_opt = [2,3,5]\nlearn_rate_opt = [0.1,0.2]\n\nhyper_params = {'ntrees': ntrees_opt, \n 'max_depth': max_depth_opt,\n 'learn_rate': learn_rate_opt}",
"Define an \"H2OGridSearch\" object by specifying the algorithm (GBM) and the hyper parameters:",
"from h2o.grid.grid_search import H2OGridSearch\n\ngs = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params = hyper_params)",
"An \"H2OGridSearch\" object also has a train method, which is used to train all the models in the grid.",
"gs.train(x=x, y=y, training_frame=train, validation_frame=valid)",
"Compare Models",
"print(gs)\n\n# print out the auc for all of the models\nauc_table = gs.sort_by('auc(valid=True)',increasing=False)\nprint(auc_table)",
"The \"best\" model in terms of validation set AUC is listed first in auc_table.",
"best_model = h2o.get_model(auc_table['Model Id'][0])\nbest_model.auc()",
"The last thing we may want to do is generate predictions on the test set using the \"best\" model, and evaluate the test set AUC.",
"best_perf = best_model.model_performance(test)\nbest_perf.auc()",
"The test set AUC is approximately 0.96. Not bad!!"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mne-tools/mne-tools.github.io
|
0.24/_downloads/eb0c29f55af0173daab811d4f4dc2f40/simulated_raw_data_using_subject_anatomy.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Simulate raw data using subject anatomy\nThis example illustrates how to generate source estimates and simulate raw data\nusing subject anatomy with the :class:mne.simulation.SourceSimulator class.\nOnce the raw data is simulated, generated source estimates are reconstructed\nusing dynamic statistical parametric mapping (dSPM) inverse operator.",
"# Author: Ivana Kojcic <ivana.kojcic@gmail.com>\n# Eric Larson <larson.eric.d@gmail.com>\n# Kostiantyn Maksymenko <kostiantyn.maksymenko@gmail.com>\n# Samuel Deslauriers-Gauthier <sam.deslauriers@gmail.com>\n\n# License: BSD-3-Clause\n\nimport os.path as op\n\nimport numpy as np\n\nimport mne\nfrom mne.datasets import sample\n\nprint(__doc__)\n\n# In this example, raw data will be simulated for the sample subject, so its\n# information needs to be loaded. This step will download the data if it not\n# already on your machine. Subjects directory is also set so it doesn't need\n# to be given to functions.\ndata_path = sample.data_path()\nsubjects_dir = op.join(data_path, 'subjects')\nsubject = 'sample'\nmeg_path = op.join(data_path, 'MEG', subject)\n\n# First, we get an info structure from the sample subject.\nfname_info = op.join(meg_path, 'sample_audvis_raw.fif')\ninfo = mne.io.read_info(fname_info)\ntstep = 1 / info['sfreq']\n\n# To simulate sources, we also need a source space. It can be obtained from the\n# forward solution of the sample subject.\nfwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')\nfwd = mne.read_forward_solution(fwd_fname)\nsrc = fwd['src']\n\n# To simulate raw data, we need to define when the activity occurs using events\n# matrix and specify the IDs of each event.\n# Noise covariance matrix also needs to be defined.\n# Here, both are loaded from the sample dataset, but they can also be specified\n# by the user.\n\nfname_event = op.join(meg_path, 'sample_audvis_raw-eve.fif')\nfname_cov = op.join(meg_path, 'sample_audvis-cov.fif')\n\nevents = mne.read_events(fname_event)\nnoise_cov = mne.read_cov(fname_cov)\n\n# Standard sample event IDs. These values will correspond to the third column\n# in the events matrix.\nevent_id = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,\n 'visual/right': 4, 'smiley': 5, 'button': 32}\n\n\n# Take only a few events for speed\nevents = events[:80]",
"In order to simulate source time courses, labels of desired active regions\nneed to be specified for each of the 4 simulation conditions.\nMake a dictionary that maps conditions to activation strengths within\naparc.a2009s :footcite:DestrieuxEtAl2010 labels.\nIn the aparc.a2009s parcellation:\n\n'G_temp_sup-G_T_transv' is the label for primary auditory area\n'S_calcarine' is the label for primary visual area\n\nIn each of the 4 conditions, only the primary area is activated. This means\nthat during the activations of auditory areas, there are no activations in\nvisual areas and vice versa.\nMoreover, for each condition, contralateral region is more active (here, 2\ntimes more) than the ipsilateral.",
"activations = {\n 'auditory/left':\n [('G_temp_sup-G_T_transv-lh', 30), # label, activation (nAm)\n ('G_temp_sup-G_T_transv-rh', 60)],\n 'auditory/right':\n [('G_temp_sup-G_T_transv-lh', 60),\n ('G_temp_sup-G_T_transv-rh', 30)],\n 'visual/left':\n [('S_calcarine-lh', 30),\n ('S_calcarine-rh', 60)],\n 'visual/right':\n [('S_calcarine-lh', 60),\n ('S_calcarine-rh', 30)],\n}\n\nannot = 'aparc.a2009s'\n\n# Load the 4 necessary label names.\nlabel_names = sorted(set(activation[0]\n for activation_list in activations.values()\n for activation in activation_list))\nregion_names = list(activations.keys())",
"Create simulated source activity\nGenerate source time courses for each region. In this example, we want to\nsimulate source activity for a single condition at a time. Therefore, each\nevoked response will be parametrized by latency and duration.",
"def data_fun(times, latency, duration):\n \"\"\"Function to generate source time courses for evoked responses,\n parametrized by latency and duration.\"\"\"\n f = 15 # oscillating frequency, beta band [Hz]\n sigma = 0.375 * duration\n sinusoid = np.sin(2 * np.pi * f * (times - latency))\n gf = np.exp(- (times - latency - (sigma / 4.) * rng.rand(1)) ** 2 /\n (2 * (sigma ** 2)))\n return 1e-9 * sinusoid * gf",
"Here, :class:~mne.simulation.SourceSimulator is used, which allows to\nspecify where (label), what (source_time_series), and when (events) event\ntype will occur.\nWe will add data for 4 areas, each of which contains 2 labels. Since add_data\nmethod accepts 1 label per call, it will be called 2 times per area.\nEvoked responses are generated such that the main component peaks at 100ms\nwith a duration of around 30ms, which first appears in the contralateral\ncortex. This is followed by a response in the ipsilateral cortex with a peak\nabout 15ms after. The amplitude of the activations will be 2 times higher in\nthe contralateral region, as explained before.\nWhen the activity occurs is defined using events. In this case, they are\ntaken from the original raw data. The first column is the sample of the\nevent, the second is not used. The third one is the event id, which is\ndifferent for each of the 4 areas.",
"times = np.arange(150, dtype=np.float64) / info['sfreq']\nduration = 0.03\nrng = np.random.RandomState(7)\nsource_simulator = mne.simulation.SourceSimulator(src, tstep=tstep)\n\nfor region_id, region_name in enumerate(region_names, 1):\n events_tmp = events[np.where(events[:, 2] == region_id)[0], :]\n for i in range(2):\n label_name = activations[region_name][i][0]\n label_tmp = mne.read_labels_from_annot(subject, annot,\n subjects_dir=subjects_dir,\n regexp=label_name,\n verbose=False)\n label_tmp = label_tmp[0]\n amplitude_tmp = activations[region_name][i][1]\n if region_name.split('/')[1][0] == label_tmp.hemi[0]:\n latency_tmp = 0.115\n else:\n latency_tmp = 0.1\n wf_tmp = data_fun(times, latency_tmp, duration)\n source_simulator.add_data(label_tmp,\n amplitude_tmp * wf_tmp,\n events_tmp)\n\n# To obtain a SourceEstimate object, we need to use `get_stc()` method of\n# SourceSimulator class.\nstc_data = source_simulator.get_stc()",
"Simulate raw data\nProject the source time series to sensor space. Three types of noise will be\nadded to the simulated raw data:\n\nmultivariate Gaussian noise obtained from the noise covariance from the\n sample data\nblink (EOG) noise\nECG noise\n\nThe :class:~mne.simulation.SourceSimulator can be given directly to the\n:func:~mne.simulation.simulate_raw function.",
"raw_sim = mne.simulation.simulate_raw(info, source_simulator, forward=fwd)\nraw_sim.set_eeg_reference(projection=True)\n\nmne.simulation.add_noise(raw_sim, cov=noise_cov, random_state=0)\nmne.simulation.add_eog(raw_sim, random_state=0)\nmne.simulation.add_ecg(raw_sim, random_state=0)\n\n# Plot original and simulated raw data.\nraw_sim.plot(title='Simulated raw data')",
"Extract epochs and compute evoked responsses",
"epochs = mne.Epochs(raw_sim, events, event_id, tmin=-0.2, tmax=0.3,\n baseline=(None, 0))\nevoked_aud_left = epochs['auditory/left'].average()\nevoked_vis_right = epochs['visual/right'].average()\n\n# Visualize the evoked data\nevoked_aud_left.plot(spatial_colors=True)\nevoked_vis_right.plot(spatial_colors=True)",
"Reconstruct simulated source time courses using dSPM inverse operator\nHere, source time courses for auditory and visual areas are reconstructed\nseparately and their difference is shown. This was done merely for better\nvisual representation of source reconstruction.\nAs expected, when high activations appear in primary auditory areas, primary\nvisual areas will have low activations and vice versa.",
"method, lambda2 = 'dSPM', 1. / 9.\ninv = mne.minimum_norm.make_inverse_operator(epochs.info, fwd, noise_cov)\nstc_aud = mne.minimum_norm.apply_inverse(\n evoked_aud_left, inv, lambda2, method)\nstc_vis = mne.minimum_norm.apply_inverse(\n evoked_vis_right, inv, lambda2, method)\nstc_diff = stc_aud - stc_vis\n\nbrain = stc_diff.plot(subjects_dir=subjects_dir, initial_time=0.1,\n hemi='split', views=['lat', 'med'])",
"References\n.. footbibliography::"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
leonarduk/stockmarketview
|
timeseries-analysis-python/src/main/python/FinanceOps/02_Comparing_Stock_Indices.ipynb
|
apache-2.0
|
[
"Comparing Stock Indices\nby Magnus Erik Hvass Pedersen\n/ GitHub / Videos on YouTube\nIntroduction\nWhen comparing the historical returns on stock indices, it is a common mistake to only consider a single time-period.\nWe will compare three well-known stock indices for USA: The S&P 500 for large-cap stocks, the S&P 400 for mid-cap stocks, and the S&P 600 for small-cap stocks. We show that there are periods where each of these stock indices was better than the others.\nSo a more proper way of comparing stock indices is to consider all investment periods. For example, instead of just comparing the returns between 1. January 1990 to 1. January 2018, we consider all 1-year investment periods between 1990 and 2018. We also consider all 2-year investment periods, 3-year periods, and so on all the way up to 10-year investment periods. We then calculate and compare various statistics to assess which stock index was best.\nThis methodology was also used in the paper Comparison of U.S. Stock Indices.\nPython Imports\nThis Jupyter Notebook is implemented in Python v. 3.6 and requires various packages for numerical computations and plotting. See the installation instructions in the README-file.",
"%matplotlib inline\n\n# Imports from Python packages.\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FuncFormatter\nimport pandas as pd\nimport numpy as np\nimport os\n\n# Imports from FinanceOps.\nfrom data_keys import *\nfrom data import load_index_data, load_usa_cpi\nfrom data import load_usa_gov_bond_1year, common_period\nfrom returns import annualized_returns, bond_annualized_returns\nfrom recovery import prob_recovery",
"Load Data\nWe now load all the financial data we will be using.",
"# Define the ticker-names for the stocks we consider.\nticker_SP500 = \"S&P 500\"\nticker_SP400 = \"S&P 400\"\nticker_SP600 = \"S&P 600\"\n\n# All tickers for the stocks.\ntickers = [ticker_SP500, ticker_SP400, ticker_SP600]\n\n# Define longer names for the stocks.\nname_SP500 = \"S&P 500 (Large Cap)\"\nname_SP400 = \"S&P 400 (Mid Cap)\"\nname_SP600 = \"S&P 600 (Small Cap)\"\n\n# All names for the stocks.\nnames = [name_SP500, name_SP400, name_SP600]\n\n# Load the financial data for the stock indices.\ndf_SP500 = load_index_data(ticker=ticker_SP500)\ndf_SP400 = load_index_data(ticker=ticker_SP400,\n sales=False, book_value=False)\ndf_SP600 = load_index_data(ticker=ticker_SP600,\n sales=False, book_value=False)\n\n# All DataFrames for the stocks.\ndfs = [df_SP500, df_SP400, df_SP600]\n\n# Common date-range for the stocks.\nstart_date, end_date = common_period(dfs=dfs)\nprint(start_date, end_date)\n\n# Load the US CPI inflation index.\ncpi = load_usa_cpi()\n\n# Load the yields for US Gov. Bonds with 1-year maturity.\nbond_yields = load_usa_gov_bond_1year()\n\n# Max number of investment years to consider.\nnum_years = 10",
"Compare Total Returns\nThe first plot shows the so-called Total Return of the stock indices, which is the investor's return when dividends are reinvested in the same stock index and taxes are ignored.",
"def plot_total_returns(dfs, names, start_date=None, end_date=None):\n \"\"\"\n Plot and compare the Total Returns for the given DataFrames.\n \n :param dfs: List of Pandas DataFrames with TOTAL_RETURN data.\n :param names: Names of the stock indices.\n :param start_date: Plot from this date.\n :param end_date: Plot to this date.\n :return: None.\n \"\"\"\n \n # Create a new Pandas DataFrame which will be used\n # to combine the time-series and plot them.\n df2 = pd.DataFrame()\n\n # For all the given DataFrames and their names.\n for df, name in zip(dfs, names):\n # Get the Total Return for the period.\n tot_ret = df[TOTAL_RETURN][start_date:end_date]\n # Normalize it to start at 1.0\n tot_ret /= tot_ret[0]\n\n # Add it to the DataFrame.\n # It will be plotted with the given name.\n df2[name] = tot_ret\n\n # Plot it all.\n df2.plot(title=\"Total Return\")\n\nplot_total_returns(dfs=dfs, names=names,\n start_date=start_date, end_date=end_date)",
"This plot clearly shows that the S&P 400 (Mid-Cap) had a much higher Total Return than the S&P 500 (Large-Cap) and S&P 600 (Small-Cap), and the S&P 500 performed slightly worse than the S&P 600.\nBut this period was nearly 30 years. What if we consider shorter investment periods with different start and end-dates? We need more detailed statistics to answer these questions.\nCalculate Annualized Returns\nWe calculate the annualized returns of the stock indices for all investment periods of durations from 1 to 10 years.",
"def calc_ann_returns(df, start_date, end_date, num_years):\n \"\"\"\n Calculate the annualized returns for the Total Return\n of the given DataFrame.\n \n A list is returned so that ann_ret[0] is a Pandas Series\n with the ann.returns for 1-year periods, and ann_ret[1]\n are the ann.returns for 2-year periods, etc.\n \n :param df: Pandas DataFrame with TOTAL_RETURN data.\n :param start_date: Start-date for the data.\n :param end_date: End-date for the data.\n :return: List of Pandas Series.\n \"\"\"\n \n # Get the Total Return for the given period.\n tot_ret = df[TOTAL_RETURN][start_date:end_date]\n \n # Calculate the annualized returns for all\n # investment periods between 1 and num_years.\n ann_ret = [annualized_returns(series=tot_ret, years=years)\n for years in range(1, num_years+1)]\n \n return ann_ret\n\n# Annualized returns for the S&P 500.\nann_ret_SP500 = calc_ann_returns(df=df_SP500,\n start_date=start_date,\n end_date=end_date,\n num_years=num_years)\n\n# Annualized returns for the S&P 400.\nann_ret_SP400 = calc_ann_returns(df=df_SP400,\n start_date=start_date,\n end_date=end_date,\n num_years=num_years)\n\n# Annualized returns for the S&P 600.\nann_ret_SP600 = calc_ann_returns(df=df_SP600,\n start_date=start_date,\n end_date=end_date,\n num_years=num_years)\n\n# Annualized returns for investing and reinvesting in\n# US Gov. Bonds with 1-year maturity.\nann_ret_bond = bond_annualized_returns(df=bond_yields,\n num_years=num_years)\n\n# Annualized returns for the US CPI inflation index.\ncpi2 = cpi[start_date:end_date]\nann_ret_cpi = [annualized_returns(series=cpi2, years=i+1)\n for i in range(num_years)]",
"Examples of Annualized Returns\nThe lists we have created above contain the annualized returns for the stock indices as well as US Government Bonds and the US CPI inflation index.\nLet us show the annualized returns of the S&P 500 for all 1-year periods. This is itself a time-series. It shows that the return was about 0.347 (or 34.7%) for the year between 3. January 1989 and 3. January 1990. The return was only about 31.6% between 4. January 1989 and 4. January 1990. And so on.",
"ann_ret_SP500[0].head(10)",
"We can also show the summary statistics for the annualized returns of all 1-year periods of the S&P 500. Note that a mean of about 0.113 means an average 1-year return of 11.3%.",
"ann_ret_SP500[0].describe()",
"We can also show the annualized returns of the S&P 500 for all 10-year periods. This shows that between 3. January 1989 and 1999 the annualized return was about 19.3%. Between 4. January 1989 and 1999 it was about 19.1%.",
"ann_ret_SP500[9].head(10)",
"These are the summary statistics for all 10-year periods of the S&P 500, which show that it returned about 8.2% per year on average, for all 10-year periods between 1989 and 2018.",
"ann_ret_SP500[9].describe()",
"For US Government Bonds we only consider bonds with 1-year maturity, so for multi-year periods we assume the return is reinvested in new 1-year bonds. Reinvesting in gov. bonds gave an average return of about 5.7% for all 10-year periods between 1962 and 2018.",
"ann_ret_bond[9].describe()",
"Examples of Good and Bad Periods\nUsing the annualized returns we have just calculated, we can now easily find investment periods where one stock index was better than another.",
"def plot_better(df1, df2, ann_ret1, ann_ret2,\n name1, name2, years):\n \"\"\"\n Plot the Total Return for a period of the given number\n of years where the return on stock 1 > stock 2.\n\n If this does not exist, then plot for the period where\n the return of stock 1 was closest to that of stock 2.\n \n :param df1: Pandas DataFrame for stock 1.\n :param df2: Pandas DataFrame for stock 2.\n :param ann_ret1: List of ann.returns for stock 1.\n :param ann_ret2: List of ann.returns for stock 2.\n :param name1: Name of stock 1.\n :param name2: Name of stock 2.\n :param years: Investment period in years.\n :return: None.\n \"\"\"\n\n # Convert number of years to index.\n i = years - 1\n\n # Difference of annualized returns.\n ann_ret_dif = ann_ret1[i] - ann_ret2[i]\n\n # Find the biggest return difference and use its\n # index as the start-date for the period to be plotted.\n start_date = ann_ret_dif.idxmax()\n \n # The end-date for the period to be plotted.\n days = int(years * 365.25)\n end_date = start_date + pd.Timedelta(days=days)\n \n # Create a Pandas DataFrame with stock 1,\n # whose Total Return is normalized to start at 1.0\n df = pd.DataFrame()\n tot_ret1 = df1[start_date:end_date][TOTAL_RETURN]\n df[name1] = tot_ret1 / tot_ret1[0]\n\n # Add stock 2 to the DataFrame.\n tot_ret2 = df2[start_date:end_date][TOTAL_RETURN]\n df[name2] = tot_ret2 / tot_ret2[0]\n\n # Plot the two stocks' Total Return for this period.\n df.plot(title=\"Total Return\")",
"First we show a 3-year period where the S&P 500 was better than the S&P 400.",
"plot_better(df1=df_SP500, df2=df_SP400,\n ann_ret1=ann_ret_SP500,\n ann_ret2=ann_ret_SP400,\n name1=name_SP500,\n name2=name_SP400,\n years=3)",
"Then we show a 3-year period where the S&P 400 was better than the S&P 500.",
"plot_better(df1=df_SP400, df2=df_SP500,\n ann_ret1=ann_ret_SP400,\n ann_ret2=ann_ret_SP500,\n name1=name_SP400,\n name2=name_SP500,\n years=3)",
"Then we show a 3-year period where the S&P 600 was better than the S&P 400.",
"plot_better(df1=df_SP600, df2=df_SP400,\n ann_ret1=ann_ret_SP600,\n ann_ret2=ann_ret_SP400,\n name1=name_SP600,\n name2=name_SP400,\n years=3)",
"Then we show a 3-year period where the S&P 400 was better than the S&P 600.",
"plot_better(df1=df_SP400, df2=df_SP600,\n ann_ret1=ann_ret_SP400,\n ann_ret2=ann_ret_SP600,\n name1=name_SP400,\n name2=name_SP600,\n years=3)",
"Statistics for Annualized Returns\nWe can also print summary statistics for the annualized returns.",
"def print_return_stats():\n \"\"\"\n Print basic statistics for the annualized returns.\n \"\"\"\n\n # For each period-duration.\n for i in range(num_years):\n years = i + 1\n print(years, \"Year Investment Periods:\")\n\n # Create a new DataFrame.\n df = pd.DataFrame()\n \n # Add the basic statistics for each stock.\n df[name_SP500] = ann_ret_SP500[i].describe()\n df[name_SP400] = ann_ret_SP400[i].describe()\n df[name_SP600] = ann_ret_SP600[i].describe()\n\n # Print it.\n print(df)\n print()",
"When we print the summary statistics for the stock indices, we see that for 1-year investment periods the S&P 500 returned about 11.3% on average, while the S&P 400 returned about 14.0%, and the S&P 600 returned about 12.4%.\nFor longer investment periods the average returns decrease. For 10-year investment periods the S&P 500 returned about 8.2% per year on average, the S&P 400 returned about 11.6% on average, and the S&P 600 returned about 10.3% on average.\nIt can be a bit confusing to view all the summary statistics like this and it is better to show selected data in a table, as was done in the paper Comparison of U.S. Stock Indices.",
"print_return_stats()",
"Probability of Loss\nAnother useful statistic is the historical probability of loss for different investment periods.",
"def prob_loss(ann_ret):\n \"\"\"\n Calculate the probability of negative ann.returns (losses).\n \"\"\"\n\n # Remove rows with NA.\n ann_ret = ann_ret.dropna()\n \n # Calculate the probability using a boolean mask.\n mask = (ann_ret < 0.0)\n prob = np.sum(mask) / len(mask)\n\n return prob\n\ndef print_prob_loss():\n \"\"\"\n Print the probability of loss for increasing investment\n periods for all the stocks.\n \"\"\"\n\n # Create a new DataFrame.\n df = pd.DataFrame()\n\n # Add a column with the probability of loss for S&P 500.\n df[name_SP500] = [prob_loss(ann_ret_SP500[i])\n for i in range(num_years)]\n\n # Add a column with the probability of loss for S&P 400.\n df[name_SP400] = [prob_loss(ann_ret_SP400[i])\n for i in range(num_years)]\n\n # Add a column with the probability of loss for S&P 600.\n df[name_SP600] = [prob_loss(ann_ret_SP600[i])\n for i in range(num_years)]\n\n # Set the index.\n df.index = [\"{} Years\".format(i+1) for i in range(num_years)]\n \n print(df)",
"This shows the probability of loss for the stock-indices for investment periods between 1 and 10 years.\nFor example, the S&P 500 had a loss in about 17.8% of all 1-year investment periods, while the S&P 400 had a loss in about 18.1% of all 1-year periods, and the S&P 600 had a loss in about 22.3% of all 1-year periods.\nThe probability of loss generally decreases as the investment period increases.\nFor example, the S&P 500 had a loss in about 9.6% of all 10-year investment periods, while the S&P 400 and S&P 600 did not have a loss in any of the 10-year periods.",
"print_prob_loss()",
"Compared to Inflation\nIt is also useful to consider the probability of a stock index performing better than inflation.",
"def prob_better(ann_ret1, ann_ret2):\n \"\"\"\n Calculate the probability that the ann.returns of stock 1\n were better than the ann.returns of stock 2.\n\n This does not assume the index-dates are identical.\n\n :param ann_ret1: Pandas Series with ann.returns for stock 1.\n :param ann_ret2: Pandas Series with ann.returns for stock 2.\n :return: Probability.\n \"\"\"\n\n # Create a new DataFrame.\n df = pd.DataFrame()\n\n # Add the ann.returns for the two stocks.\n df[\"ann_ret1\"] = ann_ret1\n df[\"ann_ret2\"] = ann_ret2\n\n # Remove all rows with NA.\n df.dropna(inplace=True)\n\n # Calculate the probability using a boolean mask.\n mask = (df[\"ann_ret1\"] > df[\"ann_ret2\"])\n prob = np.sum(mask) / len(mask)\n\n return prob\n\ndef print_prob_better_than_inflation():\n \"\"\"\n Print the probability of the stocks performing better\n than inflation for increasing investment periods.\n \"\"\"\n \n # Create a new DataFrame.\n df = pd.DataFrame()\n\n # Add a column with the probabilities for the S&P 500.\n name = ticker_SP500 + \" > CPI\"\n df[name] = [prob_better(ann_ret_SP500[i], ann_ret_cpi[i])\n for i in range(num_years)]\n\n # Add a column with the probabilities for the S&P 400.\n name = ticker_SP400 + \" > CPI\"\n df[name] = [prob_better(ann_ret_SP400[i], ann_ret_cpi[i])\n for i in range(num_years)]\n\n # Add a column with the probabilities for the S&P 600.\n name = ticker_SP600 + \" > CPI\"\n df[name] = [prob_better(ann_ret_SP600[i], ann_ret_cpi[i])\n for i in range(num_years)]\n\n # Set the index.\n df.index = [\"{} Years\".format(i+1) for i in range(num_years)]\n \n print(df)",
"This shows the probability of each stock index having a higher return than inflation for investment periods between 1 and 10 years. All taxes are ignored.\nFor example, both the S&P 500 and S&P 400 had a higher return than inflation in about 79% of all 1-year investment periods, while the S&P 600 only exceeded inflation in about 73.6% of all 1-year periods.\nFor investment periods of 6 years or more, the S&P 400 and S&P 600 performed better than inflation for almost all investment periods. But the S&P 500 only exceeded inflation in about 86% of all 10-year periods.",
"print_prob_better_than_inflation()",
"Compared to Bonds\nIt is also useful to compare the returns of the stock indices to risk-free government bonds.",
"def print_prob_better_than_bonds():\n \"\"\"\n Print the probability of the stocks performing better\n than US Gov. Bonds for increasing investment periods.\n \"\"\"\n \n # Create a new DataFrame.\n df = pd.DataFrame()\n\n # Add a column with the probabilities for the S&P 500.\n name = ticker_SP500 + \" > Bonds\"\n df[name] = [prob_better(ann_ret_SP500[i], ann_ret_bond[i])\n for i in range(num_years)]\n\n # Add a column with the probabilities for the S&P 400.\n name = ticker_SP400 + \" > Bonds\"\n df[name] = [prob_better(ann_ret_SP400[i], ann_ret_bond[i])\n for i in range(num_years)]\n\n # Add a column with the probabilities for the S&P 600.\n name = ticker_SP600 + \" > Bonds\"\n df[name] = [prob_better(ann_ret_SP600[i], ann_ret_bond[i])\n for i in range(num_years)]\n\n # Set the index.\n df.index = [\"{} Years\".format(i+1) for i in range(num_years)]\n \n print(df)",
"This shows the probability of each stock index having a higher return than risk-free government bonds, for investment periods between 1 and 10 years. We consider annual reinvestment in bonds with 1-year maturity. All taxes are ignored.\nFor example, the S&P 500 returned more than government bonds in about 79% of all 1-year periods, while it was 78% for the S&P 400 and 73% for the S&P 600.\nFor investment periods of 6 years or more, the S&P 400 and S&P 600 nearly always returned more than government bonds. But the S&P 500 only returned more than bonds in about 84% of all 10-year periods.",
"print_prob_better_than_bonds()",
"Compared to Other Stock Indices\nNow we will compare the stock indices directly against each other.",
"def print_prob_better():\n \"\"\"\n Print the probability of one stock index performing better\n than another stock index for increasing investment periods.\n \"\"\"\n \n # Create a new DataFrame.\n df = pd.DataFrame()\n\n # Add a column with the probabilities for S&P 500 > S&P 400.\n name = ticker_SP500 + \" > \" + ticker_SP400\n df[name] = [prob_better(ann_ret_SP500[i], ann_ret_SP400[i])\n for i in range(num_years)]\n\n # Add a column with the probabilities for S&P 500 > S&P 600.\n name = ticker_SP500 + \" > \" + ticker_SP600\n df[name] = [prob_better(ann_ret_SP500[i], ann_ret_SP600[i])\n for i in range(num_years)]\n\n # Add a column with the probabilities for S&P 600 > S&P 400.\n name = ticker_SP600 + \" > \" + ticker_SP400\n df[name] = [prob_better(ann_ret_SP600[i], ann_ret_SP400[i])\n for i in range(num_years)]\n\n # Set the index.\n df.index = [\"{} Years\".format(i+1) for i in range(num_years)]\n \n print(df) ",
"This shows the probability of one stock index performing better than another for investment periods between 1 and 10 years. All taxes are ignored.\nFor example, the S&P 500 (Large-Cap) performed better than the S&P 400 (Mid-Cap) in about 42% of all 1-year periods. Similarly, the S&P 500 performed better than the S&P 600 (Small-Cap) in almost 45% of all 1-year periods.\nFor longer investment periods the S&P 500 generally performed worse than the S&P 400 and S&P 600. For example, the S&P 500 only performed better than the S&P 400 in about 6% of all 10-year periods, and it was better than the S&P 600 in about 15% of the 10-year periods. Similarly, the S&P 600 was better than the S&P 400 in only about 21% of all 10-year periods.\nThis shows that for longer investment periods the S&P 400 (Mid-Cap) mostly had a higher return than both the S&P 500 (Large-Cap) and S&P 600 (Small-Cap).",
"print_prob_better()",
"Correlation\nIt is also useful to consider the statistical correlation between the returns of stock indices.",
"def print_correlation():\n \"\"\"\n Print the correlation between the stock indices\n for increasing investment periods.\n \"\"\"\n \n # Create a new DataFrame.\n df = pd.DataFrame()\n\n # Add a column with the correlations for S&P 500 vs. S&P 400.\n name = ticker_SP500 + \" vs. \" + ticker_SP400\n df[name] = [ann_ret_SP500[i].corr(ann_ret_SP400[i])\n for i in range(num_years)]\n\n # Add a column with the correlations for S&P 500 vs. S&P 600.\n name = ticker_SP500 + \" vs. \" + ticker_SP600\n df[name] = [ann_ret_SP500[i].corr(ann_ret_SP600[i])\n for i in range(num_years)]\n\n # Add a column with the correlations for S&P 600 vs. S&P 400.\n name = ticker_SP600 + \" vs. \" + ticker_SP400\n df[name] = [ann_ret_SP600[i].corr(ann_ret_SP400[i])\n for i in range(num_years)]\n\n # Set the index.\n df.index = [\"{} Years\".format(i+1) for i in range(num_years)]\n \n print(df)",
"This shows the correlation coefficient (Pearson) between the returns on the stock indices for investment periods between 1 and 10 years.\nFor example, the correlation was about 0.88 between the S&P 500 and S&P 400 for all 1-year investment periods, while it was only 0.77 for the S&P 500 and S&P 600, and 0.92 for the S&P 600 and S&P 400.\nFor longer investment periods the correlation coefficient generally increases. For example, the correlation was about 0.93 between the S&P 500 and S&P 400 for all 10-year investment periods, while it was about 0.85 between the S&P 500 and S&P 600, and it was almost 0.94 between the S&P 600 and S&P 400.\nThis shows that the return on these three stock indices are all highly correlated, so that they have a strong tendency to show losses or gains for the same periods.\nIt might also be useful to consider the correlation for shorter investment periods, e.g. monthly, weekly or even daily, because a low correlation between stock indices might be useful for rebalancing the investment portfolio when one stock index is down and another is up.",
"print_correlation()",
"Recovery Times\nIt is also useful to consider how quickly the stock indices typically recover from losses.",
"def print_recovery_days():\n \"\"\"\n Print the probability of the stocks recovering from losses\n for increasing number of days.\n \"\"\"\n\n # Print the probability for these days.\n num_days = [7, 30, 90, 180, 365, 2*365, 5*365]\n \n # Create a new DataFrame.\n df = pd.DataFrame()\n \n # Add a column with the probabilities for the S&P 500.\n df[ticker_SP500] = prob_recovery(df=df_SP500, num_days=num_days,\n start_date=start_date,\n end_date=end_date)\n\n # Add a column with the probabilities for the S&P 400.\n df[ticker_SP400] = prob_recovery(df=df_SP400, num_days=num_days,\n start_date=start_date,\n end_date=end_date)\n\n # Add a column with the probabilities for the S&P 600.\n df[ticker_SP600] = prob_recovery(df=df_SP600, num_days=num_days,\n start_date=start_date,\n end_date=end_date)\n\n # Set the index.\n df.index = [\"{} Days\".format(days) for days in num_days]\n \n print(df)",
"This shows the probability that each stock index has recovered from losses within a given number of days.\nFor example, all three stock indices recovered from about 80-83% of all losses within just a week. The probability goes up for longer investment periods. For example, for 5-year investment periods the S&P 500 had recovered from about 99.8% of all losses, while the S&P 400 and S&P 600 had recovered from all losses in 5 years.\nNote that this only measures the number of days until the stock index recovered the first time. It is possible that a stock index decreases again in the future. This can be seen from the non-zero probabilities of loss shown further above, where the S&P 400 and S&P 600 had losses in some 7, 8, and 9 year investment periods.",
"print_recovery_days()",
"Discussion\nFrom the above statistics we saw that the S&P 400 (Mid-Cap) was generally better than the S&P 600 (Small-Cap), which was generally better than the S&P 500 (Large-Cap), especially for longer investment periods.\nFor all 10-year investment periods between 1989 and 2018, the S&P 400 returned 11.6% per year on average, while the S&P 600 returned 10.3% on average, and the S&P 500 only returned 8.2% on average.\nFurthermore, the S&P 400 and 600 almost never had losses for investment periods of 6 years or more, while the S&P 500 had losses in about 9.6% of all 10-year investment periods.\nObviously the S&P 400 (Mid-Cap) would have been a better investment for most longer periods between 1989 and 2018. The question is whether this superiority will continue in the future?\nForecasting Future Returns\nIt is easy to imagine, that if many investors were to believe that the S&P 400 was generally a superior investment to the S&P 500, then the price of the S&P 400 would be bid up to the point where its future earnings growth could no longer justify its price and the S&P 400 would therefore end up under-performing the S&P 500 in the long-term.\nWe therefore need to forecast the future returns of the S&P 400 and S&P 600 similarly to how we did that for the S&P 500 in the previous paper. The problem is that we do not have the P/Sales or P/Book data for the S&P 400 and S&P 600. This data can be purchased from Standard & Poor's but it is quite expensive.\nWhat we have instead are the dividend payouts for the S&P 400 and 600, which can be used to calculate the P/Dividend ratio or its reciprocal value called the Dividend Yield. You are encouraged to try and run the previous Python Notebook with this data to see if you can forecast the future returns of the S&P 400 and 600 from their dividend yields.\nOnce you have forecasted the future returns of the three indices you can guage whether the indices are mispriced, so that one index might currently be preferred over the others.\nCause of Growth\nWe have not considered the underlying cause of growth for these three indices. It would be of great importance to know whether the superior returns on the S&P 400 were caused by superior growth in sales and earnings, and whether we might expect such growth to continue in the future. More detailed financial data is needed for this analysis, which unfortunately can be difficult and expensive to obtain for many stock indices.\nLicense (MIT)\nCopyright (c) 2015-18 by Magnus Erik Hvass Pedersen\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
IanHawke/orcomp-training
|
03-plotting-data.ipynb
|
mit
|
[
"Plotting\nThere are many libraries for plotting in Python. The standard library is matplotlib. Its examples and gallery are particularly useful references.\nMatplotlib is most useful if you have data in numpy arrays. We can then plot standard single graphs straightforwardly:",
"%matplotlib inline",
"The above command is only needed if you are plotting in a Jupyter notebook.\nWe now construct some data:",
"import numpy\n\nx = numpy.linspace(0, 1)\ny1 = numpy.sin(numpy.pi * x) + 0.1 * numpy.random.rand(50)\ny2 = numpy.cos(3.0 * numpy.pi * x) + 0.2 * numpy.random.rand(50)",
"And then produce a line plot:",
"from matplotlib import pyplot\npyplot.plot(x, y1)\npyplot.show()",
"We can add labels and titles:",
"pyplot.plot(x, y1)\npyplot.xlabel('x')\npyplot.ylabel('y')\npyplot.title('A single line plot')\npyplot.show()",
"We can change the plotting style, and use LaTeX style notation where needed:",
"pyplot.plot(x, y1, linestyle='--', color='black', linewidth=3)\npyplot.xlabel(r'$x$')\npyplot.ylabel(r'$y$')\npyplot.title(r'A single line plot, roughly $\\sin(\\pi x)$')\npyplot.show()",
"We can plot two lines at once, and add a legend, which we can position:",
"pyplot.plot(x, y1, label=r'$y_1$')\npyplot.plot(x, y2, label=r'$y_2$')\npyplot.xlabel(r'$x$')\npyplot.ylabel(r'$y$')\npyplot.title('Two line plots')\npyplot.legend(loc='lower left')\npyplot.show()",
"We would probably prefer to use subplots. At this point we have to leave the simple interface, and start building the plot using its individual components, figures and axes, which are objects to manipulate:",
"fig, axes = pyplot.subplots(nrows=1, ncols=2, figsize=(10,6))\naxis1 = axes[0]\naxis1.plot(x, y1)\naxis1.set_xlabel(r'$x$')\naxis1.set_ylabel(r'$y_1$')\naxis2 = axes[1]\naxis2.plot(x, y2)\naxis2.set_xlabel(r'$x$')\naxis2.set_ylabel(r'$y_2$')\nfig.tight_layout()\npyplot.show()",
"The axes variable contains all of the separate axes that you may want. This makes it easy to construct many subplots using a loop:",
"data = []\nfor nx in range(2,5):\n for ny in range(2,5):\n data.append(numpy.sin(nx * numpy.pi * x) + numpy.cos(ny * numpy.pi * x))\n\nfig, axes = pyplot.subplots(nrows=3, ncols=3, figsize=(10,10))\nfor nrow in range(3):\n for ncol in range(3):\n ndata = ncol + 3 * nrow\n axes[nrow, ncol].plot(x, data[ndata])\n axes[nrow, ncol].set_xlabel(r'$x$')\n axes[nrow, ncol].set_ylabel(r'$\\sin({} \\pi x) + \\cos({} \\pi x)$'.format(nrow+2, ncol+2))\nfig.tight_layout()\npyplot.show()",
"Matplotlib will allow you to generate and place axes pretty much wherever you like, to use logarithmic scales, to do different types of plot, and so on. Check the examples and gallery for details.\nExercise\nThe logistic map builds a sequence of numbers ${ x_n }$ using the relation\n$$\n x_{n+1} = r x_n \\left( 1 - x_n \\right),\n$$\nwhere $0 \\le x_0 \\le 1$.\n\nWrite a program that calculates the first $N$ members of the sequence, given as input $x_0$ and $r$ (and, of course, $N$).\nFix $x_0=0.5$. Calculate the first 2,000 members of the sequence for $r=1.5$ and $r=3.5$ Plot the last 100 members of the sequence in both cases. What does this suggest about the long-term behaviour of the sequence?\nFix $x_0 = 0.5$. For each value of $r$ between $1$ and $4$, in steps of $0.01$, calculate the first 2,000 members of the sequence. Plot the last 1,000 members of the sequence on a plot where the $x$-axis is the value of $r$ and the $y$-axis is the values in the sequence. Do not plot lines - just plot markers (e.g., use the 'k.' plotting style).",
"def logistic(x0, r, N = 1000):\n sequence = [x0]\n xn = x0\n for n in range(N):\n xnew = r*xn*(1.0-xn)\n sequence.append(xnew)\n xn = xnew\n return sequence\n\nx0 = 0.5\nN = 2000\nsequence1 = logistic(x0, 1.5, N)\nsequence2 = logistic(x0, 3.5, N)\npyplot.plot(sequence1[-100:], 'b-', label = r'$r=1.5$')\npyplot.plot(sequence2[-100:], 'k-', label = r'$r=3.5$')\npyplot.xlabel(r'$n$')\npyplot.ylabel(r'$x$')\npyplot.show()",
"This suggests that, for $r=1.5$, the sequence has settled down to a fixed point. In the $r=3.5$ case it seems to be moving between four points repeatedly.",
"# This is the \"best\" way of doing it, but we may not have much numpy yet\n# r_values = numpy.arange(1.0, 4.0, 0.01)\n# This way only uses lists\nr_values = []\nfor i in range(302):\n r_values.append(1.0 + 0.01 * i)\nx0 = 0.5\nN = 2000\nfor r in r_values:\n sequence = logistic(x0, r, N)\n pyplot.plot(r*numpy.ones_like(sequence[1000:]), sequence[1000:], 'k.')\npyplot.xlabel(r'$r$')\npyplot.ylabel(r'$x$')\npyplot.show()",
"The first transition from fixed point to limit cycle is at $r \\approx 3$, the next at $r \\approx 3.45$, the next at $r \\approx 3.55$. The transition to chaos appears to happen before $r=4$, but it's not obvious exactly where."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
psychemedia/ou-robotics-vrep
|
robotVM/notebooks/Demo - vrep magic.ipynb
|
apache-2.0
|
[
"VREP Magic Demo - Pioneer Robot\nDemonstration using the Pioneer robot:\n\nuse downward facing RGB sensors to follow a line.",
"#Set up the notebook to work with the V-REP simulator\n%run 'Set-up.ipynb'\n%run 'Loading scenes.ipynb'\n\n#Load the Pioneer models\n%run 'vrep_models/PioneerP3DX.ipynb'",
"Line Follower Example",
"%vrep_robot_methods PioneerP3DXL\n\n%%vrepsim '../scenes/LineFollowerPioneer.ttt' PioneerP3DXL\n# black color : 43\n# white-gray color : -53\nimport time\nwhile True:\n lclr = robot.color_left()\n rclr = robot.color_right()\n if lclr > 10:\n robot.rotate_left(0.3)\n if rclr > 10:\n robot.rotate_right(0.3)\n if lclr < -20 and rclr < -20:\n robot.move_forward(1.5)\n time.sleep(0.001)",
"Widget Demo\nThe following demo shows how to use a couple of text widgets that are updated from the robot control script.",
"import time\ndef line_follow(pioneer):\n lclr = pioneer.color_left()\n rclr = pioneer.color_right()\n if lclr > 10:\n pioneer.rotate_left(0.3)\n if rclr > 10:\n pioneer.rotate_right(0.3)\n if lclr < -20 and rclr < -20:\n pioneer.move_forward(1.5)\n time.sleep(0.001)\n\nsensorText1.description = 'Left light'\nsensorText2.description = 'Max left light'\n\ndisplay(sensorText1,sensorText2)\n\n%%vrepsim '../scenes/LineFollowerPioneer.ttt' PioneerP3DXL \nmaxval=robot.color_left() \nwhile True:\n line_follow(robot)\n sensorText1.value =str(robot.color_left())\n maxval = robot.color_left() if robot.color_left() > maxval else maxval\n sensorText2.value=str(maxval)",
"Returning data",
"%matplotlib inline\nimport pandas as pd\n\ndf=pd.DataFrame(columns=['Time','Left sensor'])\n\n#If we want to set df directly within the evaluated code in the vrepsim block\n#we need to specify it in that block using: global df\n#However, objects are mutable in that scope, so pass the dataframe that way\ndata={'df':df}\n\n%%vrepsim '../scenes/LineFollowerPioneer.ttt' PioneerP3DXL\n\nmaxval=robot.color_left()\nstart_time = time.time()\nwhile True:\n line_follow(robot)\n sensorText1.value =str(robot.color_left())\n maxval = robot.color_left() if robot.color_left() > maxval else maxval\n sensorText2.value=str(maxval)\n elapsed_time = time.time() - start_time\n data['df']=pd.concat([data['df'],pd.DataFrame([{'Time':elapsed_time,\n 'Left sensor':robot.color_left()}])])\n\ndata['df'].plot(x='Time',y='Left sensor');"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
UIUC-iSchool-DataViz/fall2017
|
week07/examples_week07.ipynb
|
bsd-3-clause
|
[
"Basics of Pandas\nThe last few weeks we have been using low-level methods to read data in to Python and manipulate it. This week we will be exploring pandas to accelerate this process.\nPandas is based around the notion that arrays can be indexed in a flexible manner, and that we can structure our data access around the indexing labels.\nWe will start out, as we often do, by applying our boilerplate setup.",
"%matplotlib inline\n\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"Pandas provides a number of read_* options, including read_csv, which we will use here.\nOne important note about read_csv in particular is that there are over 50 possible arguments to it. This allows for intensely flexible specification of how to read data in, how to parse it, and very detailed control over things like encoding of files and so forth. This flexibility is designed to eliminate the need to pre-process any data files before importing, but it can also make for a complex import process if you only have to adjust a few columns. We will use this in some of its more simple ways here.\nBelow, we read the building inventory file into an object called df (for Data Frame).",
"df = pd.read_csv(\"data-readonly/IL_Building_Inventory.csv\")",
"One of the first things we can do is examine the columns that the dataframe has identified.",
"df.columns\n\ndf.head()\n\ndf.tail()\n\ndf.describe()\n\ndf.dtypes\n\ndf.groupby([\"Agency Name\"])[\"Square Footage\"].sum()\n\ndf[\"Agency Name\"].value_counts()\n\ndf.describe()\n\ndf[\"Total Floors\"].median()\n\ndf.median()\n\ndf.quantile([0.1, 0.2, 0.9])\n\ndf[\"Agency Name\"].apply(lambda a: a.upper()).head()\n\ndf[\"Agency Name\"].apply(lambda a: a).head()\n\n\"This is my string\".lower()\n\n\"this is my string. here is another.\".capitalize()\n\ndf = pd.read_csv(\"data-readonly/IL_Building_Inventory.csv\", na_values={'Year Acquired': 0, 'Year Constructed': 0})\n\ndf.count()\n\ndf.iloc[10]\n\ndf.iloc[10]\n\ndf.loc[10, [\"County\", \"Senate Dist\"]]\n\nyear = df.groupby(\"Year Acquired\")\n\ndf.index = df[\"Year Acquired\"]\n\ndf.head()\n\ndf.loc[1970].head()\n\ndf.head()\n\ndf.loc[1974]\n\ndf.loc[0]\n\ndf = pd.read_csv(\"data-readonly/IL_Building_Inventory.csv\",\n na_values={'Year Acquired': 0, 'Year Constructed': 0})\n\ndf.index\n\ndf2 = df.set_index(\"Year Acquired\")\n\ndf2.index\n\ndf2.loc[1975].head()\n\ndf2.iloc[[1974, 1975]]\n\nkeith = df.set_index(\"City\")\n\nkeith.loc[\"Kinmundy\"].describe()\n\nnames = [\"date\", \"city\", \"state\", \"country\", \"shape\", \"duration_seconds\", \"duration_reported\", \"description\", \"report_date\", \"latitude\", \"longitude\"]\n\nufo = pd.read_csv(\"data-readonly/ufo-scrubbed-geocoded-time-standardized.csv\",\n names = names, parse_dates = [\"date\", \"report_date\"])\n\nufo.dtypes\n\nufo.describe()\n\nsum_seconds = ufo.groupby(\"state\")[\"duration_seconds\"].sum()\n\nsum_seconds.sort_values() / (365*24*3600)\n\nstates = ufo.groupby(\"state\")\n\nstates[\"duration_seconds\"].mean()\n\nufo.loc[ ufo[\"duration_seconds\"] > 900 , [\"state\", \"duration_seconds\", \"shape\"] ].groupby(\"state\")[\"duration_seconds\"].sum()\n\nufo[\"date\"].min()\n\nufo[\"date\"].max()\n\nfirst_sighting = ufo.groupby(\"state\")[\"date\"].min()\nlast_sighting = ufo.groupby(\"state\")[\"date\"].max()\nlast_sighting - first_sighting\n\nfirst_sighting.index\n\nufo[\"state\"].nunique()\n\nufo[\"country\"].unique()\n\nufo[\"country\"] = ufo[\"country\"].astype(\"category\")\nufo[\"shape\"] = ufo[\"shape\"].astype(\"category\")\nufo[\"state\"] = ufo[\"state\"].astype(\"category\")\n\nufo[\"city\"].nunique()\n\nufo.shape\n\nufo.groupby(\"city\").count().nlargest(10, \"date\")\n\nufo.dtypes\n\nshape_times = ufo.groupby(\"shape\")[\"duration_seconds\"].sum()\n\nshape_times.index\n\nshape_times.plot()\n\nshape_times.sort_values().plot()\n\nshape_times.nlargest(5)\n\nshape_state = ufo.groupby([\"state\", \"shape\"])\n\ntimes = shape_state[\"duration_seconds\"].sum()\n\ntimes.loc[ [\"il\", \"mi\", \"oh\"], [\"sphere\", \"unknown\"] ]\n\ntimes.loc[\"il\":\"ok\", \"sphere\":\"unknown\"]\n\nunsorted_nonsense = times.sort_index()\n\nunsorted_nonsense.loc[\"il\":\"ok\"]\n\nunsorted_nonsense.loc[\"il\":\"ok\"].plot()\n\nufo.set_index(\"date\", inplace=True)\n\nufo.resample(\"A\")[\"duration_seconds\"].sum()\n\nmyplot = ufo.resample(\"10A\")[\"duration_seconds\"].sum().plot()\nmyplot.set_yscale('log')\n\nr = ufo.resample(\"10A\")\n\nr[\"duration_seconds\"].sum()\n\nufo.resample(\"W\")[\"duration_seconds\"].sum()\n\nday_of_week = ufo.index.dayofweek\n\nufo[\"day_of_week\"] = day_of_week\n\nufo.groupby(\"day_of_week\")[\"duration_seconds\"].sum().plot()\n\nufo.groupby(\"state\").sum().loc[\"tx\"]\n\nufo.reset_index()\n\nweek = ufo.set_index( [\"day_of_week\", \"state\", \"shape\"] )\n\nweek.loc[, 'il', 'cigar']\n\nnew_week = week.sort_index()\n\nufo = 
ufo.reset_index()\n\nufo.index = ufo.date\nufo.index.dayofweek"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
loli/medpy
|
notebooks/Accessing the image's meta-data.ipynb
|
gpl-3.0
|
[
"Accessing the image's meta-data\n\nIn this tutorial we will learn how to access and manipulate the image's meta-data form the header.\n\nDuring the image loading tutorial we obtained beside the image data as numpy array an additional header object. Let's first load our usual image.",
"%matplotlib inline\n\nfrom medpy.io import load\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\ni, h = load(\"flair.nii.gz\")\n\nplt.imshow(i, cmap = cm.Greys_r);",
"Now let's take a look at the header.",
"print(h)",
"That is quite a lot of information and the header appear to be of class 'nibabel.nifti1.Nifti1Image'. The reason behind this is that MedPy relies on third party librarier to save and load image. To keep the compatibility high and the maintenance requirements at a minimum, MedPy does not posess a dedicated header object, but instead returns the third party libraries image object as pseudo-header (don't worry, the image data is not kept twice).\nDepending on the third party library used, a different kind of header object can be returned. To provide image format independent access to the most important header attributes, MedPy provides some accessor-functions that work with all type of headers.\nTo query the image's voxel spacing, you can use the following.",
"from medpy.io import header\n\nprint header.get_pixel_spacing(h)",
"And correspondingly for the offest.",
"print header.get_offset(h)",
"Both of these values can also be set,",
"header.set_pixel_spacing(h, (0.8, 1.2))\n\nprint header.get_pixel_spacing(h)",
"Saving the array with the modified header, the new meta-data are stored alongside the image.",
"from medpy.io import save\n\nsave(i, \"flair_distorted.nii.gz\", h, force=True)\nj, hj = load(\"flair_distorted.nii.gz\")\n\nprint header.get_pixel_spacing(h)",
"Further meta-data from the headers is largely incompatible between formats. If you require access to additional header attributes, you can do this by querying the image header object directly. In the above case of a NiBabel class, you can, for example, query the infamous 'qform_code' of the NIfTI format.",
"print(h.header['qform_code'])",
"But be warned that such an approach leads to image format and image loade dependent code."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
CAChemE/curso-python-datos
|
notebooks/051-Pandas-Ejercicios.ipynb
|
bsd-3-clause
|
[
"Ejercicios de análisis de datos con pandas\nFuente: https://github.com/PyDataMadrid2016/Conference-Info/tree/master/workshops_materials/20160408_1100_Pandas_for_beginners/tutorial por Kiko Correoso, licencia MIT\nEn la carpeta de datos tenemos un fichero que se llama model.txt que contiene datos de medidas de viento: velocidad, orientación, temperatura...",
"!head ../data/model.txt\n\nimport pandas as pd\nimport numpy as np\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nfrom IPython.display import display\n\nmodel = pd.read_csv(\n \"../data/model.txt\", delim_whitespace=True, skiprows = 3,\n parse_dates = {'Timestamp': [0, 1]}, index_col = 'Timestamp')\n\nmodel.head()",
"Ejercicios\nSobre el conjunto de datos model:\n\n\nRepresentar la matriz scatter de la velocidad y orientación del viento de los primeros mil registros.\nMisma matriz scatter para los 1000 registros con mayor velocidad, ordenados.\nHistograma de la velocidad del viento con 36 particiones.\nHistórico de la velocidad media, con los datos agrupados por años y meses.\nTabla de velocidades medias en función del año (filas) y del mes (columnas).\nGráfica con los históricos de cada año, agrupados por meses, superpuestos.\n\n\nRepresentamos la matriz scatter de la velocidad y orientación del viento de los primeros mil registros:",
"pd.tools.plotting.scatter_matrix(model.loc[model.index[:1000], 'M(m/s)':'D(deg)'])",
"Misma matriz scatter para los 1000 registros con mayor velocidad:",
"pd.tools.plotting.scatter_matrix(\n model.loc[model.sort_values('M(m/s)', ascending=False).index[:1000],\n 'M(m/s)':'D(deg)']\n)\n\nmodel.loc[:, 'M(m/s)'].plot.hist(bins=np.arange(0, 35))\n\nmodel['month'] = model.index.month\nmodel['year'] = model.index.year",
"Histórico de la velocidad media:",
"model.groupby(by = ['year', 'month']).mean().head(24)\n\nmodel.groupby(by=['year', 'month']).mean().plot(y='M(m/s)', figsize=(15, 5))",
"Media móvil de los datos agrupados por mes y año:",
"monthly = model.groupby(by=['year', 'month']).mean()\nmonthly['ma'] = monthly.loc[:, 'M(m/s)'].rolling(5, center=True).mean()\nmonthly.head()\n\nmonthly.loc[:, ['M(m/s)', 'ma']].plot(figsize=(15, 6))\n\nmonthly.loc[:, 'M(m/s)'].reset_index().pivot(index='year', columns='month')\n\nmonthly.loc[:, 'M(m/s)'].reset_index().pivot(\n index='year', columns='month'\n).T.loc['M(m/s)'].plot(\n figsize=(15, 5), legend=False\n)\n "
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
googledatalab/notebooks
|
tutorials/Stackdriver Monitoring/Group metrics.ipynb
|
apache-2.0
|
[
"Look at metric data based on Stackdriver groups\nIn this tutorial, we see how to look at the group structure in a project, and how to use groups to filter and aggregate metric data. For this tutorial to execute completely, you need to have:\n* a Stackdriver account\n* at least one Stackdriver group in your account\n* the above group must have at least one GCE instance in it\nNote: It is recommended that you look at the Getting Started notebook first to familiarize yourself with the Query class and how pandas dataframes are used to represent time series data.\nLoad the monitoring module and set the default project\nIf there is no default project set already, you must do so using 'set_datalab_project_id'. Note, the project you set here must be the hosting project for a Stackdriver account, otherwise you will see the following error while loading the groups in the next cell:\n`BadRequest: 400 The project 'my-project-id' is not a Stackdriver account project.`",
"# set_datalab_project_id('my-project-id')",
"List the Stackdriver groups\nLoad the Stackdriver groups in the default project, and get the dataframe containing all the information.",
"from google.datalab.stackdriver import monitoring as gcm\n\ngroups_dataframe = gcm.Groups().as_dataframe()\n\n# Sort the dataframe by the group name, and reset the index.\ngroups_dataframe = groups_dataframe.sort_values(by='Group name').reset_index(drop=True)\ngroups_dataframe.head(5)",
"Extract the first group\nNow we initialize first_group_id from the list of Stackdriver groups. Please note:\n\nIf you don't have any groups so far, please create one via the Stackdriver dashboard.\nFurther, if the first group does not contain any GCE instances, please explicitly set first_group_id to the ID of a group that does have an instance.",
"import sys\n\nif groups_dataframe.empty:\n sys.stderr.write('This project has no Stackdriver groups. The remaining notebook '\n 'will raise errors!')\nelse:\n first_group_id = groups_dataframe['Group ID'][0]\n print('First group ID: %s' % first_group_id)",
"Load the CPU metric data for the instances a given group\nLoad the CPU Utilization for last 2 hours for the group with the ID first_group_id. The time series is further aggregated as follows:\n* The data is aligned to 5 minute intervals using the 'ALIGN_MEAN' method.\n* The data per zone and instance_name pair is combined or reduced into a single time series. This is useful to combine the time series of instances that might be restarted / redeployed continuously.",
"# Initialize the query for the CPU Utilization metric over the last 2 hours.\nquery_group = gcm.Query('compute.googleapis.com/instance/cpu/utilization', hours=2)\n\n# Filter the instances to the members of the first group.\nquery_group = query_group.select_group(first_group_id)\n\n# Aggregate the time series.\nquery_group = query_group.align(gcm.Aligner.ALIGN_MEAN, minutes=5)\nquery_group = query_group.reduce(gcm.Reducer.REDUCE_MEAN, 'resource.zone', 'metric.instance_name')\n\n# Create a dataframe with zone and instance name in the headers.\ncpu_group_dataframe = query_group.as_dataframe(labels=['zone', 'instance_name'])\ncpu_group_dataframe.tail(5)",
"Plot the the mean of the CPU Utilization per zone",
"cpu_group_dataframe_per_zone = cpu_group_dataframe.groupby(level=0, axis=1).mean()\n_ = cpu_group_dataframe_per_zone.plot().legend(loc='center left', bbox_to_anchor=(1.0, 0.8))",
"Plot the CPU Utilization of instances\nNow, we plot the chart at the instance level. However, instances in each zone are displayed in a separate chart.",
"# Find all unique zones and sort them.\nall_zones = sorted(set(cpu_group_dataframe.columns.get_level_values('zone')))\n\n# Find the global min and max so we can set the same range for all y-axes.\nmin_cpu = cpu_group_dataframe.min().min()\nmax_cpu = cpu_group_dataframe.max().max()\n\nfor zone in all_zones:\n zone_plot = cpu_group_dataframe[zone].plot(title=zone, ylim=(min_cpu, max_cpu))\n zone_plot.legend(loc='center left', bbox_to_anchor=(1.0, 0.8))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tombstone/models
|
research/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb
|
apache-2.0
|
[
"Context R-CNN Demo\n<table align=\"left\"><td>\n <a target=\"_blank\" href=\"https://colab.sandbox.google.com/github/tensorflow/models/blob/master/research/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab\n </a>\n</td><td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/models/blob/master/research/object_detection/colab_tutorials/context_rcnn_tutorial.ipynb\">\n <img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n</td></table>\n\nThis notebook will walk you step by step through the process of using a pre-trained model to build up a contextual memory bank for a set of images, and then detect objects in those images+context using Context R-CNN.\nSetup\nImportant: If you're running on a local machine, be sure to follow the installation instructions. This notebook includes only what's necessary to run in Colab.\nInstall",
"!pip install -U --pre tensorflow==\"2.*\"\n!pip install tf_slim",
"Make sure you have pycocotools installed",
"!pip install pycocotools",
"Get tensorflow/models or cd to parent directory of the repository.",
"import os\nimport pathlib\n\n\nif \"models\" in pathlib.Path.cwd().parts:\n while \"models\" in pathlib.Path.cwd().parts:\n os.chdir('..')\nelif not pathlib.Path('models').exists():\n !git clone --depth 1 https://github.com/tensorflow/models",
"Compile protobufs and install the object_detection package",
"%%bash\ncd models/research/\nprotoc object_detection/protos/*.proto --python_out=.\n\n%%bash \ncd models/research\npip install .",
"Imports",
"import numpy as np\nimport os\nimport six\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport pathlib\nimport json\nimport datetime\nimport matplotlib.pyplot as plt\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nfrom IPython.display import display",
"Import the object detection module.",
"from object_detection.utils import ops as utils_ops\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_utils",
"Patches:",
"# patch tf1 into `utils.ops`\nutils_ops.tf = tf.compat.v1\n\n# Patch the location of gfile\ntf.gfile = tf.io.gfile",
"Model preparation\nLoader",
"def load_model(model_name):\n base_url = 'http://download.tensorflow.org/models/object_detection/'\n model_file = model_name + '.tar.gz'\n model_dir = tf.keras.utils.get_file(\n fname=model_name,\n origin=base_url + model_file,\n untar=True)\n\n model_dir = pathlib.Path(model_dir)/\"saved_model\"\n model = tf.saved_model.load(str(model_dir))\n model = model.signatures['serving_default']\n\n return model",
"Loading label map\nLabel maps map indices to category names, so that when our convolution network predicts 5, we know that this corresponds to zebra. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine",
"# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = 'models/research/object_detection/data/snapshot_serengeti_label_map.pbtxt'\ncategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=False)",
"We will test on a context group of images from one month at one camera from the Snapshot Serengeti val split defined on LILA.science, which was not seen during model training:",
"# If you want to test the code with your images, just add path to the images to\n# the TEST_IMAGE_PATHS.\nPATH_TO_TEST_IMAGES_DIR = pathlib.Path('models/research/object_detection/test_images/snapshot_serengeti')\nTEST_IMAGE_PATHS = sorted(list(PATH_TO_TEST_IMAGES_DIR.glob(\"*.jpeg\")))\nTEST_IMAGE_PATHS",
"Load the metadata for each image",
"test_data_json = 'models/research/object_detection/test_images/snapshot_serengeti/context_rcnn_demo_metadata.json'\nwith open(test_data_json, 'r') as f:\n test_metadata = json.load(f)\n\nimage_id_to_datetime = {im['id']:im['date_captured'] for im in test_metadata['images']}\nimage_path_to_id = {im['file_name']: im['id'] \n for im in test_metadata['images']}\nimage_path_to_id",
"Generate Context Features for each image",
"faster_rcnn_model_name = 'faster_rcnn_resnet101_snapshot_serengeti_2020_06_10'\nfaster_rcnn_model = load_model(faster_rcnn_model_name)",
"Check the model's input signature, it expects a batch of 3-color images of type uint8.",
"faster_rcnn_model.inputs",
"And it returns several outputs. Note this model has been exported with additional output 'detection_features' which will be used to build the contextual memory bank.",
"faster_rcnn_model.output_dtypes\n\nfaster_rcnn_model.output_shapes",
"Add a wrapper function to call the model, and cleanup the outputs:",
"def run_inference_for_single_image(model, image):\n '''Run single image through tensorflow object detection saved_model.\n\n This function runs a saved_model on a (single) provided image and returns\n inference results in numpy arrays.\n\n Args:\n model: tensorflow saved_model. This model can be obtained using \n export_inference_graph.py.\n image: uint8 numpy array with shape (img_height, img_width, 3)\n\n Returns:\n output_dict: a dictionary holding the following entries:\n `num_detections`: an integer\n `detection_boxes`: a numpy (float32) array of shape [N, 4]\n `detection_classes`: a numpy (uint8) array of shape [N]\n `detection_scores`: a numpy (float32) array of shape [N]\n `detection_features`: a numpy (float32) array of shape [N, 7, 7, 2048]\n '''\n image = np.asarray(image)\n # The input needs to be a tensor, convert it using `tf.convert_to_tensor`.\n input_tensor = tf.convert_to_tensor(image)\n # The model expects a batch of images, so add an axis with `tf.newaxis`.\n input_tensor = input_tensor[tf.newaxis,...]\n\n # Run inference\n output_dict = model(input_tensor)\n # All outputs are batches tensors.\n # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n # We're only interested in the first num_detections.\n num_dets = output_dict.pop('num_detections')\n num_detections = int(num_dets)\n for key,value in output_dict.items():\n output_dict[key] = value[0, :num_detections].numpy() \n output_dict['num_detections'] = num_detections\n\n # detection_classes should be ints.\n output_dict['detection_classes'] = output_dict['detection_classes'].astype(\n np.int64)\n return output_dict",
"Functions for embedding context features",
"def embed_date_captured(date_captured):\n \"\"\"Encodes the datetime of the image.\n\n Takes a datetime object and encodes it into a normalized embedding of shape \n [5], using hard-coded normalization factors for year, month, day, hour,\n minute.\n\n Args:\n date_captured: A datetime object.\n\n Returns:\n A numpy float32 embedding of shape [5].\n \"\"\"\n embedded_date_captured = []\n month_max = 12.0\n day_max = 31.0\n hour_max = 24.0\n minute_max = 60.0\n min_year = 1990.0\n max_year = 2030.0\n\n year = (date_captured.year-min_year)/float(max_year-min_year)\n embedded_date_captured.append(year)\n\n month = (date_captured.month-1)/month_max\n embedded_date_captured.append(month)\n\n day = (date_captured.day-1)/day_max\n embedded_date_captured.append(day)\n\n hour = date_captured.hour/hour_max\n embedded_date_captured.append(hour)\n\n minute = date_captured.minute/minute_max\n embedded_date_captured.append(minute)\n\n return np.asarray(embedded_date_captured)\n\ndef embed_position_and_size(box):\n \"\"\"Encodes the bounding box of the object of interest.\n\n Takes a bounding box and encodes it into a normalized embedding of shape \n [4] - the center point (x,y) and width and height of the box.\n\n Args:\n box: A bounding box, formatted as [ymin, xmin, ymax, xmax].\n\n Returns:\n A numpy float32 embedding of shape [4].\n \"\"\"\n ymin = box[0]\n xmin = box[1]\n ymax = box[2]\n xmax = box[3]\n w = xmax - xmin\n h = ymax - ymin\n x = xmin + w / 2.0\n y = ymin + h / 2.0\n return np.asarray([x, y, w, h])\n\ndef get_context_feature_embedding(date_captured, detection_boxes,\n detection_features, detection_scores):\n \"\"\"Extracts representative feature embedding for a given input image.\n\n Takes outputs of a detection model and focuses on the highest-confidence\n detected object. Starts with detection_features and uses average pooling to\n remove the spatial dimensions, then appends an embedding of the box position\n and size, and an embedding of the date and time the image was captured,\n returning a one-dimensional representation of the object.\n\n Args:\n date_captured: A datetime string of format '%Y-%m-%d %H:%M:%S'.\n detection_features: A numpy (float32) array of shape [N, 7, 7, 2048].\n detection_boxes: A numpy (float32) array of shape [N, 4].\n detection_scores: A numpy (float32) array of shape [N].\n\n Returns:\n A numpy float32 embedding of shape [2057].\n \"\"\"\n date_captured = datetime.datetime.strptime(date_captured,'%Y-%m-%d %H:%M:%S')\n temporal_embedding = embed_date_captured(date_captured)\n embedding = detection_features[0]\n pooled_embedding = np.mean(np.mean(embedding, axis=1), axis=0)\n box = detection_boxes[0]\n position_embedding = embed_position_and_size(box)\n bb_embedding = np.concatenate((pooled_embedding, position_embedding))\n embedding = np.expand_dims(np.concatenate((bb_embedding,temporal_embedding)),\n axis=0)\n score = detection_scores[0]\n return embedding, score",
"Run it on each test image and use the output detection features and metadata to build up a context feature bank:",
"def run_inference(model, image_path, date_captured, resize_image=True):\n \"\"\"Runs inference over a single input image and extracts contextual features.\n\n Args:\n model: A tensorflow saved_model object.\n image_path: Absolute path to the input image.\n date_captured: A datetime string of format '%Y-%m-%d %H:%M:%S'.\n resize_image: Whether to resize the input image before running inference.\n\n Returns:\n context_feature: A numpy float32 array of shape [2057].\n score: A numpy float32 object score for the embedded object.\n output_dict: The saved_model output dictionary for the image.\n \"\"\"\n with open(image_path,'rb') as f:\n image = Image.open(f)\n if resize_image:\n image.thumbnail((640,640),Image.ANTIALIAS)\n image_np = np.array(image)\n\n # Actual detection.\n output_dict = run_inference_for_single_image(model, image_np)\n\n context_feature, score = get_context_feature_embedding(\n date_captured, output_dict['detection_boxes'],\n output_dict['detection_features'], output_dict['detection_scores'])\n return context_feature, score, output_dict\n\ncontext_features = []\nscores = []\nfaster_rcnn_results = {}\nfor image_path in TEST_IMAGE_PATHS:\n image_id = image_path_to_id[str(image_path)]\n date_captured = image_id_to_datetime[image_id]\n context_feature, score, results = run_inference(\n faster_rcnn_model, image_path, date_captured)\n faster_rcnn_results[image_id] = results\n context_features.append(context_feature)\n scores.append(score)\n\n# Concatenate all extracted context embeddings into a contextual memory bank.\ncontext_features_matrix = np.concatenate(context_features, axis=0)\n",
"Run Detection With Context\nLoad a context r-cnn object detection model:",
"context_rcnn_model_name = 'context_rcnn_resnet101_snapshot_serengeti_2020_06_10'\ncontext_rcnn_model = load_model(context_rcnn_model_name)\n",
"We need to define the expected context padding size for the\nmodel, this must match the definition in the model config (max_num_context_features).",
"context_padding_size = 2000",
"Check the model's input signature, it expects a batch of 3-color images of type uint8, plus context_features padded to the maximum context feature size for this model (2000) and valid_context_size to represent the non-padded context features:",
"context_rcnn_model.inputs",
"And returns several outputs:",
"context_rcnn_model.output_dtypes\n\ncontext_rcnn_model.output_shapes\n\ndef run_context_rcnn_inference_for_single_image(\n model, image, context_features, context_padding_size):\n '''Run single image through a Context R-CNN saved_model.\n\n This function runs a saved_model on a (single) provided image and provided \n contextual features and returns inference results in numpy arrays.\n\n Args:\n model: tensorflow Context R-CNN saved_model. This model can be obtained\n using export_inference_graph.py and setting side_input fields. \n Example export call - \n python export_inference_graph.py \\\n --input_type image_tensor \\\n --pipeline_config_path /path/to/context_rcnn_model.config \\\n --trained_checkpoint_prefix /path/to/context_rcnn_model.ckpt \\\n --output_directory /path/to/output_dir \\\n --use_side_inputs True \\\n --side_input_shapes 1,2000,2057/1 \\\n --side_input_names context_features,valid_context_size \\\n --side_input_types float,int \\\n --input_shape 1,-1,-1,3\n\n image: uint8 numpy array with shape (img_height, img_width, 3)\n context_features: A numpy float32 contextual memory bank of shape \n [num_context_examples, 2057]\n context_padding_size: The amount of expected padding in the contextual\n memory bank, defined in the Context R-CNN config as \n max_num_context_features.\n\n Returns:\n output_dict: a dictionary holding the following entries:\n `num_detections`: an integer\n `detection_boxes`: a numpy (float32) array of shape [N, 4]\n `detection_classes`: a numpy (uint8) array of shape [N]\n `detection_scores`: a numpy (float32) array of shape [N]\n '''\n image = np.asarray(image)\n # The input image needs to be a tensor, convert it using \n # `tf.convert_to_tensor`.\n image_tensor = tf.convert_to_tensor(\n image, name='image_tensor')[tf.newaxis,...]\n\n context_features = np.asarray(context_features)\n valid_context_size = context_features.shape[0]\n valid_context_size_tensor = tf.convert_to_tensor(\n valid_context_size, name='valid_context_size')[tf.newaxis,...]\n padded_context_features = np.pad(\n context_features,\n ((0,context_padding_size-valid_context_size),(0,0)), mode='constant')\n padded_context_features_tensor = tf.convert_to_tensor(\n padded_context_features,\n name='context_features',\n dtype=tf.float32)[tf.newaxis,...]\n\n # Run inference\n output_dict = model(\n inputs=image_tensor,\n context_features=padded_context_features_tensor,\n valid_context_size=valid_context_size_tensor)\n # All outputs are batches tensors.\n # Convert to numpy arrays, and take index [0] to remove the batch dimension.\n # We're only interested in the first num_detections.\n num_dets = output_dict.pop('num_detections')\n num_detections = int(num_dets)\n for key,value in output_dict.items():\n output_dict[key] = value[0, :num_detections].numpy() \n output_dict['num_detections'] = num_detections\n\n # detection_classes should be ints.\n output_dict['detection_classes'] = output_dict['detection_classes'].astype(np.int64)\n return output_dict\n\ndef show_context_rcnn_inference(\n model, image_path, context_features, faster_rcnn_output_dict,\n context_padding_size, resize_image=True):\n \"\"\"Runs inference over a single input image and visualizes Faster R-CNN vs. 
\n Context R-CNN results.\n\n Args:\n model: A tensorflow saved_model object.\n image_path: Absolute path to the input image.\n context_features: A numpy float32 contextual memory bank of shape \n [num_context_examples, 2057]\n faster_rcnn_output_dict: The output_dict corresponding to this input image\n from the single-frame Faster R-CNN model, which was previously used to\n build the memory bank.\n context_padding_size: The amount of expected padding in the contextual\n memory bank, defined in the Context R-CNN config as \n max_num_context_features.\n resize_image: Whether to resize the input image before running inference.\n\n Returns:\n context_rcnn_image_np: Numpy image array showing Context R-CNN Results.\n faster_rcnn_image_np: Numpy image array showing Faster R-CNN Results.\n \"\"\"\n\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n with open(image_path,'rb') as f:\n image = Image.open(f)\n if resize_image:\n image.thumbnail((640,640),Image.ANTIALIAS)\n image_np = np.array(image)\n image.thumbnail((400,400),Image.ANTIALIAS)\n context_rcnn_image_np = np.array(image)\n \n faster_rcnn_image_np = np.copy(context_rcnn_image_np)\n\n # Actual detection.\n output_dict = run_context_rcnn_inference_for_single_image(\n model, image_np, context_features, context_padding_size)\n\n # Visualization of the results of a context_rcnn detection.\n vis_utils.visualize_boxes_and_labels_on_image_array(\n context_rcnn_image_np,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n category_index,\n use_normalized_coordinates=True,\n line_thickness=2)\n \n # Visualization of the results of a faster_rcnn detection.\n vis_utils.visualize_boxes_and_labels_on_image_array(\n faster_rcnn_image_np,\n faster_rcnn_output_dict['detection_boxes'],\n faster_rcnn_output_dict['detection_classes'],\n faster_rcnn_output_dict['detection_scores'],\n category_index,\n use_normalized_coordinates=True,\n line_thickness=2)\n return context_rcnn_image_np, faster_rcnn_image_np",
"Define Matplotlib parameters for pretty visualizations",
"%matplotlib inline\nplt.rcParams['axes.grid'] = False\nplt.rcParams['xtick.labelsize'] = False\nplt.rcParams['ytick.labelsize'] = False\nplt.rcParams['xtick.top'] = False\nplt.rcParams['xtick.bottom'] = False\nplt.rcParams['ytick.left'] = False\nplt.rcParams['ytick.right'] = False\nplt.rcParams['figure.figsize'] = [15,10]",
"Run Context R-CNN inference and compare results to Faster R-CNN",
"for image_path in TEST_IMAGE_PATHS:\n image_id = image_path_to_id[str(image_path)]\n faster_rcnn_output_dict = faster_rcnn_results[image_id]\n context_rcnn_image, faster_rcnn_image = show_context_rcnn_inference(\n context_rcnn_model, image_path, context_features_matrix,\n faster_rcnn_output_dict, context_padding_size)\n plt.subplot(1,2,1)\n plt.imshow(faster_rcnn_image)\n plt.title('Faster R-CNN')\n plt.subplot(1,2,2)\n plt.imshow(context_rcnn_image)\n plt.title('Context R-CNN')\n plt.show()",
"The images used in this demo are from the Snapshot Serengeti dataset, and released under the Community Data License Agreement (permissive variant)."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
btabibian/misc_notebooks
|
Nature-Cooperation.ipynb
|
mit
|
[
"%matplotlib inline\nfrom IPython.core.display import HTML\nHTML(\"\"\"\n<style>\n.output_png {\n display: table-cell;\n text-align: center;\n vertical-align: middle;\n}\n</style>\n\"\"\")\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import integrate\nfrom scipy import linalg\nimport ipywidgets as widgets\nfrom IPython.display import display, clear_output\nimport matplotlib.gridspec as gridspec\nsns.set_context(\"talk\")\nsns.set_style(\"whitegrid\")",
"Hacking into Evolutionary Dynamics!\nThis Jupyter notebook implements some of the ideas in following two books, specifically chapters 1-5 in Evolutionary Dynamics. For better undrestanding of the equations and code please consult the books and relevant papers.\nThis notebook contains interactive contents using Javascript, please download and execute it on Jupyter.",
"%%html\n<div >\n<iframe type=\"text/html\" width=\"336\" height=\"550\" frameborder=\"0\" allowfullscreen style=\"max-width:100%;float: left\" src=\"https://lesen.amazon.de/kp/card?asin=B003UV8TC2&preview=inline&linkCode=kpe&ref_=cm_sw_r_kb_dp_MamPyb1NWT7A8\" ></iframe>\n</div>\n<div >\n<iframe type=\"text/html\" width=\"336\" height=\"550\" frameborder=\"0\" allowfullscreen style=\"max-width:100%;float: right\" src=\"https://lesen.amazon.de/kp/card?asin=B00J97FFRI&preview=inline&linkCode=kpe&ref_=cm_sw_r_kb_dp_PfmPyb5ZV4AP8\" ></iframe>\n</div>\n",
"Evolution\nBasic model\n\\begin{align}\n\\dot{x} = \\frac{dx}{dt} = (r-d)x(1-x/K)\n\\end{align}\n\n$r$: reproduction rate\n$d$: hazard rate\n$K$: Maximum capacity",
"fig = plt.figure()\nplt.close(fig)\ndef oneCell(r,d,max_x):\n clear_output(wait=True)\n t_f = 10 \n dt = 0.1\n def int_(t,x):\n dev = x*(r-d)\n if max_x != None:\n dev *= (1-x/max_x)\n #print(\"dev\",dev,x)\n return dev\n\n integ = integrate.ode(int_)\n y = np.zeros(int(t_f/dt)+1)\n x = np.zeros(int(t_f/dt)+1)\n xdot = np.zeros(int(t_f/dt)+1)\n integ.set_integrator(\"dopri5\").set_initial_value(0.01)\n i = 0\n while integ.successful() and integ.t<t_f:\n y[i] = integ.y\n x[i] = integ.t\n xdot[i] = int_(integ.t,y[i])\n integ.integrate(integ.t+dt)\n i=i+1\n fig.clf()\n ax = fig.gca()\n ax.plot(x,y,label=\"population size\")\n ax.set_ylim(-0.6,3.0)\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"population size\")\n \n ax2 = ax.twinx()\n with sns.color_palette(\"PuBuGn_d\",n_colors=1):\n ax2.plot(x, xdot, label=\"derivative\",linestyle='--')\n ax2.set_ylabel('$\\dot{x}$', rotation=0)\n ax2.grid('off')\n ax.legend(loc=2)\n ax2.legend()\n ax2.set_ylim(0.,0.25)\n display(fig)\n return\n\nitems = [\n widgets.FloatSlider(\n value=1.5,\n min=0,\n max=2.0,\n step=0.01,\n description=\"r\",layout=widgets.Layout(width='100%', height='80px'))\n ,widgets.FloatSlider(\n value=.0,\n min=0,\n max=2.0,\n step=0.01,\n description=\"d\",layout=widgets.Layout(width='100%', height='80px'))]\nmax_k = [widgets.FloatSlider(\n value=1.5,\n min=1,\n max=2.0,\n step=0.01,\n description=\"K\",layout=widgets.Layout(width='100%', height='80px')),\n widgets.Checkbox(\n value=False,\n description=\"enforce K\",layout=widgets.Layout(width='100%', height='80px'))]\n\ndef call_back_r(v):\n if max_k[1].value is False:\n return oneCell(items[0].value,items[1].value,None)\n else:\n return oneCell(items[0].value,items[1].value,max_k[0].value)\n\nbox_h = widgets.VBox(items,layout=widgets.Layout(width='100%', height='80px'))\nbox_h_max = widgets.VBox(items,layout=widgets.Layout(width='100%', height='80px'))\nbox = widgets.VBox([box_h]+[widgets.HBox(max_k)])\nitems[0].observe(call_back_r,names='value')\nitems[1].observe(call_back_r,names='value')\nmax_k[0].observe(call_back_r,names='value')\nmax_k[1].observe(call_back_r,names='value')\ndisplay(box)",
"Selection-Mutation\n\nSelection operates whenever different types of individuals reproduce at different rates.\n\n\\begin{align}\n\\dot{\\vec{x}} =\\vec{x}Q-\\phi\\vec{x}.\n\\end{align}\n\n$\\vec{x}$: population ratio of type $i$.\n$Q$: Mutation matrix.\n$\\phi$: average fitness",
"fig = plt.figure()\nplt.close(fig)\ndef twoCell(init_,rate):\n clear_output(wait=True)\n t_f = 10 \n dt = 0.1\n update_rate = np.asarray(rate)\n def int_(t,x):\n dev = x.T.dot(update_rate)-x\n return dev\n\n integ = integrate.ode(int_)\n y = np.zeros((int(t_f/dt)+1,update_rate.shape[0]))\n x = np.zeros((int(t_f/dt)+1,update_rate.shape[0]))\n xdot = np.zeros((int(t_f/dt)+1,update_rate.shape[0]))\n integ.set_integrator(\"dopri5\").set_initial_value(np.asarray(init_))\n i = 0\n while integ.successful() and integ.t<t_f:\n y[i,:] = integ.y\n x[i,:] = integ.t\n xdot[i,:] = int_(integ.t,y[i,:])\n integ.integrate(integ.t+dt)\n i=i+1\n fig.clf()\n ax = fig.gca()\n with sns.color_palette(\"PuBuGn_d\",n_colors=x.shape[1]):\n for ind_ in range(x.shape[1]):\n ax.plot(x[:,ind_], y[:,ind_], label=\"type \"+str(ind_ +1))\n \n ax.set_ylim(-0.1,1.1)\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"population ratio\")\n \n ax2 = ax.twinx()\n with sns.color_palette(\"PuBuGn_d\",n_colors=x.shape[1]):\n for ind_ in range(x.shape[1]):\n ax2.plot(x[:,ind_], xdot[:,ind_], label=\"d type \"+str(ind_ +1),linestyle='--')\n ax2.set_ylabel('$\\dot{x}$', rotation=0)\n ax2.grid('off')\n ax.legend(ncol=x.shape[1])\n ax2.legend(loc=4,ncol=x.shape[1])\n display(fig)\n return\n\nitems_mute = [\n widgets.IntText(\n value=2,\n min=2,\n max=5.0,\n description=\"r\",layout=widgets.Layout(width='50%', height='80px'))\n ,widgets.Button(\n description=\"submit\")]\n\ndef updateplot(v,objects,status_label):\n init = []\n rates = []\n for ind_,obj in enumerate(objects):\n if ind_ < len(objects)-1:\n init.append(obj[0].value)\n else:\n if sum(init)>1:\n status_label.value = \"Initial rates should sum to <1\"\n return\n else:\n status_label.value = \"\"\n init.append(1-sum(init))\n rate_ = []\n for j in range(1,len(objects)):\n rate_.append(obj[j].value)\n if sum(rate_)>1:\n status_label.value = \"sum of mutation rates should sum to <1\"\n return\n else:\n status_label.value = \"\"\n \n rate_.append(1-sum(rate_))\n rates.append(rate_)\n init = np.asarray(init)\n rates = np.asarray(rates)\n twoCell(init,rates)\n return\n\ndef call_back_mute(count,objects,status_label,updateplot):\n dsps = []\n for i in range(count):\n if i < count-1:\n specie = [widgets.FloatSlider(\n value=1.0/count,\n min=0,\n max=1.0,\n step=0.01,\n description=\"init \"+str(i+1),layout=widgets.Layout(width='100%', height='80px'))]\n else:\n specie = [widgets.Label(layout=widgets.Layout(width='100%', height='80px'))]\n\n for j in range(count-1):\n wid = widgets.FloatSlider(\n value=1 if j == i else 0,\n min=0,\n max=1.0,\n step=0.01,\n description=\"rate_\"+str(i+1)+\"_\"+str(j+1),layout=widgets.Layout(width='100%', height='80px'))\n wid.observe(updateplot,names='value')\n specie.append(wid)\n \n specie[0].observe(updateplot,names='value')\n box_h = widgets.HBox(specie,layout=widgets.Layout(width='100%', height='80px'))\n objects.append(specie)\n dsps.append(box_h)\n status_label = widgets.Label()\n box_v = widgets.VBox(dsps+[status_label],layout=widgets.Layout(width='100%', height='80px'))\n \n display(box_v)\n updateplot(\"\")\n return objects\n#items_mute[1].on_click(call_back_mute)\n\n#box_h = widgets.HBox(items_mute,layout=widgets.Layout(width='100%', height='80px'))\n#display(box_h)\nobjects = []\nstatus_label = widgets.Label()\n_ = call_back_mute(2,objects,status_label,lambda x:updateplot(x,objects,status_label))",
"Multiple species.",
"objects_1 = []\nstatus_label_1 = widgets.Label()\n_ = call_back_mute(3,objects_1,status_label_1,lambda x:updateplot(x,objects_1,status_label_1))",
"Genomes are Sequences\nQuasispecies equation\n\\begin{align}\n\\dot{x_i} =\\sum_{j=0}^{n} x_j ~ f_j ~ q_{ji} - \\phi x_i.\n\\end{align}\n\n$x$: population ratio of type $i$.\n$f_i$: fitness for type $i$.\n$q_{ji}$: probability of mutation from type $j$ to $i$\n$q_{ji} = u^{h_ij}(1-u)^{L-h_{ij}}$ $~L:$ Length of genome. $~u:$ mutation prob. at one gene.",
"fig = plt.figure()\nplt.close(fig)\n\ndef genomeSequence(N,drich_alpha,point_mut):\n np.random.seed(0)\n clear_output(wait=True)\n if point_mut is not None:\n L,u = point_mut\n t_f = 10 \n dt = 0.1\n \n x_ = np.random.uniform(size=(N))\n x_ = x_/x_.sum()\n\n f = np.random.lognormal(size=(N))\n \n if drich_alpha is not None:\n Q = np.zeros((N,N))\n for j in range(N): \n Q[j,:] = np.random.dirichlet(np.roll(np.logspace(1,drich_alpha+1,N)[::-1], j), 1)\n elif point_mut is not None:\n Q = np.zeros((N,N))\n for j in range(N):\n for i in range(N):\n Q[j,i] = (u**(np.abs(j-i)))*((1-u)**(L-np.abs(j-i)))\n else:\n print(\"One of the two arguments should not be None\")\n return\n \n def int_(t,x):\n x = np.asarray(x).reshape((x.shape[0],1))\n dev = np.zeros(x.shape[0])\n mean = f.dot(x)\n for i in range(x.shape[0]):\n for j in range(x.shape[0]):\n dev[i] += f[j]*Q[j,i]*x[j]\n dev[i] -= mean*x[i]\n return dev\n\n integ = integrate.ode(int_)\n integ.set_integrator(\"dopri5\").set_initial_value(np.asarray(x_))\n y = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n x = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n xdot = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n\n i = 0\n while integ.successful() and integ.t<t_f:\n y[i,:] = integ.y\n x[i,:] = integ.t\n xdot[i,:] = int_(integ.t,y[i,:])\n integ.integrate(integ.t+dt)\n i=i+1\n \n fig.clf()\n \n \n ax = fig.gca()\n with sns.color_palette(\"PuBuGn_d\",n_colors=2):\n for ind_ in range(x.shape[1]):\n ax.plot(x[:,ind_], y[:,ind_], label=(\"$f_%d$: %.2f\" % (ind_ +1,f[ind_])))\n \n ax.set_ylim(-0.1,1.1)\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"Quasi specie\")\n \n ax2 = ax.twinx()\n with sns.color_palette(\"PuBuGn_d\",n_colors=2):\n ax2.plot(np.arange(0,t_f+dt,dt),y.dot(f), label=\"fitness \",linestyle='-.')\n ax2.set_ylabel('$f$', rotation=0)\n ax2.set_ylim(0,3)\n ax2.grid('off')\n ax.legend(ncol=min(4,x.shape[1]))\n ax2.legend(loc=4)\n display(fig)\n return\n\nitems_gene = [\n widgets.IntSlider(\n value=2,\n min=2,\n max=6,\n description=\"# Genomes\",layout=widgets.Layout(width='80%', height='300px')),\n widgets.IntSlider(\n value=10,\n min=7,\n max=15,\n description=\"Max Length\",layout=widgets.Layout(width='80%', height='230px')),\n widgets.FloatSlider(\n value=0.1,\n min=0.01,\n max=0.3,\n step=0.05,\n description=\"u\",layout=widgets.Layout(width='80%', height='100px'))]\n\ndef _GeneCall(v):\n return genomeSequence(items_gene[0].value,None,(items_gene[1].value,items_gene[2].value))\n \nbox_h = widgets.VBox(items_gene,layout=widgets.Layout(width='100%', height='80px'))\nitems_gene[0].observe(_GeneCall,names='value')\nitems_gene[1].observe(_GeneCall,names='value')\nitems_gene[2].observe(_GeneCall,names='value')\ndisplay(box_h)\n_GeneCall(0)",
"Fitness Landscape\n\\begin{align}\n\\dot{x_0} =& x_0(f_0q-\\phi)\\\n\\dot{x_1} =& x_0f_0(1-q)+x_1-\\phi x_1\n\\end{align}\n\n$q = (1-u)^L$: probability of exact copy of master genome.\n$u$: probability of a mutation on one gene.\n$L$: length of genome.",
"fig = plt.figure()\nplt.close(fig)\n\ndef genomeSequenceQ(f_0,u,L):\n np.random.seed(0)\n clear_output(wait=True)\n t_f = 10 \n dt = 0.1\n \n x_ = np.random.uniform(size=2)\n x_ = x_/x_.sum()\n\n f = np.array([f_0,1])\n q = (1-u)**L\n \n def int_(t,x):\n mean = f[0]*x[0]+f[1]*x[1]\n dev = np.zeros(x.shape[0])\n dev[0] = x[0]*(f[0]*q - mean)\n dev[1] = x[0]*f[0]*(1-q)+x[1] - mean*x[1]\n return dev\n\n integ = integrate.ode(int_)\n integ.set_integrator(\"dopri5\").set_initial_value(np.asarray(x_))\n y = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n x = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n xdot = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n\n i = 0\n while integ.successful() and integ.t<t_f:\n y[i,:] = integ.y\n x[i,:] = integ.t\n xdot[i,:] = int_(integ.t,y[i,:])\n integ.integrate(integ.t+dt)\n i=i+1\n \n fig.clf()\n ax = fig.gca()\n with sns.color_palette(\"PuBuGn_d\",n_colors=2):\n for ind_ in range(x.shape[1]):\n ax.plot(x[:,ind_], y[:,ind_], label=(\"$f_%d$: %.2f\" % (ind_ ,f[ind_])))\n \n ax.set_ylim(-0.1,1.1)\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"Quasi specie\")\n \n ax2 = ax.twinx()\n with sns.color_palette(\"PuBuGn_d\",n_colors=2):\n ax2.plot(np.arange(0,t_f+dt,dt),y.dot(f), label=\"fitness \",linestyle='-.')\n ax2.set_ylabel('$f$', rotation=0)\n ax2.set_ylim(0,10)\n ax2.grid('off')\n ax.legend(ncol=min(4,x.shape[1]))\n ax2.legend(loc=4)\n display(fig)\n return q\n\nitems_geneQ = [\n widgets.IntSlider(\n value=5,\n min=2,\n max=12,\n description=\"Genome Length\",layout=widgets.Layout(width='50%', height='80px')),\n widgets.FloatSlider(\n value=0.05,\n min=0.01,\n max=0.8,\n step = 0.05,\n description=\"mutatation rate\",layout=widgets.Layout(width='50%', height='80px')),\n widgets.FloatSlider(\n value=1,\n min=0.0,\n max=40,\n step=0.05,\n description=\"max_f\",layout=widgets.Layout(width='50%', height='80px'))]\n\ndef _GeneCallQ(v):\n q_ = genomeSequenceQ(items_geneQ[2].value,items_geneQ[1].value,items_geneQ[0].value)\n label.value= \"f_0 q = %.2f\" % (q_*items_geneQ[2].value)\n return\n\n \nbox_h = widgets.VBox(items_geneQ,layout=widgets.Layout(width='100%', height='120px'))\nlabel = widgets.Label()\nbox_v = widgets.VBox([box_h,label])\nitems_geneQ[0].observe(_GeneCallQ,names='value')\nitems_geneQ[1].observe(_GeneCallQ,names='value')\nitems_geneQ[2].observe(_GeneCallQ,names='value')\ndisplay(box_v)\n_GeneCallQ(0)\n\n%%html\n\n<center><img height=\"100%\" width=\"100%\" src=\"./Nature-coop/mutation_rates.png\"/>\n</center>\n",
"Evolutionary Games\nTwo player games\n\\begin{align}\n\\dot{x_A} = x_A ~ [f_A(\\vec{x}) - \\phi ]\\\n\\dot{x_B} = x_B ~ [f_B(\\vec{x}) - \\phi ]\n\\end{align}\n\\begin{align}\nf_A(\\vec{x}) = a~x_A+b~x_B\\\nf_B(\\vec{x}) = c~x_A+d~x_B\n\\end{align}\nPayoff matrix:\n\\begin{align}\n\\begin{pmatrix}\n a & b \\\n c & d \\\n \\end{pmatrix}\n\\end{align}\nIn following demo you can determine values for $a, b, c$ and $d and see how their values change determine the outcome of the game. You can also run the demo with different number of players.",
"fig = plt.figure()\nplt.close(fig)\n\ndef evolutionaryGame(x_,f,labels = None):\n np.random.seed(0)\n clear_output(wait=True)\n t_f = 10 \n dt = 0.1\n x_ = np.asarray(x_)\n x_ = np.atleast_2d(x_).T\n f = np.asarray(f)\n \n def int_(t,x):\n mean = x.T.dot(f.dot(x))\n dev = x*(f.dot(x)-mean)\n return dev\n\n integ = integrate.ode(int_)\n integ.set_integrator(\"dopri5\").set_initial_value(np.asarray(x_))\n y = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n x = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n xdot = np.zeros((int(t_f/dt)+1,x_.shape[0]))\n\n i = 0\n while integ.successful() and integ.t<t_f:\n y[i,:] = integ.y[:,0]\n x[i,:] = integ.t\n xdot[i,:] = int_(integ.t,y[i,:])\n integ.integrate(integ.t+dt)\n i=i+1\n \n fig.clf()\n ax = fig.gca()\n with sns.color_palette(\"PuBuGn_d\",n_colors=2):\n for ind_ in range(x.shape[1]):\n \n ax.plot(x[:,ind_], y[:,ind_], label=\"Type: %d\" % (ind_+1) if labels is None else labels[ind_])\n \n ax.set_ylim(-0.1,1.1)\n ax.set_xlabel(\"time\")\n ax.set_ylabel(\"Quasi specie\")\n ax.legend(ncol=min(4,x.shape[1]))\n \n display(fig)\n\nitems_strat = [\n widgets.IntText(\n value=2,\n min=2,\n max=5.0,\n description=\"r\",layout=widgets.Layout(width='50%', height='80px'))\n ,widgets.Button(\n description=\"submit\")]\n\ndef _EvolutionaryGames(v):\n init = []\n payoff = []\n for ind_,obj in enumerate(objects_strat):\n if ind_ < len(objects_strat)-1:\n init.append(obj[0].value)\n else:\n if sum(init)>1:\n status_labelstrat.value = \"Initial rates should sum to <1\"\n return\n else:\n status_labelstrat.value = \"\"\n init.append(1-sum(init))\n rate_ = []\n for j in range(0,len(objects_strat)):\n rate_.append(obj[j+1].value)\n payoff.append(rate_)\n init = np.asarray(init)\n payoff = np.asarray(payoff)\n if len(objects_strat)==3:\n status_labelstrat.value = \"Determinant: %.2f\" % linalg.det(payoff)\n return evolutionaryGame(init,payoff)\n\n\nobjects_strat = []\nstatus_labelstrat = None\nbox_vstrat = None\n\ndef call_back_mute(v):\n global box_vstrat, status_labelstrat\n if box_vstrat is not None:\n box_vstrat.close()\n\n count = items_strat[0].value\n if count <2:\n return\n dsps = []\n objects_strat[:] = []\n for i in range(count):\n if i < count-1:\n specie = [widgets.FloatSlider(\n value=1.0/count,\n min=0,\n max=1.0,\n step=0.01,\n description=\"init \"+str(i+1),layout=widgets.Layout(width='100%', height='80px'))]\n else:\n specie = [widgets.Label(layout=widgets.Layout(width='100%', height='80px'))]\n\n for j in range(count):\n wid = widgets.IntSlider(\n value=1,\n min=-1,\n max=5.0,\n step=1,\n description=str(chr(96+i*count+j+1)),layout=widgets.Layout(width='100%', height='80px'))\n wid.observe(_EvolutionaryGames,names='value')\n specie.append(wid)\n \n specie[0].observe(_EvolutionaryGames,names='value')\n box_h = widgets.HBox(specie,layout=widgets.Layout(width='100%', height='80px'))\n objects_strat.append(specie)\n dsps.append(box_h)\n\n status_labelstrat = widgets.Label()\n box_vstrat = widgets.VBox(dsps+[status_labelstrat],layout=widgets.Layout(width='100%', height='80px'))\n \n display(box_vstrat)\n _EvolutionaryGames(\"\")\n\n\nitems_strat[1].on_click(call_back_mute)\n\nbox_h = widgets.HBox(items_strat,layout=widgets.Layout(width='100%', height='80px'))\ndisplay(box_h)",
"Prisoners Dillema\nPayoff matrix:\n\\begin{align}\n\\begin{pmatrix}\n & C & D\\\n C & 3 & 0 \\\n D & 5 & 1 \\\n \\end{pmatrix}\n\\end{align}\nThe Nash equilibria in this game is to always defect (D,D).",
"R = 3\nS = 0\nT = 5\nP = 1\n\npayoff = [[R,S],[T,P]]\nevolutionaryGame([0.6,0.4],payoff,[\"Cooperate\",\"Defect\"])",
"Direct Respirocity vs. Always Defect.\nTomorrow never dies!\nPayoff matrix:\n\\begin{align}\n\\begin{pmatrix}\n & GRIM & ALLD\\\n GRIM & m3 & 0+(m-1)1 \\\n ALLD & 5+(m-1)1 & m1 \\\n \\end{pmatrix}\n\\end{align}\nWhere $m$ is expected days which the game will be repeated.\nif $3m > 5+(m-1)$ then GRIM is a strict Nash equilibrium when competing with ALLD. \nIn terms of evolutionary dynamics, if the whole population uses GRIM, then ALLD cannot invade: selection opposes ALLD at low frequency. GRIM is stable against invasion by ALLD if the number of rounds, $m$, exceeds a critical value:\n\\begin{align}\nm> \\frac{T-P}{R-P} = \\frac{4}{2} = 2\n\\end{align}\nIn following widget you can play with the value of $m$ to see how the two strategies perform.",
"def _EvolutionaryGamesProb(v):\n R = 3\n S = 0\n T = 5\n P = 1\n m_ = prob_tomorrow.value\n payoff = [[R*m_,S+(m_-1)*P],[T+(m_-1)*P,m_*P]]\n return evolutionaryGame([0.99,0.01],payoff,[\"GRIM\",\"ALLD\"])\n\nprob_tomorrow = widgets.FloatSlider(\n value=1,\n min=0,\n max=10.0,\n description=\"m_\",layout=widgets.Layout(width='100%', height='80px'))\n\nprob_tomorrow.observe(_EvolutionaryGamesProb,names=\"value\")\ndisplay(prob_tomorrow)",
"Reactive strategies\nTit-for-Tat.\nPayoff matrix:\n\\begin{align}\n\\begin{pmatrix}\n & CC & CD & DC & DD\\\n CC & p_1p_2 & p_1(1-p_2) & (1-p_1)p_2 & (1-p_1)(1-p_2) \\\n CD & q_1p_2 & q_1(1-p_2) & (1-q_1)p_2 & (1-q_1)(1-p_2) \\\n DC & p_1q_2 & p_1(1-q_2) & (1-p_1)q_2 & (1-p_1)(1-q_2) \\\n DD & q_1q_2 & q_1(1-q_2) & (1-q_1)q_2 & (1-q_1)(1-q_2) \\\n \\end{pmatrix}\n\\end{align}\n\n$p_1$: probability that player 1 will cooperate given that player 2 cooperated in previous round.\n$p_2$: probability that player 2 will cooperate given that player 1 cooperated in previous round.\n$q_1$: probability that player 1 will cooperate given that player 2 defected in previous round.\n$q_2$: probability that player 2 will cooperate given that player 1 defected in previous round.",
"p_1 = widgets.FloatSlider(\n value=0.5,\n min=0,\n max=1.0,\n description=\"p_1\",layout=widgets.Layout(width='100%', height='80px'))\nq_1 = widgets.FloatSlider(\n value=0.5,\n min=0,\n max=1.0,\n description=\"q_1\",layout=widgets.Layout(width='100%', height='80px'))\n\nuser_1 = widgets.HBox([p_1,q_1],layout=widgets.Layout(width='100%', height='80px'))\n\np_2 = widgets.FloatSlider(\n value=0.5,\n min=0,\n max=1.0,\n description=\"p_2\",layout=widgets.Layout(width='100%', height='80px'))\n\nq_2 = widgets.FloatSlider(\n value=0.5,\n min=0,\n max=1.0,\n description=\"q_2\",layout=widgets.Layout(width='100%', height='80px'))\n\nuser_2 = widgets.HBox([p_2,q_2],layout=widgets.Layout(width='100%', height='80px'))\n\nbox_pq = widgets.VBox([user_1,user_2],layout=widgets.Layout(width='100%', height='80px'))\n\ndef compute_expected_dist(p_1_v,p_2_v,q_1_v,q_2_v):\n v_ = np.array([[p_1_v*p_2_v, p_1_v*(1-p_2_v), (1-p_1_v)*p_2_v, (1-p_1_v)*(1-p_2_v)],\n [q_1_v*p_2_v, q_1_v*(1-p_2_v), (1-q_1_v)*p_2_v, (1-q_1_v)*(1-p_2_v)],\n [p_1_v*q_2_v, p_1_v*(1-q_2_v), (1-p_1_v)*q_2_v, (1-p_1_v)*(1-q_2_v)],\n [q_1_v*q_2_v, q_1_v*(1-q_2_v), (1-q_1_v)*q_2_v, (1-q_1_v)*(1-q_2_v)]]).T\n \n w,vl = linalg.eig(v_)\n return vl[:,0].real\n\ndef _EvolutionaryGamesGen(v):\n p_1_v = p_1.value\n p_2_v = p_2.value\n q_1_v = q_1.value\n q_2_v = q_2.value\n \n p_1_1 = compute_expected_dist(p_1_v,p_1_v,q_1_v,q_1_v)\n p_1_2 = compute_expected_dist(p_1_v,p_2_v,q_1_v,q_2_v)\n p_2_1 = compute_expected_dist(p_2_v,p_1_v,q_2_v,q_1_v)\n p_2_2 = compute_expected_dist(p_2_v,p_2_v,q_2_v,q_2_v)\n \n R = 3\n S = 0\n T = 5\n P = 1\n #print(p_1_1)\n payoff = [[R*p_1_1[0]+S*p_1_1[1]+T*p_1_1[2]+P**p_1_1[3], R*p_1_2[0]+S*p_1_2[1]+T*p_1_2[2]+P**p_1_2[3]],\n [R*p_2_1[0]+S*p_2_1[1]+T*p_2_1[2]+P**p_2_1[3], R*p_2_2[0]+S*p_2_2[1]+T*p_2_2[2]+P**p_2_2[3]]]\n payoff = np.array(payoff)\n \n return evolutionaryGame([0.4,0.6],payoff,['Policy 1','Policy 2'])\n\np_1.observe(_EvolutionaryGamesGen,names=\"value\")\np_2.observe(_EvolutionaryGamesGen,names=\"value\")\nq_1.observe(_EvolutionaryGamesGen,names=\"value\")\nq_2.observe(_EvolutionaryGamesGen,names=\"value\")\n\ndisplay(box_pq)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
amitkaps/applied-machine-learning
|
reference/Module-01a-reference.ipynb
|
mit
|
[
"Frame, Acquire & Refine\nRaw Data\nYou are provided with the following data: loan_data.csv\nThis is the historical data that the bank has provided. It has the following columns\nApplication Attributes:\n- years: Number of years the applicant has been employed\n- ownership: Whether the applicant owns a house or not\n- income: Annual income of the applicant\n- age: Age of the applicant \nBehavioural Attributes:\n- grade: Credit grade of the applicant\nOutcome Variable:\n- amount : Amount of Loan provided to the applicant\n- default : Whether the applicant has defaulted or not \n- interest: Interest rate charged for the applicant \nFrame the Problem\n\nWhat are the features?\nWhat is the target?\n\nDiscuss?\nAcquire the Data",
"#Load the libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n#Defualt Variables\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (16,9)\nplt.style.use('fivethirtyeight')\npd.set_option('display.float_format', lambda x: '%.2f' % x)\n\n#Load the dataset\ndf = pd.read_csv(\"data/loan_data.csv\")\n\n#View the first few rows of train\ndf.head()\n\n#View the columns of the train dataset\ndf.columns\n\n#View the data types of the train dataset\ndf.dtypes\n\n#View the number of records in the data\ndf.shape\n\n#View summary of raw data \ndf.describe()",
"Refine the Data\nLets check the dataset for quality and compeleteness\n1. Missing Values\n2. Outliers\nCheck for Missing Values",
"# Find if df has missing values. Hint: There is a isnull() function\ndf.isnull().head()",
"One consideration we check here is the number of observations with missing values for those columns that have missing values. If a column has too many missing values, it might make sense to drop the column.",
"#let's see how many missing values are present\ndf.isnull().sum()",
"So, we see that two columns have missing values: interest and years. Both the columns are numeric. We have three options for dealing with this missing values\nOptions to treat Missing Values\n- REMOVE - NAN rows\n- IMPUTATION - Replace them with something??\n - Mean \n - Median\n - Fixed Number - Domain Relevant\n - High Number (999) - Issue with modelling\n- BINNING - Categorical variable and \"Missing becomes a number\n- DOMAIN SPECIFIC* - Entry error, pipeline, etc.",
"#Let's replace missing values with the median of the column\ndf.describe()\n\n#there's a fillna function\ndf = df.fillna(df.median())\n\n#Now, let's check if train has missing values or not\ndf.isnull().any()",
"Check for Outlier Values\nLet us check first the categorical variables",
"# Which variables are Categorical?\ndf.dtypes\n\n# Create a Crosstab of those variables with another variable\npd.crosstab(df.default, df.grade)\n\n# Create a Crosstab of those variables with another variable\npd.crosstab(df.default, df.ownership)",
"Let us check outliers in the continuous variable\n\nPlotting\nHistogram\nBox-Plot \n\n\nMeasuring \nZ-score > 3\nModified Z-score > 3.5\nwhere modified Z-score = 0.6745 * (x - x_median) / MAD",
"# Describe the data set continuous values\ndf.describe()",
"Clearly the age variable looks like it has an outlier - Age cannot be greater 100! \nAlso the income variable looks like it may also have an outlier.",
"# Make a histogram of age\ndf.age.hist(bins=100)\n\n# Make a histogram of income\ndf.income.hist(bins=100)\n\n# Make Histograms for all other variables\n\n# Make a scatter of age and income\nplt.scatter(df.age, df.income)",
"Find the observation which has age = 144 and remove it from the dataframe",
"# Find the observation \ndf[df.age == 144]\n\ndf[df.age == 144].index\n\n# Use drop to remove the observation inplace\ndf.drop(df[df.age == 144].index, axis=0, inplace=True)\n\n# Find the shape of the df\ndf.shape\n\n# Check again for outliers\ndf.describe()\n\n# Save the new file as cleaned data\ndf.to_csv(\"data/loan_data_clean.csv\", index=False)\n\n#We are good to go to the next step"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
opesci/devito
|
examples/seismic/tutorials/08_snapshotting.ipynb
|
mit
|
[
"Snapshotting with Devito using the ConditionalDimension\nThis notebook intends to introduce new Devito users (especially with a C or FORTRAN background) to the best practice on saving snapshots to disk, as a binary float file. \nWe start by presenting a naive approach, and then introduce a more efficient method, which exploits Devito's ConditionalDimension.\nInitialize utilities",
"#NBVAL_IGNORE_OUTPUT\n%reset -f\nimport numpy as np\nimport matplotlib.pyplot as plt \n%matplotlib inline",
"Problem Setup\nThis tutorial is based on an example that has appeared in a TLE tutorial(Louboutin et. al., 2017), in which one shot is modeled over a 2-layer velocity model.",
"# This cell sets up the problem that is already explained in the first TLE tutorial.\n\n#NBVAL_IGNORE_OUTPUT\n#%%flake8\nfrom examples.seismic import Receiver\nfrom examples.seismic import RickerSource\nfrom examples.seismic import Model, plot_velocity, TimeAxis\nfrom devito import TimeFunction\nfrom devito import Eq, solve\nfrom devito import Operator\n\n\n# Set velocity model\nnx = 201\nnz = 201\nnb = 10\nshape = (nx, nz)\nspacing = (20., 20.)\norigin = (0., 0.)\nv = np.empty(shape, dtype=np.float32)\nv[:, :int(nx/2)] = 2.0\nv[:, int(nx/2):] = 2.5\n\nmodel = Model(vp=v, origin=origin, shape=shape, spacing=spacing,\n space_order=2, nbl=10, bcs=\"damp\")\n\n# Set time range, source, source coordinates and receiver coordinates\nt0 = 0. # Simulation starts a t=0\ntn = 1000. # Simulation lasts tn milliseconds\ndt = model.critical_dt # Time step from model grid spacing\ntime_range = TimeAxis(start=t0, stop=tn, step=dt)\nnt = time_range.num # number of time steps\n\nf0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)\nsrc = RickerSource(\n name='src',\n grid=model.grid,\n f0=f0,\n time_range=time_range) \n\nsrc.coordinates.data[0, :] = np.array(model.domain_size) * .5\nsrc.coordinates.data[0, -1] = 20. # Depth is 20m\n\nrec = Receiver(\n name='rec',\n grid=model.grid,\n npoint=101,\n time_range=time_range) # new\nrec.coordinates.data[:, 0] = np.linspace(0, model.domain_size[0], num=101)\nrec.coordinates.data[:, 1] = 20. # Depth is 20m\ndepth = rec.coordinates.data[:, 1] # Depth is 20m\n\n\nplot_velocity(model, source=src.coordinates.data,\n receiver=rec.coordinates.data[::4, :])\n\n#Used for reshaping\nvnx = nx+20 \nvnz = nz+20\n\n# Set symbolics for the wavefield object `u`, setting save on all time steps \n# (which can occupy a lot of memory), to later collect snapshots (naive method):\n\nu = TimeFunction(name=\"u\", grid=model.grid, time_order=2,\n space_order=2, save=time_range.num)\n\n# Set symbolics of the operator, source and receivers:\npde = model.m * u.dt2 - u.laplace + model.damp * u.dt\nstencil = Eq(u.forward, solve(pde, u.forward))\nsrc_term = src.inject(field=u.forward, expr=src * dt**2 / model.m,\n offset=model.nbl)\nrec_term = rec.interpolate(expr=u, offset=model.nbl)\nop = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)\n\n# Run the operator for `(nt-2)` time steps:\nop(time=nt-2, dt=model.critical_dt)\n",
"Saving snaps to disk - naive approach\nWe want to get equally spaced snaps from the nt-2 saved in u.data. The user can then define the total number of snaps nsnaps, which determines a factor to divide nt.",
"nsnaps = 100\nfactor = round(u.shape[0] / nsnaps) # Get approx nsnaps, for any nt\nucopy = u.data.copy(order='C')\nfilename = \"naivsnaps.bin\"\nfile_u = open(filename, 'wb')\nfor it in range(0, nsnaps):\n file_u.write(ucopy[it*factor, :, :])\nfile_u.close()",
"Checking u.data spaced by factor using matplotlib,",
"#NBVAL_IGNORE_OUTPUT\nplt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n\nimcnt = 1 # Image counter for plotting\nplot_num = 5 # Number of images to plot\n\nfor i in range(0, nsnaps, int(nsnaps/plot_num)):\n plt.subplot(1, plot_num+1, imcnt+1)\n imcnt = imcnt + 1\n plt.imshow(np.transpose(u.data[i * factor, :, :]), vmin=-1, vmax=1, cmap=\"seismic\")\n\nplt.show()\n",
"Or from the saved file:",
"#NBVAL_IGNORE_OUTPUT\nfobj = open(\"naivsnaps.bin\", \"rb\") \nsnaps = np.fromfile(fobj, dtype = np.float32) \nsnaps = np.reshape(snaps, (nsnaps, vnx, vnz)) #reshape vec2mtx, devito format. nx first\nfobj.close()\n\nplt.rcParams['figure.figsize'] = (20,20) # Increases figure size\n\nimcnt = 1 # Image counter for plotting\nplot_num = 5 # Number of images to plot\n\nfor i in range(0, nsnaps, int(nsnaps/plot_num)):\n plt.subplot(1, plot_num+1, imcnt+1);\n imcnt = imcnt + 1\n plt.imshow(np.transpose(snaps[i,:,:]), vmin=-1, vmax=1, cmap=\"seismic\")\n\nplt.show() ",
"This C/FORTRAN way of saving snaps is clearly not optimal when using Devito; the wavefield object u is specified to save all snaps, and a memory copy is done at every op time step. Giving that we don't want all the snaps saved, this process is wasteful; only the selected snapshots should be copied during execution. \nTo address these issues, a better way to save snaps using Devito's capabilities is presented in the following section.\nSaving snaps to disk - Devito method\nA better way to save snapshots to disk is to create a new TimeFunction, usave, whose time size is equal to \nnsnaps. There are 3 main differences from the previous code, which are flagged by #Part 1, #Part 2 and #Part 3 . After running the code each part is explained with more detail.",
"#NBVAL_IGNORE_OUTPUT\nfrom devito import ConditionalDimension\n\nnsnaps = 103 # desired number of equally spaced snaps\nfactor = round(nt / nsnaps) # subsequent calculated factor\n\nprint(f\"factor is {factor}\")\n\n#Part 1 #############\ntime_subsampled = ConditionalDimension(\n 't_sub', parent=model.grid.time_dim, factor=factor)\nusave = TimeFunction(name='usave', grid=model.grid, time_order=2, space_order=2,\n save=nsnaps, time_dim=time_subsampled)\nprint(time_subsampled)\n#####################\n\nu = TimeFunction(name=\"u\", grid=model.grid, time_order=2, space_order=2)\npde = model.m * u.dt2 - u.laplace + model.damp * u.dt\nstencil = Eq(u.forward, solve(pde, u.forward))\nsrc_term = src.inject(\n field=u.forward,\n expr=src * dt**2 / model.m,\n offset=model.nbl)\nrec_term = rec.interpolate(expr=u, offset=model.nbl)\n\n#Part 2 #############\nop1 = Operator([stencil] + src_term + rec_term,\n subs=model.spacing_map) # usual operator\nop2 = Operator([stencil] + src_term + [Eq(usave, u)] + rec_term,\n subs=model.spacing_map) # operator with snapshots\n\nop1(time=nt - 2, dt=model.critical_dt) # run only for comparison\nu.data.fill(0.)\nop2(time=nt - 2, dt=model.critical_dt)\n#####################\n\n#Part 3 #############\nprint(\"Saving snaps file\")\nprint(\"Dimensions: nz = {:d}, nx = {:d}\".format(nz + 2 * nb, nx + 2 * nb))\nfilename = \"snaps2.bin\"\nusave.data.tofile(filename)\n#####################",
"As usave.data has the desired snaps, no extra variable copy is required. The snaps can then be visualized:",
"#NBVAL_IGNORE_OUTPUT\nfobj = open(\"snaps2.bin\", \"rb\")\nsnaps = np.fromfile(fobj, dtype=np.float32)\nsnaps = np.reshape(snaps, (nsnaps, vnx, vnz))\nfobj.close()\n\nplt.rcParams['figure.figsize'] = (20, 20) # Increases figure size\n\nimcnt = 1 # Image counter for plotting\nplot_num = 5 # Number of images to plot\nfor i in range(0, plot_num):\n plt.subplot(1, plot_num, i+1);\n imcnt = imcnt + 1\n ind = i * int(nsnaps/plot_num)\n plt.imshow(np.transpose(snaps[ind,:,:]), vmin=-1, vmax=1, cmap=\"seismic\")\n\nplt.show() ",
"About Part 1\nHere a subsampled version (time_subsampled) of the full time Dimension (model.grid.time_dim) is created with the ConditionalDimension. time_subsampled is then used to define an additional symbolic wavefield usave, which will store in usave.data only the predefined number of snapshots (see Part 2).\nFurther insight on how ConditionalDimension works and its most common uses can be found in the Devito documentation. The following excerpt exemplifies subsampling of simple functions:\nAmong the other things, ConditionalDimensions are indicated to implement\nFunction subsampling. In the following example, an Operator evaluates the\nFunction ``g`` and saves its content into ``f`` every ``factor=4`` iterations.\n\n>>> from devito import Dimension, ConditionalDimension, Function, Eq, Operator\n>>> size, factor = 16, 4\n>>> i = Dimension(name='i')\n>>> ci = ConditionalDimension(name='ci', parent=i, factor=factor)\n>>> g = Function(name='g', shape=(size,), dimensions=(i,))\n>>> f = Function(name='f', shape=(int(size/factor),), dimensions=(ci,))\n>>> op = Operator([Eq(g, 1), Eq(f, g)])\n\nThe Operator generates the following for-loop (pseudocode)\n.. code-block:: C\n for (int i = i_m; i <= i_M; i += 1) {\n g[i] = 1;\n if (i%4 == 0) {\n f[i / 4] = g[i];\n }\n }\n\nFrom this excerpt we can see that the C code generated by Operator with the extra argument Eq(f,g) mainly corresponds to adding an if block on the optimized C-code, which saves the desired snapshots on f, from g, at the correct times. Following the same line of thought, in the following section the symbolic and C-generated code are compared, with and without snapshots.\nAbout Part 2\nWe then define Operators op1 (no snaps) and op2 (with snaps). The only difference between the two is that op2 has an extra symbolic equation Eq(usave, u). Notice that even though usave and u have different Dimensions, Devito's symbolic interpreter understands it, because usave's time_dim was defined through the ConditionalDimension. \nBelow, we show relevant excerpts of the compiled Operators. As explained above, the main difference between the optimized C-code of op1 and op2 is the addition of an if block. 
For op1's C code:\n```c\n// #define's\n//...\n// declare dataobj struct\n//...\n// declare profiler struct\n//...\nint Kernel(struct dataobj restrict damp_vec, const float dt, struct dataobj restrict m_vec, const float o_x, const float o_y, struct dataobj restrict rec_vec, struct dataobj restrict rec_coords_vec, struct dataobj restrict src_vec, struct dataobj restrict src_coords_vec, struct dataobj *restrict u_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)\n{\n // ...\n // ...\nfloat (restrict u)[u_vec->size[1]][u_vec->size[2]] attribute ((aligned (64))) = (float ()[u_vec->size[1]][u_vec->size[2]]) u_vec->data;\n // ...\nfor (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))\n {\n struct timeval start_section0, end_section0;\n gettimeofday(&start_section0, NULL);\n for (int x = x_m; x <= x_M; x += 1)\n {\n #pragma omp simd\n for (int y = y_m; y <= y_M; y += 1)\n {\n float r0 = 1.0e+4Fdtm[x + 2][y + 2] + 5.0e+3F(dtdt)damp[x + 1][y + 1];\n u[t1][x + 2][y + 2] = 2.0e+4Fdtm[x + 2][y + 2]u[t0][x + 2][y + 2]/r0 - 1.0e+4Fdtm[x + 2][y + 2]u[t2][x + 2][y + 2]/r0 + 1.0e+2F((dtdtdt)u[t0][x + 1][y + 2]/r0 + (dtdtdt)u[t0][x + 2][y + 1]/r0 + (dtdtdt)u[t0][x + 2][y + 3]/r0 + (dtdtdt)u[t0][x + 3][y + 2]/r0) + 5.0e+3F(dtdt)damp[x + 1][y + 1]u[t2][x + 2][y + 2]/r0 - 4.0e+2Fdtdtdtu[t0][x + 2][y + 2]/r0;\n }\n }\n gettimeofday(&end_section0, NULL);\n timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;\n struct timeval start_section1, end_section1;\n gettimeofday(&start_section1, NULL);\n for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)\n {\n //source injection\n //...\n }\n gettimeofday(&end_section1, NULL);\n timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;\n struct timeval start_section2, end_section2;\n gettimeofday(&start_section2, NULL);\n for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)\n {\n //receivers interpolation\n //...\n }\n gettimeofday(&end_section2, NULL);\n timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;\n }\n return 0;\n}\n```\nop2's C code (differences are highlighted by //<<<<<<<<<<<<<<<<<<<<):\n```c\n// #define's\n//...\n// declare dataobj struct\n//...\n// declare profiler struct\n//...\nint Kernel(struct dataobj restrict damp_vec, const float dt, struct dataobj restrict m_vec, const float o_x, const float o_y, struct dataobj restrict rec_vec, struct dataobj restrict rec_coords_vec, struct dataobj restrict src_vec, struct dataobj restrict src_coords_vec, struct dataobj restrict u_vec, struct dataobj restrict usave_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)\n{\n // ...\n // ...\nfloat (restrict u)[u_vec->size[1]][u_vec->size[2]] attribute ((aligned (64))) = (float ()[u_vec->size[1]][u_vec->size[2]]) u_vec->data;\n//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<DECLARE USAVE<<<<<<<<<<<<<<<<<<<<< \n float (restrict usave)[usave_vec->size[1]][usave_vec->size[2]] attribute ((aligned (64))) = (float 
()[usave_vec->size[1]][usave_vec->size[2]]) usave_vec->data;\n//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< \n//flush denormal numbers...\n\nfor (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))\n {\n struct timeval start_section0, end_section0;\n gettimeofday(&start_section0, NULL);\n for (int x = x_m; x <= x_M; x += 1)\n {\n #pragma omp simd\n for (int y = y_m; y <= y_M; y += 1)\n {\n float r0 = 1.0e+4Fdtm[x + 2][y + 2] + 5.0e+3F(dtdt)damp[x + 1][y + 1];\n u[t1][x + 2][y + 2] = 2.0e+4Fdtm[x + 2][y + 2]u[t0][x + 2][y + 2]/r0 - 1.0e+4Fdtm[x + 2][y + 2]u[t2][x + 2][y + 2]/r0 + 1.0e+2F((dtdtdt)u[t0][x + 1][y + 2]/r0 + (dtdtdt)u[t0][x + 2][y + 1]/r0 + (dtdtdt)u[t0][x + 2][y + 3]/r0 + (dtdtdt)u[t0][x + 3][y + 2]/r0) + 5.0e+3F(dtdt)damp[x + 1][y + 1]u[t2][x + 2][y + 2]/r0 - 4.0e+2Fdtdtdtu[t0][x + 2][y + 2]/r0;\n }\n }\n gettimeofday(&end_section0, NULL);\n timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;\n//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<SAVE SNAPSHOT<<<<<<<<<<<<<<<<<<<<<\n if ((time)%(60) == 0)\n {\n struct timeval start_section1, end_section1;\n gettimeofday(&start_section1, NULL);\n for (int x = x_m; x <= x_M; x += 1)\n {\n #pragma omp simd\n for (int y = y_m; y <= y_M; y += 1)\n {\n usave[time / 60][x + 2][y + 2] = u[t0][x + 2][y + 2];\n }\n }\n//<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n gettimeofday(&end_section1, NULL);\n timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;\n }\n struct timeval start_section2, end_section2;\n gettimeofday(&start_section2, NULL);\n for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)\n {\n //source injection\n //...\n }\n gettimeofday(&end_section2, NULL);\n timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;\n struct timeval start_section3, end_section3;\n gettimeofday(&start_section3, NULL);\n for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)\n {\n //receivers interpolation\n //...\n }\n gettimeofday(&end_section3, NULL);\n timers->section3 += (double)(end_section3.tv_sec-start_section3.tv_sec)+(double)(end_section3.tv_usec-start_section3.tv_usec)/1000000;\n }\n return 0;\n}\n```\nTo inspect the full codes of op1 and op2, run the block below:",
"def print2file(filename, thingToPrint):\n import sys\n\n orig_stdout = sys.stdout\n\n f = open(filename, 'w')\n sys.stdout = f\n print(thingToPrint)\n f.close()\n\n sys.stdout = orig_stdout\n\n\n# print2file(\"op1.c\", op1) # uncomment to print to file\n# print2file(\"op2.c\", op2) # uncomment to print to file\n# print(op1) # uncomment to print here\n# print(op2) # uncomment to print here",
"To run snaps as a movie (outside Jupyter Notebook), run the code below, altering filename, nsnaps, nx, nz accordingly:",
"#NBVAL_IGNORE_OUTPUT\n#NBVAL_SKIP\nfrom IPython.display import HTML\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\nfilename = \"naivsnaps.bin\"\nnsnaps = 100\nfobj = open(filename, \"rb\")\nsnapsObj = np.fromfile(fobj, dtype=np.float32)\nsnapsObj = np.reshape(snapsObj, (nsnaps, vnx, vnz))\nfobj.close()\n\nfig, ax = plt.subplots()\nmatrice = ax.imshow(snapsObj[0, :, :].T, vmin=-1, vmax=1, cmap=\"seismic\")\nplt.colorbar(matrice)\n\nplt.xlabel('x')\nplt.ylabel('z')\nplt.title('Modelling one shot over a 2-layer velocity model with Devito.') \n\ndef update(i):\n matrice.set_array(snapsObj[i, :, :].T)\n return matrice,\n\n# Animation\nani = animation.FuncAnimation(fig, update, frames=nsnaps, interval=50, blit=True)\n\nplt.close(ani._fig)\nHTML(ani.to_html5_video())\n",
"References\nLouboutin, M., Witte, P., Lange, M., Kukreja, N., Luporini, F., Gorman, G., & Herrmann, F. J. (2017). Full-waveform inversion, Part 1: Forward modeling. The Leading Edge, 36(12), 1033-1036."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mne-tools/mne-tools.github.io
|
0.23/_downloads/b89584de6ec99a847868d7b80a32cf50/80_dics.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"DICS for power mapping\nIn this tutorial, we'll simulate two signals originating from two\nlocations on the cortex. These signals will be sinusoids, so we'll be looking\nat oscillatory activity (as opposed to evoked activity).\nWe'll use dynamic imaging of coherent sources (DICS) :footcite:GrossEtAl2001\nto map out spectral power along the cortex. Let's see if we can find our two\nsimulated sources.",
"# Author: Marijn van Vliet <w.m.vanvliet@gmail.com>\n#\n# License: BSD (3-clause)",
"Setup\nWe first import the required packages to run this tutorial and define a list\nof filenames for various things we'll be using.",
"import os.path as op\nimport numpy as np\nfrom scipy.signal import welch, coherence, unit_impulse\nfrom matplotlib import pyplot as plt\n\nimport mne\nfrom mne.simulation import simulate_raw, add_noise\nfrom mne.datasets import sample\nfrom mne.minimum_norm import make_inverse_operator, apply_inverse\nfrom mne.time_frequency import csd_morlet\nfrom mne.beamformer import make_dics, apply_dics_csd\n\n# We use the MEG and MRI setup from the MNE-sample dataset\ndata_path = sample.data_path(download=False)\nsubjects_dir = op.join(data_path, 'subjects')\n\n# Filenames for various files we'll be using\nmeg_path = op.join(data_path, 'MEG', 'sample')\nraw_fname = op.join(meg_path, 'sample_audvis_raw.fif')\nfwd_fname = op.join(meg_path, 'sample_audvis-meg-eeg-oct-6-fwd.fif')\ncov_fname = op.join(meg_path, 'sample_audvis-cov.fif')\nfwd = mne.read_forward_solution(fwd_fname)\n\n# Seed for the random number generator\nrand = np.random.RandomState(42)",
"Data simulation\nThe following function generates a timeseries that contains an oscillator,\nwhose frequency fluctuates a little over time, but stays close to 10 Hz.\nWe'll use this function to generate our two signals.",
"sfreq = 50. # Sampling frequency of the generated signal\nn_samp = int(round(10. * sfreq))\ntimes = np.arange(n_samp) / sfreq # 10 seconds of signal\nn_times = len(times)\n\n\ndef coh_signal_gen():\n \"\"\"Generate an oscillating signal.\n\n Returns\n -------\n signal : ndarray\n The generated signal.\n \"\"\"\n t_rand = 0.001 # Variation in the instantaneous frequency of the signal\n std = 0.1 # Std-dev of the random fluctuations added to the signal\n base_freq = 10. # Base frequency of the oscillators in Hertz\n n_times = len(times)\n\n # Generate an oscillator with varying frequency and phase lag.\n signal = np.sin(2.0 * np.pi *\n (base_freq * np.arange(n_times) / sfreq +\n np.cumsum(t_rand * rand.randn(n_times))))\n\n # Add some random fluctuations to the signal.\n signal += std * rand.randn(n_times)\n\n # Scale the signal to be in the right order of magnitude (~100 nAm)\n # for MEG data.\n signal *= 100e-9\n\n return signal",
"Let's simulate two timeseries and plot some basic information about them.",
"signal1 = coh_signal_gen()\nsignal2 = coh_signal_gen()\n\nfig, axes = plt.subplots(2, 2, figsize=(8, 4))\n\n# Plot the timeseries\nax = axes[0][0]\nax.plot(times, 1e9 * signal1, lw=0.5)\nax.set(xlabel='Time (s)', xlim=times[[0, -1]], ylabel='Amplitude (Am)',\n title='Signal 1')\nax = axes[0][1]\nax.plot(times, 1e9 * signal2, lw=0.5)\nax.set(xlabel='Time (s)', xlim=times[[0, -1]], title='Signal 2')\n\n# Power spectrum of the first timeseries\nf, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)\nax = axes[1][0]\n# Only plot the first 100 frequencies\nax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.)\nax.set(xlabel='Frequency (Hz)', xlim=f[[0, 99]],\n ylabel='Power (dB)', title='Power spectrum of signal 1')\n\n# Compute the coherence between the two timeseries\nf, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)\nax = axes[1][1]\nax.plot(f[:50], coh[:50], lw=1.)\nax.set(xlabel='Frequency (Hz)', xlim=f[[0, 49]], ylabel='Coherence',\n title='Coherence between the timeseries')\nfig.tight_layout()",
"Now we put the signals at two locations on the cortex. We construct a\n:class:mne.SourceEstimate object to store them in.\nThe timeseries will have a part where the signal is active and a part where\nit is not. The techniques we'll be using in this tutorial depend on being\nable to contrast data that contains the signal of interest versus data that\ndoes not (i.e. it contains only noise).",
"# The locations on the cortex where the signal will originate from. These\n# locations are indicated as vertex numbers.\nvertices = [[146374], [33830]]\n\n# Construct SourceEstimates that describe the signals at the cortical level.\ndata = np.vstack((signal1, signal2))\nstc_signal = mne.SourceEstimate(\n data, vertices, tmin=0, tstep=1. / sfreq, subject='sample')\nstc_noise = stc_signal * 0.",
"Before we simulate the sensor-level data, let's define a signal-to-noise\nratio. You are encouraged to play with this parameter and see the effect of\nnoise on our results.",
"snr = 1. # Signal-to-noise ratio. Decrease to add more noise.",
"Now we run the signal through the forward model to obtain simulated sensor\ndata. To save computation time, we'll only simulate gradiometer data. You can\ntry simulating other types of sensors as well.\nSome noise is added based on the baseline noise covariance matrix from the\nsample dataset, scaled to implement the desired SNR.",
"# Read the info from the sample dataset. This defines the location of the\n# sensors and such.\ninfo = mne.io.read_info(raw_fname)\ninfo.update(sfreq=sfreq, bads=[])\n\n# Only use gradiometers\npicks = mne.pick_types(info, meg='grad', stim=True, exclude=())\nmne.pick_info(info, picks, copy=False)\n\n# Define a covariance matrix for the simulated noise. In this tutorial, we use\n# a simple diagonal matrix.\ncov = mne.cov.make_ad_hoc_cov(info)\ncov['data'] *= (20. / snr) ** 2 # Scale the noise to achieve the desired SNR\n\n# Simulate the raw data, with a lowpass filter on the noise\nstcs = [(stc_signal, unit_impulse(n_samp, dtype=int) * 1),\n (stc_noise, unit_impulse(n_samp, dtype=int) * 2)] # stacked in time\nduration = (len(stc_signal.times) * 2) / sfreq\nraw = simulate_raw(info, stcs, forward=fwd)\nadd_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand)",
"We create an :class:mne.Epochs object containing two trials: one with\nboth noise and signal and one with just noise",
"events = mne.find_events(raw, initial_event=True)\ntmax = (len(stc_signal.times) - 1) / sfreq\nepochs = mne.Epochs(raw, events, event_id=dict(signal=1, noise=2),\n tmin=0, tmax=tmax, baseline=None, preload=True)\nassert len(epochs) == 2 # ensure that we got the two expected events\n\n# Plot some of the channels of the simulated data that are situated above one\n# of our simulated sources.\npicks = mne.pick_channels(epochs.ch_names,\n mne.read_vectorview_selection('Left-frontal'))\nepochs.plot(picks=picks)",
"Power mapping\nWith our simulated dataset ready, we can now pretend to be researchers that\nhave just recorded this from a real subject and are going to study what parts\nof the brain communicate with each other.\nFirst, we'll create a source estimate of the MEG data. We'll use both a\nstraightforward MNE-dSPM inverse solution for this, and the DICS beamformer\nwhich is specifically designed to work with oscillatory data.\nComputing the inverse using MNE-dSPM:",
"# Compute the inverse operator\nfwd = mne.read_forward_solution(fwd_fname)\ninv = make_inverse_operator(epochs.info, fwd, cov)\n\n# Apply the inverse model to the trial that also contains the signal.\ns = apply_inverse(epochs['signal'].average(), inv)\n\n# Take the root-mean square along the time dimension and plot the result.\ns_rms = np.sqrt((s ** 2).mean())\ntitle = 'MNE-dSPM inverse (RMS)'\nbrain = s_rms.plot('sample', subjects_dir=subjects_dir, hemi='both', figure=1,\n size=600, time_label=title, title=title)\n\n# Indicate the true locations of the source activity on the plot.\nbrain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh')\nbrain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh')\n\n# Rotate the view and add a title.\nbrain.show_view(view={'azimuth': 0, 'elevation': 0, 'distance': 550,\n 'focalpoint': [0, 0, 0]})",
"We will now compute the cortical power map at 10 Hz. using a DICS beamformer.\nA beamformer will construct for each vertex a spatial filter that aims to\npass activity originating from the vertex, while dampening activity from\nother sources as much as possible.\nThe :func:mne.beamformer.make_dics function has many switches that offer\nprecise control\nover the way the filter weights are computed. Currently, there is no clear\nconsensus regarding the best approach. This is why we will demonstrate two\napproaches here:\n\nThe approach as described in :footcite:vanVlietEtAl2018, which first\n normalizes the forward solution and computes a vector beamformer.\nThe scalar beamforming approach based on\n :footcite:SekiharaNagarajan2008, which uses weight normalization\n instead of normalizing the forward solution.",
"# Estimate the cross-spectral density (CSD) matrix on the trial containing the\n# signal.\ncsd_signal = csd_morlet(epochs['signal'], frequencies=[10])\n\n# Compute the spatial filters for each vertex, using two approaches.\nfilters_approach1 = make_dics(\n info, fwd, csd_signal, reg=0.05, pick_ori='max-power', depth=1.,\n inversion='single', weight_norm=None, real_filter=True)\nprint(filters_approach1)\n\nfilters_approach2 = make_dics(\n info, fwd, csd_signal, reg=0.05, pick_ori='max-power', depth=None,\n inversion='matrix', weight_norm='unit-noise-gain', real_filter=True)\nprint(filters_approach2)\n\n# You can save these to disk with:\n# filters_approach1.save('filters_1-dics.h5')\n\n# Compute the DICS power map by applying the spatial filters to the CSD matrix.\npower_approach1, f = apply_dics_csd(csd_signal, filters_approach1)\npower_approach2, f = apply_dics_csd(csd_signal, filters_approach2)",
"Plot the DICS power maps for both approaches, starting with the first:",
"def plot_approach(power, n):\n \"\"\"Plot the results on a brain.\"\"\"\n title = 'DICS power map, approach %d' % n\n brain = power_approach1.plot(\n 'sample', subjects_dir=subjects_dir, hemi='both',\n size=600, time_label=title, title=title)\n # Indicate the true locations of the source activity on the plot.\n brain.add_foci(vertices[0][0], coords_as_verts=True, hemi='lh', color='b')\n brain.add_foci(vertices[1][0], coords_as_verts=True, hemi='rh', color='b')\n # Rotate the view and add a title.\n brain.show_view(view={'azimuth': 0, 'elevation': 0, 'distance': 550,\n 'focalpoint': [0, 0, 0]})\n return brain\n\n\nbrain1 = plot_approach(power_approach1, 1)",
"Now the second:",
"brain2 = plot_approach(power_approach2, 2)",
"Excellent! All methods found our two simulated sources. Of course, with a\nsignal-to-noise ratio (SNR) of 1, is isn't very hard to find them. You can\ntry playing with the SNR and see how the MNE-dSPM and DICS approaches hold up\nin the presence of increasing noise. In the presence of more noise, you may\nneed to increase the regularization parameter of the DICS beamformer.\nReferences\n.. footbibliography::"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
lwahedi/CurrentPresentation
|
talks/MDI3/.ipynb_checkpoints/networkslides-checkpoint.ipynb
|
mit
|
[
"Collecting and Using Data in Python\nLaila A. Wahedi\nMassive Data Institute Postdoctoral Fellow <br>McCourt School of Public Policy<br>\nFollow along: Wahedi.us, Current Presentation\nWhat is a network?\n\nNodes/verticies\nlinks/edges/ties/arcs\nAlso called a graph\n\nAny relationship or interaction\nExamples\n\nPeople and friendships\nPeople and acquaintances\nPeople who shop at the same store\nPeople who live in the same zip code\nStores that use the same distributor\nMovies with the same actors\nWords used in the same sentence\n\nBipartite: Networks with two types of node\n\nStudents and Mentors\nStores and Distributors \n\nWeighted: Networks with ties of different strength\n\nTwitter users and the number of retweets\nPeople and distance between homes\nCities and the dollars of trade\nCities and the number of travelers\n\nDirected Networks:\n\nAn edge is not always symmetrical\nTravel from one city to another\nSales from one country to another\nPhone calls from one person to another\nTwitter mentions\n\nRepresenting a network: Edge List\n\nDyads\nEach row contains a pair of nodes indicating a tie\nA third column can indicate weight\nOrder may indicate direction of the edge\n<center>\nFred, Maria<br>\nFred, Samir<br>\nFred, Jose<br>\nSamir, Jose<br>\nSamir, Sonya<br>\nSonya, Maria<br>\n</center>\n\nRepresenting a network: Adjacency Matrix\n\nnxn matrix of nodes, where position i,j indicates relationship between node i and node j\nCan be symmetrical or directed, use weights or indicators with 1\nLess space efficient for sparse networks, but convenient for linear algebra operations\n<img src=\"adj.png\">\n\nYour data:\n\nis there any relationship between your units of analysis?\n\nWhat are some examples?\n\nPackages for today:\n\nNetworkX: network analysis package\nscipy, numpy, pandas: math and data frames\nitertools: Built in package for iterating\nMatplotlib: required for making plots\nStatsmodels to run regression",
"import pandas as pd\nimport networkx as nx\nimport numpy as np\nimport scipy as sp\nimport itertools\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\n%matplotlib inline",
"Toy Network\n\nInstantiate\nAdd nodes\nAdd edges\nDraw",
"G = nx.Graph()\nG.add_nodes_from(['A','B','C','D','E','F','G'])\nG.add_edges_from([('A','B'),('A','C'),\n ('A','D'),('A','F'),\n ('B','E'),('C','E'),\n ('F','G')])\nnx.draw_networkx(G, with_labels=True)",
"Centrality\n\nWay to measure the nature of the connectedness of a group\nMany centrality measures\nUse theory to pick one. \n\nSome common measures:\nDegree Centrality\n\nNumber of ties\nSum of rows\nIn-degree: number of edges to a node\nOut-degree: number of edges from a node",
"deg = nx.degree_centrality(G)\nprint(deg)",
"Eigenvector Centrality\n\nConnectedness to other well-connected nodes\nTheoretical Implication: A lot of work to maintain ties to everyone, sometimes just as good to know someone who knows everyone.\nFinding a job\nRumors\n\nSupply\n\n\nRequires connected network\n\nCannot compare across networks\n\nWhen might eigenvector centrality be less useful?\nCalculating Eigenvector Centrality\n\nTake eigenvector for maximum eigenvalue\nnx.eigenvector_centrality uses a different method that usually converges to the same result, but sometimes errors.",
"eig_c = nx.eigenvector_centrality_numpy(G)\ntoy_adj = nx.adjacency_matrix(G)\nprint(eig_c)\nval,vec = np.linalg.eig(toy_adj.toarray())\nprint(val)\nvec[:,0]",
"Betweenness Centrality\n\nProportional to the number of shortest paths that pass through a given node\nHow important is that node in connecting other nodes\nMedicci family was not well connected, but strategically connected.",
"betw = nx.betweenness_centrality(G)\nprint(betw)",
"Centrality Measures Are Different\n\nSelect based on theory you want to capture\nTake a minute to play around with the network and see how the relationships change",
"cent_scores = pd.DataFrame({'deg':deg,'eig_c':eig_c,'betw':betw})\nprint(cent_scores.corr())\ncent_scores",
"Transitivity\n\nExtent to which friends have friends in common\nProbability two nodes are tied given that they have a partner in common\nMake a more transitive network:",
"G_trans = G.copy()\nG_trans.add_edge('A','E')\nG_trans.add_edge('F','D')\nnx.draw_networkx(G_trans, with_labels=True)",
"Measure Transitivity\n\nWhole network: \nTransitivity: Proportion of possible triangles present in the network\nIndividual nodes: \nCount the triangles",
"print(\"Transitivity:\")\nprint(nx.transitivity(G))\nprint(nx.transitivity(G_trans))\nprint(\"Triangles:\")\nprint(nx.triangles(G))\nprint(nx.triangles(G_trans))",
"Clustering Coefficient\n\nIndividual Nodes:\nProportion of possible triangles through a given node\nWhole Network\nAverage clustering across whole network",
"print(\"Clustering coefficient\")\nprint(nx.clustering(G))\nprint(nx.clustering(G_trans))\nprint(\"Average Clustering\")\nprint(nx.average_clustering(G))\nprint(nx.average_clustering(G_trans))",
"Community Detection\n\nDivide the network into subgroups using different algorithms\nExamples\nPercolation: find communities with fully connected cores\nMinimum cuts (nodes): Find the minimum number of nodes that, if removed, break the network into multiple components. Progressively remove them. \n<strong>Girvan Newman Algorithm: </strong> Remove ties with highest betweenness, continue until network broken into desired number of communities",
"coms = nx.algorithms.community.centrality.girvan_newman(G)\ni = 2\nfor com in itertools.islice(coms,4):\n print(i, ' communities')\n i+=1\n print(tuple(c for c in com))",
"Real Network: Senate co-sponsorship\n\nNodes: Senators\nLinks: Sponsorship of the same piece of legislation. \nWeighted\n\n<h4>Download here:</h4>\nhttps://dataverse.harvard.edu/file.xhtml;jsessionid=e627083a7d8f43616bbe7d4ada3e?fileId=615937&version=RELEASED&version=.0\n<h4> Start with the cosponsors.txt file</h4>\n\nSimilar to an edgelist for a bipartite graph\nEach line is a bill\nEach line lists all cosponsors\n\n<h1>Load The Cosponsor Data </h1>\n<ol>\n<li>Instantiate a list for the edgelist</li>\n<li> Open the file</li>\n<li> Loop through lines</li>\n<li> Store the lines</li>\n</ol>",
"edges = []\nwith open('cosponsors.txt') as d:\n for line in d:\n edges.append(line.split())",
"Subset the Data: Year\n<h3> 2004</h3>\n\nDownload dates.txt\nEach row is the date\nYear, month, day separated by \"-\"",
"dates = pd.read_csv('Dates.txt',sep='-',header=None)\ndates.columns = ['year','month','day']\nindex_loc = np.where(dates.year==2004)\nedges_04 = [edges[i] for i in index_loc[0]]",
"Subset the Data: Senate\n\nDownload senate.csv\nGives the ids for senators\nFilter down to the rows for 106th congress (2000)\n\n<h3> This gives us our nodes </h3>\n\nInstantiate adjacency matrix of size nxn\nCreate an ordinal index so we can index the matrix\nAdd an attribute",
"# Get nodes\nsenate = pd.read_csv('senate.csv')\nsenators = senate.loc[senate.congress==108,['id','party']]\n# Creae adjacency matrix\nadj_mat = np.zeros([len(senators),len(senators)])\nsenators = pd.DataFrame(senators)\nsenators['adj_ind']=range(len(senators))\n# Create Graph Object\nsenateG= nx.Graph()\nsenateG.add_nodes_from(senators.id)\nparty_dict = dict(zip(senators.id,senators.party))\nnx.set_node_attributes(senateG, name='party',values=party_dict)",
"Create the network (two ways)\n\nLoop through bills\nCheck that there's data, and that it's a senate bill\nCreate pairs for every combination of cosponsors\n\nAdd directly to NetworkX graph object\n\nAdd edges from the list of combinations\nNot weighted\n\nAdd to adjacency matrix using new index\n\nIdentify index for each pair\nAdd to adjacency matrix using index",
"for bill in edges_04:\n if bill[0] == \"NA\": continue\n bill = [int(i) for i in bill]\n if bill[0] not in list(senators.id): continue\n combos = list(itertools.combinations(bill,2))\n senateG.add_edges_from(combos)\n for pair in combos:\n i = senators.loc[senators.id == int(pair[0]), 'adj_ind']\n j = senators.loc[senators.id == int(pair[1]), 'adj_ind']\n adj_mat[i,j]+=1\n adj_mat[j,i]+=1",
"Set edge weights for Network Object",
"for row in range(len(adj_mat)):\n cols = np.where(adj_mat[row,:])[0]\n i = senators.loc[senators.adj_ind==row,'id']\n i = int(i)\n for col in cols:\n j = senators.loc[senators.adj_ind==col,'id']\n j = int(j)\n senateG[i][j]['bills']=adj_mat[row,col]",
"Thresholding\n\nSome bills have everyone as a sponsor\nThese popular bills are less informative, end up with complete network\nThreshold: Take edges above a certain weight (more than n cosponsorships)\nTry different numbers",
"bill_dict = nx.get_edge_attributes(senateG,'bills')\nelarge=[(i,j) for (i,j) in bill_dict if bill_dict[(i,j)] >40]",
"Look at the network\n\nDifferent layouts possible: <br> https://networkx.github.io/documentation/networkx-1.10/reference/drawing.html",
"nx.draw_spring(senateG, edgelist = elarge,with_labels=True)",
"Take out the singletons to get a clearer picture:",
"senateGt= nx.Graph()\nsenateGt.add_nodes_from(senateG.nodes)\nsenateGt.add_edges_from(elarge)\ndeg = senateGt.degree()\nrem = [n[0] for n in deg if n[1]==0]\nsenateGt_all = senateGt.copy()\nsenateGt.remove_nodes_from(rem)\nnx.draw_spring(senateGt,with_labels=True)",
"Look at the degree distribution\n\nDegree is a tuple listing the group name and the number of partnerships\nAdd to a dataframe\nSeparate the column into two columns using .apply\nPlot a histogram",
"foo=pd.DataFrame({'tup':deg})\ndeg = senateGt.degree()\nfoo = pd.DataFrame(foo)\nfoo[['grp','deg']]=foo['tup'].apply(pd.Series)\nfoo.deg.plot.hist()",
"Look at party in the network\nExtract the party information\n\nDemocrats coded as 100, republicans as 200",
"party = nx.get_node_attributes(senateG,'party')\ndems = []\ngop = []\nfor i in party:\n if party[i]==100: dems.append(i)\n else: gop.append(i)",
"Prepare the Visualization\n\nCreate positional coordinates for the groups with ties, and without ties\nInstantiate dictionaries to hold different sets of coordinates\nLoop through party members\nIf they have no parters, add calculated position to the lonely dictionary\nIf they have partners, add calculated position to the party dictionary",
"pos = nx.spring_layout(senateGt)\npos_all = nx.circular_layout(senateG)\ndem_dict={}\ngop_dict={}\ndem_lone = {}\ngop_lone= {}\nfor n in dems:\n if n in rem: dem_lone[n]=pos_all[n]\n else:dem_dict[n] = pos[n]\nfor n in gop:\n if n in rem: gop_lone[n]=pos_all[n]\n else:gop_dict[n] = pos[n]",
"Visualize the network by party\n\nCreate lists of the party members who have ties\nDraw nodes in four categories using the position dictionaries we created\nparty members, untied party members",
"dems = list(set(dems)-set(rem))\ngop = list(set(gop)-set(rem))\nnx.draw_networkx_nodes(senateGt, pos=dem_dict, nodelist = dems,node_color='b',node_size = 100)\nnx.draw_networkx_nodes(senateGt, pos=gop_dict, nodelist = gop,node_color='r', node_size = 100)\nnx.draw_networkx_nodes(senateG, pos=dem_lone, nodelist = list(dem_lone.keys()),node_color='b',node_size = 200)\nnx.draw_networkx_nodes(senateG, pos=gop_lone, nodelist = list(gop_lone.keys()),node_color='r', node_size = 200)\nnx.draw_networkx_edges(senateGt,pos=pos, edgelist=elarge)",
"Do it again with a lower threshold:",
"dems = list(set(dems)-set(rem))\ngop = list(set(gop)-set(rem))\nnx.draw_networkx_nodes(senateGt, pos=dem_dict, nodelist = dems,node_color='b',node_size = 100)\nnx.draw_networkx_nodes(senateGt, pos=gop_dict, nodelist = gop,node_color='r', node_size = 100)\nnx.draw_networkx_nodes(senateGt_all, pos=dem_lone, nodelist = list(dem_lone.keys()),node_color='b',node_size = 100)\nnx.draw_networkx_nodes(senateGt_all, pos=gop_lone, nodelist = list(gop_lone.keys()),node_color='r', node_size = 100)\nnx.draw_networkx_edges(senateGt,pos=pos, edgelist=elarge)",
"Modularity:\nfraction of edges within a community minus the expected fraction if they were distributed randomly across the whole network\n\nHigh modularity >0 when there are more connections in a community than between communities\nDifferent algorithms to try to maximize this. \nUsed a newer one from NetworkX. Run cell at end of notebook to get this algorithm",
"colors = greedy_modularity_communities(senateGt, weight = 'bills')",
"Visualize the Communities\n\nCalculate a position for all nodes\nSeparate network by the communities \nDraw the first set as red\nDraw the second set as blue\nAdd the edges",
"pos = nx.spring_layout(senateGt)\npos0={}\npos1={}\nfor n in colors[0]:\n pos0[n] = pos[n]\nfor n in colors[1]:\n pos1[n] = pos[n]nx.draw_networkx_nodes(senateGt, pos=pos0, nodelist = colors[0],node_color='r')\nnx.draw_networkx_nodes(senateGt, pos=pos1, nodelist = colors[1],node_color='b')\nnx.draw_networkx_edges(senateGt,pos=pos, edgelist=elarge)",
"How did we do?\nHow many were misclassified\n\nNote: It's random, so you may need to flip the comparison by switching colors[0] and colors[1]\n\nDid pretty well!",
"print('gop misclassification')\nfor i in colors[1]:\n if i in dems: print(i,len(senateGt[i]))\nprint('dem misclassification')\nfor i in colors[0]: \n if i in gop: print(i,len(senateGt[i]))",
"Pretty, but now what?\nStructure is interesting itself\n\nIs polarization changing over time?\nWhat attributes of a senator or environment lead to more in-party cosponsorship. \nUse ERGM or Latent Space Model\nBeyond what we'll cover today, but check out: \nEdward's implementation of Latent Space Models: http://edwardlib.org/tutorials/latent-space-models\nStatnet's ERGM implementation in R: https://statnet.org/trac/raw-attachment/wiki/Sunbelt2016/ergm_tutorial.html\n\nThose are hard, what else?\nEffects of networks\n\nHow does a congressman's betweenness centrality affect their committee placement?\nHow does a congressman's degree centrality affect their reelection?\nHow does a party's modularity affect their ability to accomplish their agenda\nDoes the behavior of tied nodes affect the behavior of a node? \nIs there diffusion across the network? \n\nUse Network Variables in Your Regression\n\nControl for centrality measures or other positional effects\nParty or community as unit of analysis\nUse network lags to account for interdependence (adapt a var or spatial lag model)\nRemember, if you use spatial lags, you need to correct for it in your error term to get unbiased standard errors\np(y) conditional on y for neighboring nodes\nLearn more here. Use your adjacency matrix for W instead of distance decay function.\n http://www.statsref.com/HTML/index.html?car_models.html\n\nLoad some more data from Fowler\n\nSH file\n.tab or .csv, depending on source\npb: sponsored bills passing chamber\npa: sponsored ammendments passing chamber",
"sh = pd.read_csv('SH.tab',sep='\\t')\nsh['dem']= sh.party==100\nsh['dem']=sh.dem*1\nmodel_data = sh.loc[\n (sh.congress == 108) & (sh.chamber=='S'),\n ['ids','dem','pb','pa']\n]\nmodel_data['passed']=model_data.pb+model_data.pa\nmodel_data.set_index('ids',inplace=True)",
"Merge in some network data\n\nRemember: The merge works because they have the same index",
"bet_cent = nx.betweenness_centrality(senateG,weight='bills')\nbet_cent = pd.Series(bet_cent)\ndeg_cent = nx.degree_centrality(senateGt)\ndeg_cent = pd.Series(deg_cent)\nmodel_data['between']=bet_cent\nmodel_data['degree']=deg_cent",
"Degree is not significant",
"y =model_data.loc[:,'passed']\nx =model_data.loc[:,['degree','dem']]\nx['c'] = 1\nols_model1 = sm.OLS(y,x,missing='drop')\nresults = ols_model1.fit()\nprint(results.summary())",
"Betweeness is!\nIt's not how many bills that matter, it's who you cosponsor with",
"y =model_data.loc[:,'passed']\nx =model_data.loc[:,['between','dem']]\nx['c'] = 1\nols_model1 = sm.OLS(y,x,missing='drop')\nresults = ols_model1.fit()\nprint(results.summary())",
"Questions?\nAdd functions from networkx\n\nNetworkX documentation is buggy\nVersion that comes with Anaconda is incomplete\nBelow I pasted a community detection function from their source code\nDon't worry about what it's doing, just run it to add",
"# Some functions from the NetworkX package\n\nimport heapq\nclass MappedQueue(object):\n \"\"\"The MappedQueue class implements an efficient minimum heap. The\n smallest element can be popped in O(1) time, new elements can be pushed\n in O(log n) time, and any element can be removed or updated in O(log n)\n time. The queue cannot contain duplicate elements and an attempt to push an\n element already in the queue will have no effect.\n MappedQueue complements the heapq package from the python standard\n library. While MappedQueue is designed for maximum compatibility with\n heapq, it has slightly different functionality.\n Examples\n --------\n A `MappedQueue` can be created empty or optionally given an array of\n initial elements. Calling `push()` will add an element and calling `pop()`\n will remove and return the smallest element.\n >>> q = MappedQueue([916, 50, 4609, 493, 237])\n >>> q.push(1310)\n True\n >>> x = [q.pop() for i in range(len(q.h))]\n >>> x\n [50, 237, 493, 916, 1310, 4609]\n Elements can also be updated or removed from anywhere in the queue.\n >>> q = MappedQueue([916, 50, 4609, 493, 237])\n >>> q.remove(493)\n >>> q.update(237, 1117)\n >>> x = [q.pop() for i in range(len(q.h))]\n >>> x\n [50, 916, 1117, 4609]\n References\n ----------\n .. [1] Cormen, T. H., Leiserson, C. E., Rivest, R. L., & Stein, C. (2001).\n Introduction to algorithms second edition.\n .. [2] Knuth, D. E. (1997). The art of computer programming (Vol. 3).\n Pearson Education.\n \"\"\"\n\n def __init__(self, data=[]):\n \"\"\"Priority queue class with updatable priorities.\n \"\"\"\n self.h = list(data)\n self.d = dict()\n self._heapify()\n\n def __len__(self):\n return len(self.h)\n\n def _heapify(self):\n \"\"\"Restore heap invariant and recalculate map.\"\"\"\n heapq.heapify(self.h)\n self.d = dict([(elt, pos) for pos, elt in enumerate(self.h)])\n if len(self.h) != len(self.d):\n raise AssertionError(\"Heap contains duplicate elements\")\n\n def push(self, elt):\n \"\"\"Add an element to the queue.\"\"\"\n # If element is already in queue, do nothing\n if elt in self.d:\n return False\n # Add element to heap and dict\n pos = len(self.h)\n self.h.append(elt)\n self.d[elt] = pos\n # Restore invariant by sifting down\n self._siftdown(pos)\n return True\n\n def pop(self):\n \"\"\"Remove and return the smallest element in the queue.\"\"\"\n # Remove smallest element\n elt = self.h[0]\n del self.d[elt]\n # If elt is last item, remove and return\n if len(self.h) == 1:\n self.h.pop()\n return elt\n # Replace root with last element\n last = self.h.pop()\n self.h[0] = last\n self.d[last] = 0\n # Restore invariant by sifting up, then down\n pos = self._siftup(0)\n self._siftdown(pos)\n # Return smallest element\n return elt\n\n def update(self, elt, new):\n \"\"\"Replace an element in the queue with a new one.\"\"\"\n # Replace\n pos = self.d[elt]\n self.h[pos] = new\n del self.d[elt]\n self.d[new] = pos\n # Restore invariant by sifting up, then down\n pos = self._siftup(pos)\n self._siftdown(pos)\n\n def remove(self, elt):\n \"\"\"Remove an element from the queue.\"\"\"\n # Find and remove element\n try:\n pos = self.d[elt]\n del self.d[elt]\n except KeyError:\n # Not in queue\n raise\n # If elt is last item, remove and return\n if pos == len(self.h) - 1:\n self.h.pop()\n return\n # Replace elt with last element\n last = self.h.pop()\n self.h[pos] = last\n self.d[last] = pos\n # Restore invariant by sifting up, then down\n pos = self._siftup(pos)\n self._siftdown(pos)\n\n def _siftup(self, pos):\n 
\"\"\"Move element at pos down to a leaf by repeatedly moving the smaller\n child up.\"\"\"\n h, d = self.h, self.d\n elt = h[pos]\n # Continue until element is in a leaf\n end_pos = len(h)\n left_pos = (pos << 1) + 1\n while left_pos < end_pos:\n # Left child is guaranteed to exist by loop predicate\n left = h[left_pos]\n try:\n right_pos = left_pos + 1\n right = h[right_pos]\n # Out-of-place, swap with left unless right is smaller\n if right < left:\n h[pos], h[right_pos] = right, elt\n pos, right_pos = right_pos, pos\n d[elt], d[right] = pos, right_pos\n else:\n h[pos], h[left_pos] = left, elt\n pos, left_pos = left_pos, pos\n d[elt], d[left] = pos, left_pos\n except IndexError:\n # Left leaf is the end of the heap, swap\n h[pos], h[left_pos] = left, elt\n pos, left_pos = left_pos, pos\n d[elt], d[left] = pos, left_pos\n # Update left_pos\n left_pos = (pos << 1) + 1\n return pos\n\n def _siftdown(self, pos):\n \"\"\"Restore invariant by repeatedly replacing out-of-place element with\n its parent.\"\"\"\n h, d = self.h, self.d\n elt = h[pos]\n # Continue until element is at root\n while pos > 0:\n parent_pos = (pos - 1) >> 1\n parent = h[parent_pos]\n if parent > elt:\n # Swap out-of-place element with parent\n h[parent_pos], h[pos] = elt, parent\n parent_pos, pos = pos, parent_pos\n d[elt] = pos\n d[parent] = parent_pos\n else:\n # Invariant is satisfied\n break\n return pos\nfrom __future__ import division\n\nimport networkx as nx\nfrom networkx.algorithms.community.quality import modularity\n\ndef greedy_modularity_communities(G, weight=None):\n \"\"\"Find communities in graph using Clauset-Newman-Moore greedy modularity\n maximization. This method currently supports the Graph class and does not\n consider edge weights.\n\n Greedy modularity maximization begins with each node in its own community\n and joins the pair of communities that most increases modularity until no\n such pair exists.\n\n Parameters\n ----------\n G : NetworkX graph\n\n Returns\n -------\n Yields sets of nodes, one for each community.\n\n Examples\n --------\n >>> from networkx.algorithms.community import greedy_modularity_communities\n >>> G = nx.karate_club_graph()\n >>> c = list(greedy_modularity_communities(G))\n >>> sorted(c[0])\n [8, 14, 15, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]\n\n References\n ----------\n .. [1] M. E. J Newman 'Networks: An Introduction', page 224\n Oxford University Press 2011.\n .. [2] Clauset, A., Newman, M. 
E., & Moore, C.\n \"Finding community structure in very large networks.\"\n Physical Review E 70(6), 2004.\n \"\"\"\n\n # Count nodes and edges\n N = len(G.nodes())\n m = sum([d.get('weight', 1) for u, v, d in G.edges(data=True)])\n q0 = 1.0 / (2.0*m)\n\n # Map node labels to contiguous integers\n label_for_node = dict((i, v) for i, v in enumerate(G.nodes()))\n node_for_label = dict((label_for_node[i], i) for i in range(N))\n\n # Calculate degrees\n k_for_label = G.degree(G.nodes(), weight=weight)\n k = [k_for_label[label_for_node[i]] for i in range(N)]\n\n # Initialize community and merge lists\n communities = dict((i, frozenset([i])) for i in range(N))\n merges = []\n\n # Initial modularity\n partition = [[label_for_node[x] for x in c] for c in communities.values()]\n q_cnm = modularity(G, partition)\n\n # Initialize data structures\n # CNM Eq 8-9 (Eq 8 was missing a factor of 2 (from A_ij + A_ji)\n # a[i]: fraction of edges within community i\n # dq_dict[i][j]: dQ for merging community i, j\n # dq_heap[i][n] : (-dq, i, j) for communitiy i nth largest dQ\n # H[n]: (-dq, i, j) for community with nth largest max_j(dQ_ij)\n a = [k[i]*q0 for i in range(N)]\n dq_dict = dict(\n (i, dict(\n (j, 2*q0 - 2*k[i]*k[j]*q0*q0)\n for j in [\n node_for_label[u]\n for u in G.neighbors(label_for_node[i])]\n if j != i))\n for i in range(N))\n dq_heap = [\n MappedQueue([\n (-dq, i, j)\n for j, dq in dq_dict[i].items()])\n for i in range(N)]\n H = MappedQueue([\n dq_heap[i].h[0]\n for i in range(N)\n if len(dq_heap[i]) > 0])\n\n # Merge communities until we can't improve modularity\n while len(H) > 1:\n # Find best merge\n # Remove from heap of row maxes\n # Ties will be broken by choosing the pair with lowest min community id\n try:\n dq, i, j = H.pop()\n except IndexError:\n break\n dq = -dq\n # Remove best merge from row i heap\n dq_heap[i].pop()\n # Push new row max onto H\n if len(dq_heap[i]) > 0:\n H.push(dq_heap[i].h[0])\n # If this element was also at the root of row j, we need to remove the\n # dupliate entry from H\n if dq_heap[j].h[0] == (-dq, j, i):\n H.remove((-dq, j, i))\n # Remove best merge from row j heap\n dq_heap[j].remove((-dq, j, i))\n # Push new row max onto H\n if len(dq_heap[j]) > 0:\n H.push(dq_heap[j].h[0])\n else:\n # Duplicate wasn't in H, just remove from row j heap\n dq_heap[j].remove((-dq, j, i))\n # Stop when change is non-positive\n if dq <= 0:\n break\n\n # Perform merge\n communities[j] = frozenset(communities[i] | communities[j])\n del communities[i]\n merges.append((i, j, dq))\n # New modularity\n q_cnm += dq\n # Get list of communities connected to merged communities\n i_set = set(dq_dict[i].keys())\n j_set = set(dq_dict[j].keys())\n all_set = (i_set | j_set) - set([i, j])\n both_set = i_set & j_set\n # Merge i into j and update dQ\n for k in all_set:\n # Calculate new dq value\n if k in both_set:\n dq_jk = dq_dict[j][k] + dq_dict[i][k]\n elif k in j_set:\n dq_jk = dq_dict[j][k] - 2.0*a[i]*a[k]\n else:\n # k in i_set\n dq_jk = dq_dict[i][k] - 2.0*a[j]*a[k]\n # Update rows j and k\n for row, col in [(j, k), (k, j)]:\n # Save old value for finding heap index\n if k in j_set:\n d_old = (-dq_dict[row][col], row, col)\n else:\n d_old = None\n # Update dict for j,k only (i is removed below)\n dq_dict[row][col] = dq_jk\n # Save old max of per-row heap\n if len(dq_heap[row]) > 0:\n d_oldmax = dq_heap[row].h[0]\n else:\n d_oldmax = None\n # Add/update heaps\n d = (-dq_jk, row, col)\n if d_old is None:\n # We're creating a new nonzero element, add to heap\n dq_heap[row].push(d)\n 
else:\n # Update existing element in per-row heap\n dq_heap[row].update(d_old, d)\n # Update heap of row maxes if necessary\n if d_oldmax is None:\n # No entries previously in this row, push new max\n H.push(d)\n else:\n # We've updated an entry in this row, has the max changed?\n if dq_heap[row].h[0] != d_oldmax:\n H.update(d_oldmax, dq_heap[row].h[0])\n\n # Remove row/col i from matrix\n i_neighbors = dq_dict[i].keys()\n for k in i_neighbors:\n # Remove from dict\n dq_old = dq_dict[k][i]\n del dq_dict[k][i]\n # Remove from heaps if we haven't already\n if k != j:\n # Remove both row and column\n for row, col in [(k, i), (i, k)]:\n # Check if replaced dq is row max\n d_old = (-dq_old, row, col)\n if dq_heap[row].h[0] == d_old:\n # Update per-row heap and heap of row maxes\n dq_heap[row].remove(d_old)\n H.remove(d_old)\n # Update row max\n if len(dq_heap[row]) > 0:\n H.push(dq_heap[row].h[0])\n else:\n # Only update per-row heap\n dq_heap[row].remove(d_old)\n\n del dq_dict[i]\n # Mark row i as deleted, but keep placeholder\n dq_heap[i] = MappedQueue()\n # Merge i into j and update a\n a[j] += a[i]\n a[i] = 0\n\n communities = [\n frozenset([label_for_node[i] for i in c])\n for c in communities.values()]\n\n return sorted(communities, key=len, reverse=True)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/fairness-indicators
|
g3doc/tutorials/Fairness_Indicators_TFCO_CelebA_Case_Study.ipynb
|
apache-2.0
|
[
"Copyright 2020 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"TensorFlow Constrained Optimization Example Using CelebA Dataset\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/responsible_ai/fairness_indicators/tutorials/Fairness_Indicators_TFCO_CelebA_Case_Study\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/fairness-indicators/blob/master/g3doc/tutorials/Fairness_Indicators_TFCO_CelebA_Case_Study.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/fairness-indicators/tree/master/g3doc/tutorials/Fairness_Indicators_TFCO_CelebA_Case_Study.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/fairness-indicators/g3doc/tutorials/Fairness_Indicators_TFCO_CelebA_Case_Study.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nThis notebook demonstrates an easy way to create and optimize constrained problems using the TFCO library. This method can be useful in improving models when we find that they’re not performing equally well across different slices of our data, which we can identify using Fairness Indicators. The second of Google’s AI principles states that our technology should avoid creating or reinforcing unfair bias, and we believe this technique can help improve model fairness in some situations. In particular, this notebook will:\n\nTrain a simple, unconstrained neural network model to detect a person's smile in images using tf.keras and the large-scale CelebFaces Attributes (CelebA) dataset.\nEvaluate model performance against a commonly used fairness metric across age groups, using Fairness Indicators.\nSet up a simple constrained optimization problem to achieve fairer performance across age groups.\nRetrain the now constrained model and evaluate performance again, ensuring that our chosen fairness metric has improved.\n\nLast updated: 3/11 Feb 2020\nInstallation\nThis notebook was created in Colaboratory, connected to the Python 3 Google Compute Engine backend. If you wish to host this notebook in a different environment, then you should not experience any major issues provided you include all the required packages in the cells below.\nNote that the very first time you run the pip installs, you may be asked to restart the runtime because of preinstalled out of date packages. Once you do so, the correct packages will be used.",
"#@title Pip installs\n!pip install -q -U pip==20.2\n\n!pip install git+https://github.com/google-research/tensorflow_constrained_optimization\n!pip install -q tensorflow-datasets tensorflow\n!pip install fairness-indicators \\\n \"absl-py==0.12.0\" \\\n \"apache-beam<3,>=2.38\" \\\n \"avro-python3==1.9.1\" \\\n \"pyzmq==17.0.0\"\n",
"Note that depending on when you run the cell below, you may receive a warning about the default version of TensorFlow in Colab switching to TensorFlow 2.X soon. You can safely ignore that warning as this notebook was designed to be compatible with TensorFlow 1.X and 2.X.",
"#@title Import Modules\nimport os\nimport sys\nimport tempfile\nimport urllib\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nimport tensorflow_datasets as tfds\ntfds.disable_progress_bar()\n\nimport numpy as np\n\nimport tensorflow_constrained_optimization as tfco\n\nfrom tensorflow_metadata.proto.v0 import schema_pb2\nfrom tfx_bsl.tfxio import tensor_adapter\nfrom tfx_bsl.tfxio import tf_example_record",
"Additionally, we add a few imports that are specific to Fairness Indicators which we will use to evaluate and visualize the model's performance.",
"#@title Fairness Indicators related imports\nimport tensorflow_model_analysis as tfma\nimport fairness_indicators as fi\nfrom google.protobuf import text_format\nimport apache_beam as beam",
"Although TFCO is compatible with eager and graph execution, this notebook assumes that eager execution is enabled by default as it is in TensorFlow 2.x. To ensure that nothing breaks, eager execution will be enabled in the cell below.",
"#@title Enable Eager Execution and Print Versions\nif tf.__version__ < \"2.0.0\":\n tf.compat.v1.enable_eager_execution()\n print(\"Eager execution enabled.\")\nelse:\n print(\"Eager execution enabled by default.\")\n\nprint(\"TensorFlow \" + tf.__version__)\nprint(\"TFMA \" + tfma.VERSION_STRING)\nprint(\"TFDS \" + tfds.version.__version__)\nprint(\"FI \" + fi.version.__version__)",
"CelebA Dataset\nCelebA is a large-scale face attributes dataset with more than 200,000 celebrity images, each with 40 attribute annotations (such as hair type, fashion accessories, facial features, etc.) and 5 landmark locations (eyes, mouth and nose positions). For more details take a look at the paper.\nWith the permission of the owners, we have stored this dataset on Google Cloud Storage and mostly access it via TensorFlow Datasets(tfds).\nIn this notebook:\n* Our model will attempt to classify whether the subject of the image is smiling, as represented by the \"Smiling\" attribute<sup>*</sup>.\n* Images will be resized from 218x178 to 28x28 to reduce the execution time and memory when training.\n* Our model's performance will be evaluated across age groups, using the binary \"Young\" attribute. We will call this \"age group\" in this notebook.\n\n<sup>*</sup> While there is little information available about the labeling methodology for this dataset, we will assume that the \"Smiling\" attribute was determined by a pleased, kind, or amused expression on the subject's face. For the purpose of this case study, we will take these labels as ground truth.",
"gcs_base_dir = \"gs://celeb_a_dataset/\"\nceleb_a_builder = tfds.builder(\"celeb_a\", data_dir=gcs_base_dir, version='2.0.0')\n\nceleb_a_builder.download_and_prepare()\n\nnum_test_shards_dict = {'0.3.0': 4, '2.0.0': 2} # Used because we download the test dataset separately\nversion = str(celeb_a_builder.info.version)\nprint('Celeb_A dataset version: %s' % version)\n\n#@title Test dataset helper functions\nlocal_root = tempfile.mkdtemp(prefix='test-data')\ndef local_test_filename_base():\n return local_root\n\ndef local_test_file_full_prefix():\n return os.path.join(local_test_filename_base(), \"celeb_a-test.tfrecord\")\n\ndef copy_test_files_to_local():\n filename_base = local_test_file_full_prefix()\n num_test_shards = num_test_shards_dict[version]\n for shard in range(num_test_shards):\n url = \"https://storage.googleapis.com/celeb_a_dataset/celeb_a/%s/celeb_a-test.tfrecord-0000%s-of-0000%s\" % (version, shard, num_test_shards)\n filename = \"%s-0000%s-of-0000%s\" % (filename_base, shard, num_test_shards)\n res = urllib.request.urlretrieve(url, filename)",
"Caveats\nBefore moving forward, there are several considerations to keep in mind in using CelebA:\n* Although in principle this notebook could use any dataset of face images, CelebA was chosen because it contains public domain images of public figures.\n* All of the attribute annotations in CelebA are operationalized as binary categories. For example, the \"Young\" attribute (as determined by the dataset labelers) is denoted as either present or absent in the image.\n* CelebA's categorizations do not reflect real human diversity of attributes.\n* For the purposes of this notebook, the feature containing the \"Young\" attribute is referred to as \"age group\", where the presence of the \"Young\" attribute in an image is labeled as a member of the \"Young\" age group and the absence of the \"Young\" attribute is labeled as a member of the \"Not Young\" age group. These are assumptions made as this information is not mentioned in the original paper.\n* As such, performance in the models trained in this notebook is tied to the ways the attributes have been operationalized and annotated by the authors of CelebA.\n* This model should not be used for commercial purposes as that would violate CelebA's non-commercial research agreement.\nSetting Up Input Functions\nThe subsequent cells will help streamline the input pipeline as well as visualize performance.\nFirst we define some data-related variables and define a requisite preprocessing function.",
"#@title Define Variables\nATTR_KEY = \"attributes\"\nIMAGE_KEY = \"image\"\nLABEL_KEY = \"Smiling\"\nGROUP_KEY = \"Young\"\nIMAGE_SIZE = 28\n\n#@title Define Preprocessing Functions\ndef preprocess_input_dict(feat_dict):\n # Separate out the image and target variable from the feature dictionary.\n image = feat_dict[IMAGE_KEY]\n label = feat_dict[ATTR_KEY][LABEL_KEY]\n group = feat_dict[ATTR_KEY][GROUP_KEY]\n\n # Resize and normalize image.\n image = tf.cast(image, tf.float32)\n image = tf.image.resize(image, [IMAGE_SIZE, IMAGE_SIZE])\n image /= 255.0\n\n # Cast label and group to float32.\n label = tf.cast(label, tf.float32)\n group = tf.cast(group, tf.float32)\n\n feat_dict[IMAGE_KEY] = image\n feat_dict[ATTR_KEY][LABEL_KEY] = label\n feat_dict[ATTR_KEY][GROUP_KEY] = group\n\n return feat_dict\n\nget_image_and_label = lambda feat_dict: (feat_dict[IMAGE_KEY], feat_dict[ATTR_KEY][LABEL_KEY])\nget_image_label_and_group = lambda feat_dict: (feat_dict[IMAGE_KEY], feat_dict[ATTR_KEY][LABEL_KEY], feat_dict[ATTR_KEY][GROUP_KEY])",
"Then, we build out the data functions we need in the rest of the colab.",
"# Train data returning either 2 or 3 elements (the third element being the group)\ndef celeb_a_train_data_wo_group(batch_size):\n celeb_a_train_data = celeb_a_builder.as_dataset(split='train').shuffle(1024).repeat().batch(batch_size).map(preprocess_input_dict)\n return celeb_a_train_data.map(get_image_and_label)\ndef celeb_a_train_data_w_group(batch_size):\n celeb_a_train_data = celeb_a_builder.as_dataset(split='train').shuffle(1024).repeat().batch(batch_size).map(preprocess_input_dict)\n return celeb_a_train_data.map(get_image_label_and_group)\n\n# Test data for the overall evaluation\nceleb_a_test_data = celeb_a_builder.as_dataset(split='test').batch(1).map(preprocess_input_dict).map(get_image_label_and_group)\n# Copy test data locally to be able to read it into tfma\ncopy_test_files_to_local()",
"Build a simple DNN Model\nBecause this notebook focuses on TFCO, we will assemble a simple, unconstrained tf.keras.Sequential model.\nWe may be able to greatly improve model performance by adding some complexity (e.g., more densely-connected layers, exploring different activation functions, increasing image size), but that may distract from the goal of demonstrating how easy it is to apply the TFCO library when working with Keras. For that reason, the model will be kept simple — but feel encouraged to explore this space.",
"def create_model():\n # For this notebook, accuracy will be used to evaluate performance.\n METRICS = [\n tf.keras.metrics.BinaryAccuracy(name='accuracy')\n ]\n\n # The model consists of:\n # 1. An input layer that represents the 28x28x3 image flatten.\n # 2. A fully connected layer with 64 units activated by a ReLU function.\n # 3. A single-unit readout layer to output real-scores instead of probabilities.\n model = keras.Sequential([\n keras.layers.Flatten(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='image'),\n keras.layers.Dense(64, activation='relu'),\n keras.layers.Dense(1, activation=None)\n ])\n\n # TFCO by default uses hinge loss — and that will also be used in the model.\n model.compile(\n optimizer=tf.keras.optimizers.Adam(0.001),\n loss='hinge',\n metrics=METRICS)\n return model",
"We also define a function to set seeds to ensure reproducible results. Note that this colab is meant as an educational tool and does not have the stability of a finely tuned production pipeline. Running without setting a seed may lead to varied results.",
"def set_seeds():\n np.random.seed(121212)\n tf.compat.v1.set_random_seed(212121)",
"Fairness Indicators Helper Functions\nBefore training our model, we define a number of helper functions that will allow us to evaluate the model's performance via Fairness Indicators.\nFirst, we create a helper function to save our model once we train it.",
"def save_model(model, subdir):\n base_dir = tempfile.mkdtemp(prefix='saved_models')\n model_location = os.path.join(base_dir, subdir)\n model.save(model_location, save_format='tf')\n return model_location",
"Next, we define functions used to preprocess the data in order to correctly pass it through to TFMA.",
"#@title Data Preprocessing functions for \ndef tfds_filepattern_for_split(dataset_name, split):\n return f\"{local_test_file_full_prefix()}*\"\n\nclass PreprocessCelebA(object):\n \"\"\"Class that deserializes, decodes and applies additional preprocessing for CelebA input.\"\"\"\n def __init__(self, dataset_name):\n builder = tfds.builder(dataset_name)\n self.features = builder.info.features\n example_specs = self.features.get_serialized_info()\n self.parser = tfds.core.example_parser.ExampleParser(example_specs)\n\n def __call__(self, serialized_example):\n # Deserialize\n deserialized_example = self.parser.parse_example(serialized_example)\n # Decode\n decoded_example = self.features.decode_example(deserialized_example)\n # Additional preprocessing\n image = decoded_example[IMAGE_KEY]\n label = decoded_example[ATTR_KEY][LABEL_KEY]\n # Resize and scale image.\n image = tf.cast(image, tf.float32)\n image = tf.image.resize(image, [IMAGE_SIZE, IMAGE_SIZE])\n image /= 255.0\n image = tf.reshape(image, [-1])\n # Cast label and group to float32.\n label = tf.cast(label, tf.float32)\n\n group = decoded_example[ATTR_KEY][GROUP_KEY]\n \n output = tf.train.Example()\n output.features.feature[IMAGE_KEY].float_list.value.extend(image.numpy().tolist())\n output.features.feature[LABEL_KEY].float_list.value.append(label.numpy())\n output.features.feature[GROUP_KEY].bytes_list.value.append(b\"Young\" if group.numpy() else b'Not Young')\n return output.SerializeToString()\n\ndef tfds_as_pcollection(beam_pipeline, dataset_name, split):\n return (\n beam_pipeline\n | 'Read records' >> beam.io.ReadFromTFRecord(tfds_filepattern_for_split(dataset_name, split))\n | 'Preprocess' >> beam.Map(PreprocessCelebA(dataset_name))\n )",
"Finally, we define a function that evaluates the results in TFMA.",
"def get_eval_results(model_location, eval_subdir):\n base_dir = tempfile.mkdtemp(prefix='saved_eval_results')\n tfma_eval_result_path = os.path.join(base_dir, eval_subdir)\n\n eval_config_pbtxt = \"\"\"\n model_specs {\n label_key: \"%s\"\n }\n metrics_specs {\n metrics {\n class_name: \"FairnessIndicators\"\n config: '{ \"thresholds\": [0.22, 0.5, 0.75] }'\n }\n metrics {\n class_name: \"ExampleCount\"\n }\n }\n slicing_specs {}\n slicing_specs { feature_keys: \"%s\" }\n options {\n compute_confidence_intervals { value: False }\n disabled_outputs{values: \"analysis\"}\n }\n \"\"\" % (LABEL_KEY, GROUP_KEY)\n \n eval_config = text_format.Parse(eval_config_pbtxt, tfma.EvalConfig())\n\n eval_shared_model = tfma.default_eval_shared_model(\n eval_saved_model_path=model_location, tags=[tf.saved_model.SERVING])\n\n schema_pbtxt = \"\"\"\n tensor_representation_group {\n key: \"\"\n value {\n tensor_representation {\n key: \"%s\"\n value {\n dense_tensor {\n column_name: \"%s\"\n shape {\n dim { size: 28 }\n dim { size: 28 }\n dim { size: 3 }\n }\n }\n }\n }\n }\n }\n feature {\n name: \"%s\"\n type: FLOAT\n }\n feature {\n name: \"%s\"\n type: FLOAT\n }\n feature {\n name: \"%s\"\n type: BYTES\n }\n \"\"\" % (IMAGE_KEY, IMAGE_KEY, IMAGE_KEY, LABEL_KEY, GROUP_KEY)\n schema = text_format.Parse(schema_pbtxt, schema_pb2.Schema())\n coder = tf_example_record.TFExampleBeamRecord(\n physical_format='inmem', schema=schema,\n raw_record_column_name=tfma.ARROW_INPUT_COLUMN)\n tensor_adapter_config = tensor_adapter.TensorAdapterConfig(\n arrow_schema=coder.ArrowSchema(),\n tensor_representations=coder.TensorRepresentations())\n # Run the fairness evaluation.\n with beam.Pipeline() as pipeline:\n _ = (\n tfds_as_pcollection(pipeline, 'celeb_a', 'test')\n | 'ExamplesToRecordBatch' >> coder.BeamSource()\n | 'ExtractEvaluateAndWriteResults' >>\n tfma.ExtractEvaluateAndWriteResults(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n output_path=tfma_eval_result_path,\n tensor_adapter_config=tensor_adapter_config)\n )\n return tfma.load_eval_result(output_path=tfma_eval_result_path)\n",
"Train & Evaluate Unconstrained Model\nWith the model now defined and the input pipeline in place, we’re now ready to train our model. To cut back on the amount of execution time and memory, we will train the model by slicing the data into small batches with only a few repeated iterations.\nNote that running this notebook in TensorFlow < 2.0.0 may result in a deprecation warning for np.where. Safely ignore this warning as TensorFlow addresses this in 2.X by using tf.where in place of np.where.",
"BATCH_SIZE = 32\n\n# Set seeds to get reproducible results\nset_seeds()\n\nmodel_unconstrained = create_model()\nmodel_unconstrained.fit(celeb_a_train_data_wo_group(BATCH_SIZE), epochs=5, steps_per_epoch=1000)",
"Evaluating the model on the test data should result in a final accuracy score of just over 85%. Not bad for a simple model with no fine tuning.",
"print('Overall Results, Unconstrained')\nceleb_a_test_data = celeb_a_builder.as_dataset(split='test').batch(1).map(preprocess_input_dict).map(get_image_label_and_group)\nresults = model_unconstrained.evaluate(celeb_a_test_data)",
"However, performance evaluated across age groups may reveal some shortcomings.\nTo explore this further, we evaluate the model with Fairness Indicators (via TFMA). In particular, we are interested in seeing whether there is a significant gap in performance between \"Young\" and \"Not Young\" categories when evaluated on false positive rate.\nA false positive error occurs when the model incorrectly predicts the positive class. In this context, a false positive outcome occurs when the ground truth is an image of a celebrity 'Not Smiling' and the model predicts 'Smiling'. By extension, the false positive rate, which is used in the visualization above, is a measure of accuracy for a test. While this is a relatively mundane error to make in this context, false positive errors can sometimes cause more problematic behaviors. For instance, a false positive error in a spam classifier could cause a user to miss an important email.",
"model_location = save_model(model_unconstrained, 'model_export_unconstrained')\neval_results_unconstrained = get_eval_results(model_location, 'eval_results_unconstrained')",
"As mentioned above, we are concentrating on the false positive rate. The current version of Fairness Indicators (0.1.2) selects false negative rate by default. After running the line below, deselect false_negative_rate and select false_positive_rate to look at the metric we are interested in.",
"tfma.addons.fairness.view.widget_view.render_fairness_indicator(eval_results_unconstrained)",
"As the results show above, we do see a disproportionate gap between \"Young\" and \"Not Young\" categories.\nThis is where TFCO can help by constraining the false positive rate to be within a more acceptable criterion.\nConstrained Model Set Up\nAs documented in TFCO's library, there are several helpers that will make it easier to constrain the problem:\n\ntfco.rate_context() – This is what will be used in constructing a constraint for each age group category.\ntfco.RateMinimizationProblem()– The rate expression to be minimized here will be the false positive rate subject to age group. In other words, performance now will be evaluated based on the difference between the false positive rates of the age group and that of the overall dataset. For this demonstration, a false positive rate of less than or equal to 5% will be set as the constraint.\ntfco.ProxyLagrangianOptimizerV2() – This is the helper that will actually solve the rate constraint problem.\n\nThe cell below will call on these helpers to set up model training with the fairness constraint.",
"# The batch size is needed to create the input, labels and group tensors.\n# These tensors are initialized with all 0's. They will eventually be assigned\n# the batch content to them. A large batch size is chosen so that there are\n# enough number of \"Young\" and \"Not Young\" examples in each batch.\nset_seeds()\nmodel_constrained = create_model()\nBATCH_SIZE = 32\n\n# Create input tensor.\ninput_tensor = tf.Variable(\n np.zeros((BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3), dtype=\"float32\"),\n name=\"input\")\n\n# Create labels and group tensors (assuming both labels and groups are binary).\nlabels_tensor = tf.Variable(\n np.zeros(BATCH_SIZE, dtype=\"float32\"), name=\"labels\")\ngroups_tensor = tf.Variable(\n np.zeros(BATCH_SIZE, dtype=\"float32\"), name=\"groups\")\n\n# Create a function that returns the applied 'model' to the input tensor\n# and generates constrained predictions.\ndef predictions():\n return model_constrained(input_tensor)\n\n# Create overall context and subsetted context.\n# The subsetted context contains subset of examples where group attribute < 1\n# (i.e. the subset of \"Not Young\" celebrity images).\n# \"groups_tensor < 1\" is used instead of \"groups_tensor == 0\" as the former\n# would be a comparison on the tensor value, while the latter would be a\n# comparison on the Tensor object.\ncontext = tfco.rate_context(predictions, labels=lambda:labels_tensor)\ncontext_subset = context.subset(lambda:groups_tensor < 1)\n\n# Setup list of constraints.\n# In this notebook, the constraint will just be: FPR to less or equal to 5%.\nconstraints = [tfco.false_positive_rate(context_subset) <= 0.05]\n\n# Setup rate minimization problem: minimize overall error rate s.t. constraints.\nproblem = tfco.RateMinimizationProblem(tfco.error_rate(context), constraints)\n\n# Create constrained optimizer and obtain train_op.\n# Separate optimizers are specified for the objective and constraints\noptimizer = tfco.ProxyLagrangianOptimizerV2(\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n constraint_optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n num_constraints=problem.num_constraints)\n\n# A list of all trainable variables is also needed to use TFCO.\nvar_list = (model_constrained.trainable_weights + list(problem.trainable_variables) +\n optimizer.trainable_variables())",
"The model is now set up and ready to be trained with the false positive rate constraint across age group.\nNow, because the last iteration of the constrained model may not necessarily be the best performing model in terms of the defined constraint, the TFCO library comes equipped with tfco.find_best_candidate_index() that can help choose the best iterate out of the ones found after each epoch. Think of tfco.find_best_candidate_index() as an added heuristic that ranks each of the outcomes based on accuracy and fairness constraint (in this case, false positive rate across age group) separately with respect to the training data. That way, it can search for a better trade-off between overall accuracy and the fairness constraint.\nThe following cells will start the training with constraints while also finding the best performing model per iteration.",
"# Obtain train set batches.\n\nNUM_ITERATIONS = 100 # Number of training iterations.\nSKIP_ITERATIONS = 10 # Print training stats once in this many iterations.\n\n# Create temp directory for saving snapshots of models.\ntemp_directory = tempfile.mktemp()\nos.mkdir(temp_directory)\n\n# List of objective and constraints across iterations.\nobjective_list = []\nviolations_list = []\n\n# Training iterations.\niteration_count = 0\nfor (image, label, group) in celeb_a_train_data_w_group(BATCH_SIZE):\n # Assign current batch to input, labels and groups tensors.\n input_tensor.assign(image)\n labels_tensor.assign(label)\n groups_tensor.assign(group)\n\n # Run gradient update.\n optimizer.minimize(problem, var_list=var_list)\n\n # Record objective and violations.\n objective = problem.objective()\n violations = problem.constraints()\n\n sys.stdout.write(\n \"\\r Iteration %d: Hinge Loss = %.3f, Max. Constraint Violation = %.3f\"\n % (iteration_count + 1, objective, max(violations)))\n\n # Snapshot model once in SKIP_ITERATIONS iterations.\n if iteration_count % SKIP_ITERATIONS == 0:\n objective_list.append(objective)\n violations_list.append(violations)\n\n # Save snapshot of model weights.\n model_constrained.save_weights(\n temp_directory + \"/celeb_a_constrained_\" +\n str(iteration_count / SKIP_ITERATIONS) + \".h5\")\n\n iteration_count += 1\n if iteration_count >= NUM_ITERATIONS:\n break\n\n# Choose best model from recorded iterates and load that model.\nbest_index = tfco.find_best_candidate_index(\n np.array(objective_list), np.array(violations_list))\n\nmodel_constrained.load_weights(\n temp_directory + \"/celeb_a_constrained_\" + str(best_index) + \".0.h5\")\n\n# Remove temp directory.\nos.system(\"rm -r \" + temp_directory)",
"After having applied the constraint, we evaluate the results once again using Fairness Indicators.",
"model_location = save_model(model_constrained, 'model_export_constrained')\neval_result_constrained = get_eval_results(model_location, 'eval_results_constrained')",
"As with the previous time we used Fairness Indicators, deselect false_negative_rate and select false_positive_rate to look at the metric we are interested in.\nNote that to fairly compare the two versions of our model, it is important to use thresholds that set the overall false positive rate to be roughly equal. This ensures that we are looking at actual change as opposed to just a shift in the model equivalent to simply moving the threshold boundary. In our case, comparing the unconstrained model at 0.5 and the constrained model at 0.22 provides a fair comparison for the models.",
"eval_results_dict = {\n 'constrained': eval_result_constrained,\n 'unconstrained': eval_results_unconstrained,\n}\ntfma.addons.fairness.view.widget_view.render_fairness_indicator(multi_eval_results=eval_results_dict)",
"With TFCO's ability to express a more complex requirement as a rate constraint, we helped this model achieve a more desirable outcome with little impact to the overall performance. There is, of course, still room for improvement, but at least TFCO was able to find a model that gets close to satisfying the constraint and reduces the disparity between the groups as much as possible."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
CELMA-project/CELMA
|
MES/divOfScalarTimesVector/2b-JTimesDivSource/calculations/exactSolutions.ipynb
|
lgpl-3.0
|
[
"Exact solution used in MES runs\nWe would like to MES the operation (in a cylindrical geometry)\n$$\nJ\\nabla \\cdot \\left(S_n\\frac{\\nabla_\\perp \\phi}{B}\\right)\n$$\nAs we have a homogenenous $B$-field, we have normalized it out, and remain with\n$$\nJ\\nabla \\cdot \\left(S_n\\nabla_\\perp \\phi\\right)\n$$",
"%matplotlib notebook\n\nfrom sympy import init_printing\nfrom sympy import S\nfrom sympy import sin, cos, tanh, exp, pi, sqrt\n\nfrom boutdata.mms import x, y, z, t\nfrom boutdata.mms import Delp2, DDX, DDY, DDZ\n\nimport os, sys\n# If we add to sys.path, then it must be an absolute path\ncommon_dir = os.path.abspath('./../../../../common')\n# Sys path is a list of system paths\nsys.path.append(common_dir)\nfrom CELMAPy.MES import get_metric, make_plot, BOUT_print\n\ninit_printing()",
"Initialize",
"folder = '../twoGaussians/'\nmetric = get_metric()",
"Define the variables",
"# Initialization\nthe_vars = {}",
"Define manifactured solutions\nWe have that\n$$JS = J\\nabla\\cdot(S_n\\nabla_\\perp\\phi) = JS_n\\nabla_\\perp^2\\phi + J\\nabla S_n\\cdot \\nabla_\\perp \\phi = JS_n\\nabla_\\perp^2\\phi + J\\nabla_\\perp S_n\\cdot \\nabla_\\perp \\phi$$\nWe will use the Delp2 operator for the perpendicular Laplace operator (as the y-derivatives vanishes in cylinder geometry). We have\nDelp2$(f)=g^{xx}\\partial_x^2 f + g^{zz}\\partial_z^2 f + 2g^{xz}\\partial_x\\partial_z f + G^1\\partial_x f + G^3\\partial_z f$\nUsing the cylinder geometry, we get that\nDelp2$(f)=\\partial_x^2 f + \\frac{1}{x^2}\\partial_z^2 f + \\frac{1}{x}\\partial_x f$\nFurther on, due to orthogonality we have that\n$$\\nabla_\\perp S_n\\cdot \\nabla_\\perp \\phi = \\mathbf{e}^i\\cdot \\mathbf{e}^i(\\partial_i S_n)(\\partial_i \\phi)\n = g^{xx}(\\partial_x S_n)(\\partial_x \\phi) + g^{zz}(\\partial_z S_n)(\\partial_z \\phi) = (\\partial_x S_n)(\\partial_x \\phi) + \\frac{1}{x^2}(\\partial_z S_n)(\\partial_z \\phi)$$\nThis gives\n$$JS = \\nabla\\cdot(S_n\\nabla_\\perp\\phi) = xS_n\\partial_x^2 \\phi + S_n\\frac{1}{x}\\partial_z^2 \\phi + S_n\\partial_x \\phi + (\\partial_x S_n)(\\partial_x \\phi) + \\frac{1}{x}(\\partial_z S_n)(\\partial_z \\phi)$$\nWe will use this to calculate the analytical solution.\nNOTE:\n\nz must be periodic\nThe field $f(\\rho, \\theta)$ must be of class infinity in $z=0$ and $z=2\\pi$\nThe field $f(\\rho, \\theta)$ must be single valued when $\\rho\\to0$\nThe field $f(\\rho, \\theta)$ must be continuous in the $\\rho$ direction with $f(\\rho, \\theta + \\pi)$\nEventual BC in $\\rho$ must be satisfied",
"# We need Lx\nfrom boututils.options import BOUTOptions\nmyOpts = BOUTOptions(folder)\nLx = eval(myOpts.geom['Lx'])\n\n# Two normal gaussians\n\n# The gaussian\n# In cartesian coordinates we would like\n# f = exp(-(1/(2*w^2))*((x-x0)^2 + (y-y0)^2))\n# In cylindrical coordinates, this translates to\n# f = exp(-(1/(2*w^2))*(x^2 + y^2 + x0^2 + y0^2 - 2*(x*x0+y*y0) ))\n# = exp(-(1/(2*w^2))*(rho^2 + rho0^2 - 2*rho*rho0*(cos(theta)*cos(theta0)+sin(theta)*sin(theta0)) ))\n# = exp(-(1/(2*w^2))*(rho^2 + rho0^2 - 2*rho*rho0*(cos(theta - theta0)) ))\n\nw = 0.8*Lx\nrho0 = 0.3*Lx\ntheta0 = 5*pi/4\nthe_vars['phi'] = exp(-(1/(2*w**2))*(x**2 + rho0**2 - 2*x*rho0*(cos(z - theta0)) ))\n\nw = 0.5*Lx\nrho0 = 0.2*Lx\ntheta0 = 0\nthe_vars['S_n'] = exp(-(1/(2*w**2))*(x**2 + rho0**2 - 2*x*rho0*(cos(z - theta0)) ))",
"Calculate the solution",
"the_vars['S'] = metric.J*( the_vars['S_n']*Delp2(the_vars['phi'], metric=metric)\\\n + metric.g11*DDX(the_vars['S_n'], metric=metric)*DDX(the_vars['phi'], metric=metric)\\\n + metric.g33*DDZ(the_vars['S_n'], metric=metric)*DDZ(the_vars['phi'], metric=metric)\\\n )",
"Plot",
"make_plot(folder=folder, the_vars=the_vars, plot2d=True, include_aux=False)",
"Print the variables in BOUT++ format",
"BOUT_print(the_vars, rational=False)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
radical-cybertools/supercomputing2015-tutorial
|
03_analytics/Kmeans.ipynb
|
apache-2.0
|
[
"KMeans: Scitkit, Pilot and Spark/MLlib\nThis is perhaps the best known database to be found in the pattern recognition literature. The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant (see https://archive.ics.uci.edu/ml/datasets/Iris). \nSource: R. A. Fisher, The Use of Multiple Measurements in Taxonomic Problems, 1936, http://rcs.chemometrics.ru/Tutorials/classification/Fisher.pdf\nPictures (Source Wikipedia)\n<table>\n<tr><td>\nSetosa\n</td><td>\nVersicolor\n</td><td>\nVirginica\n</td></tr>\n<tr><td>\n<img src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/5/56/Kosaciec_szczecinkowaty_Iris_setosa.jpg/180px-Kosaciec_szczecinkowaty_Iris_setosa.jpg\"/> \n</td><td>\n<img src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/4/41/Iris_versicolor_3.jpg/320px-Iris_versicolor_3.jpg\"/>\n</td><td>\n<img src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/9/9f/Iris_virginica.jpg/295px-Iris_virginica.jpg\"/>\n</td></tr></table>\n\n1. Data Overview\nWe will begin by loading the data into a Pandas dataframe.",
"%matplotlib inline\nimport pandas as pd\nimport seaborn as sns\nimport numpy as np\n\ndata = pd.read_csv(\"https://raw.githubusercontent.com/pydata/pandas/master/pandas/tests/data/iris.csv\")\ndata.head()",
"2. Scikit\nScikit is a machine learning library for Python built upon numpy and matplotlib. It provides functions for classification, regression, clustering and other common analytics tasks.",
"from sklearn.cluster import KMeans\nkmeans = KMeans(n_clusters=3)\nresults = kmeans.fit_predict(data[['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth']])\n\ndata_kmeans=pd.concat([data, pd.Series(results, name=\"ClusterId\")], axis=1)\ndata_kmeans.head()",
"In the following we evaluate the resulting fit (commonly referred to as the model), using the sum of squared errors and a pair plot. The following pair plot shows the scatter-plot between each of the four features. Clusters for the different species are indicated by different colors.",
"print \"Sum of squared error: %.1f\"%kmeans.inertia_\n\nsns.pairplot(data_kmeans, vars=[\"SepalLength\", \"SepalWidth\", \"PetalLength\", \"PetalWidth\"], hue=\"ClusterId\");",
"3. Pilot Approach\nWe will now use RADICAL-Pilot to compute the distance function, as a simple representation of how the above example can be executed as a task-parallel application.",
"import os, sys\nimport commands\nimport radical.pilot as rp\nos.environ[\"RADICAL_PILOT_DBURL\"]=\"mongodb://ec2-54-221-194-147.compute-1.amazonaws.com:24242/giannis\"\n\ndef print_details(detail_object):\n if type(detail_object)==str:\n detail_object = ast.literal_eval(detail_object)\n for i in detail_object:\n detail_object[i]=str(detail_object[i])\n #print str(detail_object)\n return pd.DataFrame(detail_object.values(), \n index=detail_object.keys(), \n columns=[\"Value\"])\n\n\nsession = rp.Session()\nc = rp.Context('ssh')\nc.user_id = \"radical\"\nsession.add_context(c)\npmgr = rp.PilotManager(session=session)\numgr = rp.UnitManager (session=session,\n scheduler=rp.SCHED_DIRECT_SUBMISSION)\nprint \"Session id: %s Pilot Manager: %s\" % (session.uid, str(pmgr.as_dict()))\npdesc = rp.ComputePilotDescription ()\npdesc.resource = \"local.localhost_anaconda\"\npdesc.runtime = 10 \npdesc.cores = 16\npdesc.cleanup = False\npilot = pmgr.submit_pilots(pdesc)\numgr = rp.UnitManager (session=session,\n scheduler=rp.SCHED_DIRECT_SUBMISSION)\numgr.add_pilots(pilot)",
"In the following, we will partition the data and distribute it to a set of CUs for fast processing",
"number_clusters = 3\nclusters = data.sample(number_clusters)\n\nclusters\n\nclusters.to_csv(\"clusters.csv\")\ndata.to_csv(\"points.csv\")",
"Helper Function for computing new centroids as mean of all points assigned to a cluster",
"def compute_new_centroids(distances):\n df = pd.DataFrame(distances)\n df[4] = df[4].astype(int)\n df = df.groupby(4)[0,1,2,3].mean()\n centroids_np = df.as_matrix()\n return centroids_np",
"Running Mapper Function as an External Process",
"for i in range(10):\n distances =!/opt/anaconda/bin/python mapper.py points.csv clusters.csv\n distances_np = np.array(eval(\" \".join(distances)))\n new_centroids = compute_new_centroids(distances_np)\n new_centroids_df = pd.DataFrame(new_centroids, columns=[\"SepalLength\", \"SepalWidth\", \"PetalLength\", \"PetalWidth\"])\n new_centroids_df.to_csv(\"clusters.csv\") ",
"Running Mapper Function inside RADICAL-Pilot\nHelper function to read output from completed compute units after it has been executed inside the Pilot.",
"import urlparse\n\ndef get_output(compute_unit):\n working_directory=compute_unit.as_dict()['working_directory']\n path = urlparse.urlparse(working_directory).path\n output=open(os.path.join(path, \"STDOUT\")).read()\n return output",
"This is the main application loop. The distance computation is executed inside a ComputeUnit. See mapper.py for code. Data is read from files and written to stdout. We execute 10 iterations of KMeans.",
"for i in range(10):\n cudesc = rp.ComputeUnitDescription()\n cudesc.executable = \"/opt/anaconda/bin/python\"\n cudesc.arguments = [os.path.join(os.getcwd(), \"mapper.py\"),\n os.path.join(os.getcwd(), \"points.csv\"),\n os.path.join(os.getcwd(), \"clusters.csv\")]\n cu_set = umgr.submit_units([cudesc])\n umgr.wait_units()\n output = get_output(cu_set[0])\n distances_np = np.array(eval(output))\n new_centroids = compute_new_centroids(distances_np)\n new_centroids_df = pd.DataFrame(new_centroids, columns=[\"SepalLength\", \"SepalWidth\", \"PetalLength\", \"PetalWidth\"])\n new_centroids_df.to_csv(\"clusters.csv\") \n print \"Finished iteration: %d\"%(i)",
"Print out final centroids computed",
"new_centroids_df\n\nsession.close()",
"Spark MLLib\nIn the following we utilize the Spark MLlib KMeans implementation. See http://spark.apache.org/docs/latest/mllib-clustering.html#k-means\nWe use Pilot-Spark to startup Spark.",
"from numpy import array\nfrom math import sqrt\n\n%run ../env.py\n%run ../util/init_spark.py\n\nfrom pilot_hadoop import PilotComputeService as PilotSparkComputeService\n\ntry:\n sc\nexcept:\n pilotcompute_description = {\n \"service_url\": \"yarn-client://sc15.radical-cybertools.org\",\n \"number_of_processes\": 5\n }\n pilot_spark = PilotSparkComputeService.create_pilot(pilotcompute_description=pilotcompute_description)\n\n sc = pilot_spark.get_spark_context()\n sqlCtx=SQLContext(sc)",
"Load and parse the data in a Spark DataFrame.",
"data_spark=sqlCtx.createDataFrame(data)\ndata_spark_without_class=data_spark.select('SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth').show()",
"Convert DataFrame to Tuple for MLlib",
"data_spark_tuple = data_spark.map(lambda a: (a[0],a[1],a[2],a[3]))\n\nfrom pyspark.mllib.clustering import KMeans, KMeansModel\nclusters = KMeans.train(data_spark_tuple, 3, maxIterations=10,\n runs=10, initializationMode=\"random\")\n\n# Evaluate clustering by computing Within Set Sum of Squared Errors\ndef error(point):\n center = clusters.centers[clusters.predict(point)]\n return sqrt(sum([x**2 for x in (point - center)]))\n\nWSSSE = data_spark_tuple.map(lambda point: error(point)).reduce(lambda x, y: x + y)\nprint(\"Within Set Sum of Squared Error = \" + str(WSSSE))",
"Stop Pilot-Job",
"pilot_spark.cancel()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
andrzejkrawczyk/python-course
|
part_1/01.Wstęp.ipynb
|
apache-2.0
|
[
"Kurs Python Część 1\nZespół Learnetic\nAndrzej Krawczyk\nPython\n\nJęzyk wysokiego poziomu\nJęzyk skryptowy\nJęzyk interpretowany\nJęzyk dynamiczny\nJęzyk szerokiego zastosowania\n\nCechy Pythona\n\nDynamiczne typowanie\nAutomatyczne zarządzanie pamięcią\nGarbage Collector\nObiektowo orientowany\n\nWbudowane typy\n\n\nBatteries included\n\nOpen source\nPrzenośny\nŁatwy w nauce\nSzybki proces implementacji\nBardzo czytelny i elegancki\n\nZastosowania Pythona\n\nGUI (wxPython, PyGtk, pyQT)\nAplikacje Webowe (Django, DjangoCMS, Flask)\nIntegracje komponentów\nData science (Anaconda, Jupyter)\nGry, grafika\nBłyskawiczne prototypowanie\n\nWiele paradygmatów\n\nProgramowanie strukturalne\nProgramowanie obiektowe\nProgramowanie funkcyjne\n\nZen of Python\n\nBeautiful is better than ugly.\nExplicit is better than implicit.\nSimple is better than complex.\nComplex is better than complicated.\nFlat is better than nested.\nSparse is better than dense.\n\nReadability counts.\nSpecial cases aren't special enough to break the rules.\nAlthough practicality beats purity.\nErrors should never pass silently.\nUnless explicitly silenced.\nIn the face of ambiguity, refuse the temptation to guess.\nThere should be one-- and preferably only one --obvious way to do it.\nAlthough that way may not be obvious at first unless you're Dutch.\nNow is better than never.\nAlthough never is often better than right now.\nIf the implementation is hard to explain, it's a bad idea.\nIf the implementation is easy to explain, it may be a good idea.\nNamespaces are one honking great idea -- let's do more of those!\n\nPierwszy Program\nx86\n\nC\n\nJava\n\nPerl\n\nprint \"Hello World!\\n\";\n\nPython 2.x\n\nprint \"Hello world!\"\n\nPython 3.x",
"print(\"Hello world!\")",
"Uruchomiene Aplikacji\n\n\nZ poziomu konsoli\n > python program.py\n\n\nShebang (Unix)\n > #!/usr/bin/env python3\n\n\nPrawa wykonywania\n > chmod u+x program.py\n >\n\n\nInteraktywny Interpreter\n > ipython\n\n\nSkładnia\n\ninstrukcje nie są zakończone średnikiem\nbloki kodu są definiowane przez wcięcia\nwiele angielskich słów\nkomentarz #\nzminimalizowany zbiór instrukcji\n\nPEP8\n\nwcięcia 4 spacje\nDługość lini 79\nImportowanie per linia, na poczatku modułu, kolejność priorytetowa\n(2 + 3), 4",
"\n#wyrównanie do nawiasu otwierającego\nfoo = moja_dluga_funkcja(zmienna_jeden, zmienna_dwa\n zmienna_trzy, zmienna_cztery)\n\n# zwiększone wcięcia aby rozróżnić funkcję od ciała funkcji\ndef moja_dluga_funkcja(\n zmienna_jeden, zmienna_dwa, zmienna_trzy,\n zmienna_cztery):\n print(zmienna_jeden)\n\nfoo = moja_dluga_funkcja(\n zmienna_jeden, zmienna_dwa,\n zmienna_trzy, zmienna_cztery)\n\n# Może być w tym przypadku inna ilość wcięć niż 4\nfoo = moja_dluga_funkcja(\n zmienna_jeden, zmienna_dwa,\n zmienna_trzy, zmienna_cztery)\n\nmoja_lista = [\n 1, 2, 3,\n 4, 5, 6,\n ]\n\nwynik = funkcja_przyjmujaca_argumenty(\n 'a', 'b', 'c',\n 'd', 'e', 'f',\n )\n\n# dozwolone też wyrównanie z początkiem lini otwierającej wielowierszową konstrukcję\n\nmoja_lista = [\n 1, 2, 3,\n 4, 5, 6,\n]\n\nwynik = funkcja_przyjmujaca_argumenty(\n 'a', 'b', 'c',\n 'd', 'e', 'f',\n)\n\nprzychod = (zarobki_brutto\n + zwrot_z_podatku\n + (dywidendy - podatek_od_dywidend)\n - ubezpieczenie_samochodu\n - kredyt_studencki)\n\n#Dobrze:\nimport os\nimport sys\n\n#Źle: \nimport sys, os\n \n#Dobrze:\nfrom subprocess import Popen, PIPE \n\n\"\"\"Przykładowy moduł.\n\nCoś tutaj się dzieje.\n\"\"\"\n\nfrom __future__ import jakiś_moduł\n\n__all__ = ['a', 'b', 'c']\n__version__ = '0.1'\n__author__ = 'Andrzej Krawczyk'\n\nimport os\nimport sys\n\n#Dobrze:\ndef average(count, length=5):\n return foo(c=count, l=length)\n\n#Źle:\ndef complex(count, length = 5):\n return foo(c = count, l = length)",
"Biblioteki, Narzędzia\n\nrequests\nscrapy\ntwisted / tornado\nmypy - optional static typying for python\npython-attrs www.attrs.org\nipython\n\n<center><h1>Golang następcą Pythona?</h1></center>\nPython 2 / 3\n\nCPython\nCython\nJython\nIronPython\nPyPy\nStackless Python"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
boffi/boffi.github.io
|
dati_2018/wt03/Continuous.ipynb
|
mit
|
[
"Continuous System\nImports, stuff",
"from sympy import *\ninit_printing(use_latex=True)\nfrom IPython.display import Latex\n%matplotlib inline",
"Symbols",
"x, w2 = symbols('x omega^2')\nL, m, EJ = symbols('L m EJ', positive = True)\nA, B, C, D, ld, LD = symbols('A B C D lambda Lambda')\nf, φ = symbols('f phi')",
"Supported mass and stiffness of support",
"mass_coeff = 8\nstiff_coeff = 24\nk = stiff_coeff*EJ/L**3\nM = mass_coeff*m*L",
"General solution and its derivatives",
"f0 = A*cos(ld*x) + B*sin(ld*x) + C*cosh(ld*x) + D*sinh(ld*x)\nf1 = f0.diff(x)\nf2 = f1.diff(x)\nf3 = f2.diff(x)\ndisplay(Eq(φ,f0))",
"Left boundary conditions\nThe eigenfunction and its second derivative must be zero when 0 is substituted for x, we solve for A and C and put the solution in the variable AC.\nWe substitute our solution in the eigenfunctions and all of its derivatives.",
"AC = solve((f0.subs(x,0), f2.subs(x,0)), A, C, dict=True)\nf0, f1, f2, f3 = [f.subs(AC[0]) for f in (f0, f1, f2, f3)]\ndisplay(Eq(φ, f0))",
"First, simpler boundary condition at the right end, $x=L$.\nThe second derivative must be equal to zero, so we solve and substitute, also substitute $\\lambda L$ with $\\Lambda$",
"D = solve(f2.subs(x, L), D, dict=True)\nf0, f1, f2, f3 = [f.subs(D[0]).subs(L,LD/ld) for f in (f0, f1, f2, f3)]\ndisplay(Latex('With $\\\\Lambda = \\\\lambda\\\\,L$ it is'))\ndisplay(Eq(φ, f0.simplify()))",
"Last boundary conditions, equation of wave numbers\nThe last equation is an equation of equilibrium\n$$V(t) + k\\, v(t) + M\\,\\ddot v(t) = 0$$\n(all the forces are directed upwards).\nWith $v(t)=\\phi(x)\\,\\sin\\omega t$, the shear is $V = -EJ\\, v''' = -EJ\\, \\phi'''(x)\\sin\\omega t$ and\nthe inertial force is $M\\,\\ddot v= -M\\,\\phi\\,\\omega^2\\sin\\omega t$ that can be rewritten taking into account that $\\omega^2=\\lambda^4EJ/m$: $\\text{ }M\\,\\ddot v= - M/m\\,EJ\\,\\lambda^4\\phi\\sin\\omega t$.\nLet's write the expanded equation, collecting all the terms that are no $\\Lambda$:",
"eq = (f0*k - f0*M*ld**4*EJ/m - EJ*f3).subs(x, L).subs(L, LD/ld)\ndisplay(Eq(eq.expand().collect(B).collect(ld).collect(EJ), 0))",
"We have a non trivial solution when the term in brackets is equal to zero, to have the bracketed term we must divide both members by $B\\,EJ\\, \\lambda^3$",
"eq = (eq/EJ/ld**3/B).expand()\ndisplay(Eq(eq,0))",
"The behavior near $\\Lambda=0$ is led by the last term that goes like $48/\\Lambda^2$, so to have a nice plot we multiply everything by $\\Lambda^2$",
"display(Eq(symbols('f'), (eq*LD**2).expand()))\nplot(eq*LD**2, (LD, 0, 2));",
"and see that there is a root between 1.25 and 1.5. If we were interested in upper roots, we can observe that all the terms in the LHS of our determinantal equations are bounded for increasing $\\Lambda$ except for the first one, that grows linearly, so to investigate the other roots we may divide the equation by $\\Lambda$ to remove that trend...",
"display(Eq(symbols('f'), (eq/LD).expand()))\nplot(eq/LD, (LD, 2, 10));",
"All the RHS terms except the first have $\\Lambda$ in the denominator and are bounded, so the asymptotic behaviour is controlled by $\\Lambda_{n+1}=n\\pi$.",
"from scipy.optimize import bisect\nf = lambdify(LD, eq, modules='math')\nl1 = bisect(f, 0.5, 1.5)\nLatex(r'$\\lambda_1=%.6f\\,\\frac{1}{L}, \\quad\\omega_1^2=%.6f\\,\\frac{EJ}{mL^4}$'%(l1, l1**4))",
"Rayleigh Quotient\nUsing $v=\\frac xL\\sin\\omega t$ (that is, a rigid rotation about the left hinge) we have\n$$T_\\text{max}=\\frac12\\omega^2\\Big(\\int_0^Lm\\left(\\frac xL\\right)^2dx + M\\,1^2\\Big) =\n\\frac12\\omega^2\\Big(\\frac13+8\\Big)mL\n$$\nand\n$$V_\\text{max}=\\frac12\\Big(\\int_0^L EJ\\left(\\frac xL\\right)''^2 + k\\,1^2\\Big) =\n\\frac12\\Big(0+24\\Big)\\,\\frac{EJ}{L^3}.\n$$\nEquating the maximum energies and solving for $\\omega^2$ gives\n$$ \\omega^2 = \\frac{24\\,EJ/L^3}{\\frac{25}3\\,mL} = 3\\,\\frac{24}{25}\\,\\frac{EJ}{mL^4}=\\ldots\n$$",
"display(Latex(r'$\\omega^2_{R00} = %.3f\\,\\frac{EJ}{mL^4}$'%(3*24/25)))",
"We can say that the RQ check reinforces our previouos finding..."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jtryan/camera-calibration
|
camera_calibration.ipynb
|
mit
|
[
"%%HTML\n<style> code {background-color : pink !important;} </style>",
"Camera Calibration with OpenCV\nRun the code in the cell below to extract object points and image points for camera calibration.",
"import numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\n%matplotlib qt\n\n# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\nobjp = np.zeros((6*8,3), np.float32)\nobjp[:,:2] = np.mgrid[0:8, 0:6].T.reshape(-1,2)\n\n# Arrays to store object points and image points from all the images.\nobjpoints = [] # 3d points in real world space\nimgpoints = [] # 2d points in image plane.\n\n# Make a list of calibration images\nimages = glob.glob('calibration_wide/GO*.jpg')\n\n# Step through the list and search for chessboard corners\nfor idx, fname in enumerate(images):\n img = cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (8,6), None)\n\n # If found, add object points, image points\n if ret == True:\n objpoints.append(objp)\n imgpoints.append(corners)\n\n # Draw and display the corners\n cv2.drawChessboardCorners(img, (8,6), corners, ret)\n #write_name = 'corners_found'+str(idx)+'.jpg'\n #cv2.imwrite(write_name, img)\n cv2.imshow('img', img)\n cv2.waitKey(500)\n\ncv2.destroyAllWindows()",
"If the above cell ran sucessfully, you should now have objpoints and imgpoints needed for camera calibration. Run the cell below to calibrate, calculate distortion coefficients, and test undistortion on an image!",
"import pickle\n%matplotlib inline\n\n# Test undistortion on an image\nimg = cv2.imread('calibration_wide/test_image.jpg')\nimg_size = (img.shape[1], img.shape[0])\n\n# Do camera calibration given object points and image points\nret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\n\n\ndst = cv2.undistort(img, mtx, dist, None, mtx)\ncv2.imwrite('calibration_wide/test_undist.jpg',dst)\n\n# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\ndist_pickle = {}\ndist_pickle[\"mtx\"] = mtx\ndist_pickle[\"dist\"] = dist\npickle.dump( dist_pickle, open( \"calibration_wide/wide_dist_pickle.p\", \"wb\" ) )\n#dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)\n# Visualize undistortion\nf, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))\nax1.imshow(img)\nax1.set_title('Original Image', fontsize=30)\nax2.imshow(dst)\nax2.set_title('Undistorted Image', fontsize=30)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
schaber/deep-learning
|
gan_mnist/Intro_to_GANs_Exercises.ipynb
|
mit
|
[
"Generative Adversarial Network\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\nGANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\nPix2Pix \nCycleGAN\nA whole list\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can fool the discriminator.\nThe output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.",
"%matplotlib inline\n\nimport pickle as pkl\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data')",
"Model Inputs\nFirst we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.\n\nExercise: Finish the model_inputs function below. Create the placeholders for inputs_real and inputs_z using the input sizes real_dim and z_dim respectively.",
"def model_inputs(real_dim, z_dim):\n inputs_real = tf.placeholder(tf.float32, [None, real_dim], name='input_real')\n inputs_z = tf.placeholder(tf.float32, [None, z_dim], name='input_z')\n \n return inputs_real, inputs_z",
"Generator network\n\nHere we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\nVariable Scope\nHere we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.\nWe could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.\nTo use tf.variable_scope, you use a with statement:\npython\nwith tf.variable_scope('scope_name', reuse=False):\n # code here\nHere's more from the TensorFlow documentation to get another look at using tf.variable_scope.\nLeaky ReLU\nTensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can just take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:\n$$\nf(x) = max(\\alpha * x, x)\n$$\nTanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.\n\nExercise: Implement the generator network in the function below. You'll need to return the tanh output. Make sure to wrap your code in a variable scope, with 'generator' as the scope name, and pass the reuse keyword argument from the function to tf.variable_scope.",
"def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):\n ''' Build the generator network.\n \n Arguments\n ---------\n z : Input tensor for the generator\n out_dim : Shape of the generator output\n n_units : Number of units in hidden layer\n reuse : Reuse the variables with tf.variable_scope\n alpha : leak parameter for leaky ReLU\n \n Returns\n -------\n out, logits: \n '''\n with tf.variable_scope('generator', reuse=reuse): # finish this\n # Hidden layer\n #h1 = tf.contrib.layers.fully_connected(z, n_units)\n h1 = tf.layers.dense(z, n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha*h1, h1)\n \n # Logits and tanh output\n #logits = tf.contrib.layers.fully_connected(h1, out_dim)\n logits = tf.layers.dense(h1, out_dim)\n out = tf.tanh(logits)\n \n return out",
"Discriminator\nThe discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.\n\nExercise: Implement the discriminator network in the function below. Same as above, you'll need to return both the logits and the sigmoid output. Make sure to wrap your code in a variable scope, with 'discriminator' as the scope name, and pass the reuse keyword argument from the function arguments to tf.variable_scope.",
"def discriminator(x, n_units=128, reuse=False, alpha=0.01):\n ''' Build the discriminator network.\n \n Arguments\n ---------\n x : Input tensor for the discriminator\n n_units: Number of units in hidden layer\n reuse : Reuse the variables with tf.variable_scope\n alpha : leak parameter for leaky ReLU\n \n Returns\n -------\n out, logits: \n '''\n with tf.variable_scope('discriminator', reuse=reuse): # finish this\n # Hidden layer\n #h1 = tf.contrib.layers.fully_connected(x, n_units)\n h1 = tf.layers.dense(x, n_units, activation=None)\n # Leaky ReLU\n h1 = tf.maximum(alpha*h1, h1)\n \n #logits = tf.contrib.layers.fully_connected(h1, 1)\n logits = tf.layers.dense(h1, 1, activation=None)\n out = tf.sigmoid(logits)\n \n return out, logits",
"Hyperparameters",
"# Size of input image to discriminator\ninput_size = 784 # 28x28 MNIST images flattened\n# Size of latent vector to generator\nz_size = 100\n# Sizes of hidden layers in generator and discriminator\ng_hidden_size = 128\nd_hidden_size = 128\n# Leak factor for leaky ReLU\nalpha = 0.01\n# Label smoothing \nsmooth = 0.1",
"Build network\nNow we're building the network from the functions defined above.\nFirst is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.\nThen, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.\nThen the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).\n\nExercise: Build the network from the functions you defined earlier.",
"tf.reset_default_graph()\n# Create our input placeholders\ninput_real, input_z = model_inputs(input_size, z_size)\n\n# Generator network here\ng_model = generator(input_z, input_size)\n# g_model is the generator output\n\n# Disriminator network here\nd_model_real, d_logits_real = discriminator(input_real)\nd_model_fake, d_logits_fake = discriminator(g_model, reuse=True)",
"Discriminator and Generator Losses\nNow we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like \npython\ntf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))\nFor the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)\nThe discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\nFinally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.\n\nExercise: Calculate the losses for the discriminator and the generator. There are two discriminator losses, one for real images and one for fake images. For the real image loss, use the real logits and (smoothed) labels of ones. For the fake image loss, use the fake logits with labels of all zeros. The total discriminator loss is the sum of those two losses. Finally, the generator loss again uses the fake logits from the discriminator, but this time the labels are all ones because the generator wants to fool the discriminator.",
"# Calculate losses\nd_loss_real = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=d_logits_real, labels=tf.ones_like(d_logits_real)*(1.0-smooth)\n ))\n\nd_loss_fake = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=d_logits_fake, labels=tf.zeros_like(d_logits_real)\n ))\n\nd_loss = d_loss_real + d_loss_fake\n\ng_loss = tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)\n ))",
"Optimizers\nWe want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.\nFor the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables that start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance). \nWe can do something similar with the discriminator. All the variables in the discriminator start with discriminator.\nThen, in the optimizer we pass the variable lists to the var_list keyword argument of the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.\n\nExercise: Below, implement the optimizers for the generator and discriminator. First you'll need to get a list of trainable variables, then split that list into two lists, one for the generator variables and another for the discriminator variables. Finally, using AdamOptimizer, create an optimizer for each network that update the network variables separately.",
"for v in tf.trainable_variables():\n print(v.name)\n\n# Optimizers\nlearning_rate = 0.002\n\n# Get the trainable_variables, split into G and D parts\nt_vars = tf.trainable_variables()\ng_vars = []\nd_vars = []\nfor v in t_vars:\n if v.name.startswith('generator'):\n g_vars.append(v)\n elif v.name.startswith('discriminator'):\n d_vars.append(v)\n else:\n print('Unexpected variable: {}'.format(v))\n\nd_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)\ng_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)",
"Training",
"batch_size = 100\nepochs = 100\nsamples = []\nlosses = []\nsaver = tf.train.Saver(var_list = g_vars)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n \n # Get images, reshape and rescale to pass to D\n batch_images = batch[0].reshape((batch_size, 784))\n batch_images = batch_images*2 - 1\n \n # Sample random noise for G\n batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n \n # Run optimizers\n _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})\n _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})\n \n # At the end of each epoch, get the losses and print them out\n train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})\n train_loss_g = g_loss.eval({input_z: batch_z})\n \n print(\"Epoch {}/{}...\".format(e+1, epochs),\n \"Discriminator Loss: {:.4f}...\".format(train_loss_d),\n \"Generator Loss: {:.4f}\".format(train_loss_g)) \n # Save losses to view after training\n losses.append((train_loss_d, train_loss_g))\n \n # Sample from generator as we're training for viewing afterwards\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, reuse=True),\n feed_dict={input_z: sample_z})\n samples.append(gen_samples)\n saver.save(sess, './checkpoints/generator.ckpt')\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)",
"Training loss\nHere we'll check out the training losses for the generator and discriminator.",
"%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\nfig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()",
"Generator samples from training\nHere we can view samples of images from the generator. First we'll look at images taken while training.",
"def view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n \n return fig, axes\n\n# Load samples from generator taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)",
"These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.",
"_ = view_samples(-1, samples)",
"Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!",
"rows, cols = 10, 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)",
"It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.\nSampling from the generator\nWe can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!",
"saver = tf.train.Saver(var_list=g_vars)\nwith tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))\n sample_z = np.random.uniform(-1, 1, size=(16, z_size))\n gen_samples = sess.run(\n generator(input_z, input_size, reuse=True),\n feed_dict={input_z: sample_z})\nview_samples(0, [gen_samples])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
thewtex/ieee-nss-mic-scipy-2014
|
3_SciPy_Stack.ipynb
|
apache-2.0
|
[
"The SciPy Stack\n\nNumPy: Base N-dimensional array package\nSciPy: Fundamental library for scientific computing\nMatplotlib: Comprehensive 2D Plotting\nIPython: Enhanced Interactive Console\nSympy: Symbolic mathematics\npandas: Data structures & analysis\nnose: Unit testing\n\n\nThis is one of the 100 recipes of the IPython Cookbook, the definitive guide to high-performance scientific computing and data science in Python.\n\nNumPy\n<img src=\"images/numpylogo.png\">\nLet's import the built-in random Python module and NumPy.",
"import random\nimport numpy as np",
"We use the %precision magic (defined in IPython) to show only 3 decimals in the Python output. This is just to alleviate the text.",
"%precision 3",
"We generate two Python lists x and y, each one containing one million random numbers between 0 and 1.",
"n = 1000000\nx = [random.random() for _ in range(n)]\ny = [random.random() for _ in range(n)]\n\nx[:3], y[:3]",
"Let's compute the element-wise sum of all these numbers: the first element of x plus the first element of y, and so on. We use a for loop in a list comprehension.",
"z = [x[i] + y[i] for i in range(n)]\nz[:3]",
"How long does this computation take? IPython defines a handy %timeit magic command to quickly evaluate the time taken by a single command.",
"%timeit [x[i] + y[i] for i in range(n)]",
"Now, we will perform the same operation with NumPy. NumPy works on multidimensional arrays, so we need to convert our lists to arrays. The np.array() function does just that.",
"xa = np.array(x)\nya = np.array(y)\n\nxa[:3]",
"The arrays xa and ya contain the exact same numbers than our original lists x and y. Whereas those lists where instances of a built-in class list, our arrays are instances of a NumPy class ndarray. Those types are implemented very differently in Python and NumPy. We will see that, in this example, using arrays instead of lists leads to drastic performance improvements.\nNow, to compute the element-wise sum of these arrays, we don't need to do a for loop anymore. In NumPy, adding two arrays means adding the elements of the arrays component by component.",
"za = xa + ya\nza[:3]",
"We see that the list z and the array za contain the same elements (the sum of the numbers in x and y).\nLet's compare the performance of this NumPy operation with the native Python loop.",
"%timeit xa + ya",
"We observe that this operation is more than one order of magnitude faster in NumPy than in pure Python!\nNow, we will compute something else: the sum of all elements in x or xa. Although this is not an element-wise operation, NumPy is still highly efficient here. The pure Python version uses the built-in sum function on an iterable. The NumPy version uses the np.sum() function on a NumPy array.",
"%timeit sum(x) # pure Python\n%timeit np.sum(xa) # NumPy",
"We also observe an impressive speedup here.\nSciPy\n<img src=\"images/scipylogo.png\">\nConsists of a number of more specific packages:\n\nSpecial functions (scipy.special)\nIntegration (scipy.integrate)\nOptimization (scipy.optimize)\nInterpolation (scipy.interpolate)\nFourier Transforms (scipy.fftpack)\nSignal Processing (scipy.signal)\nLinear Algebra (scipy.linalg)\nSparse Eigenvalue Problems (scipy.sparse)\nStatistics (scipy.stats)\nMulti-dimensional image processing (scipy.ndimage)\nFile IO (scipy.io)\n\nMatplotlib\n<img src=\"images/matplotliblogo.png\">",
"# The IPython Notebook historically has a tight integration with\n# Matplotlib. To display plots rendered inline in the notebook, the\n# ipython magic can be called\n%matplotlib inline\n\n# While it is still possible to include expose the call-for-call%\n# MATLAB-like API with\n# from pylab import *\n# or \n# %pylab inline\n# polluting the working namespace in this way is now discouraged.\n# Instead, the preferred import is\nimport matplotlib.pyplot as plt\n\nx = np.linspace(-5, 5, 100)\ny = x**3\n\nplt.figure()\nplt.plot(x, y, 'g')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('title')\nplt.show()\n\n# Even better is the object-oriented API\nfig = plt.figure()\naxes = fig.add_subplot(111)\naxes.plot(x, y, 'g')\naxes.set_xlabel('x')\naxes.set_xlabel('y')\naxes.set_title('title');",
"This is one of the 100 recipes of the IPython Cookbook, the definitive guide to high-performance scientific computing and data science in Python.\n\nSymPy\n<img src=\"images/sympylogo.png\">\nFirst, we import SymPy, and enable rich display LaTeX-based printing in the IPython notebook (using the MathJax Javascript library).",
"from sympy import *\ninit_printing()\n\n# Create symbolic variables\nx, y = symbols('x y')\n\n# Create mathematical expressions\nexpr1 = (x + 1)**2\nexpr2 = x**2 + 2*x + 1\n\nexpr1\n\nexpr2\n\nexpr1 == expr2\n\nsimplify(expr1-expr2)\n\n# Substitution\nexpr1.subs(x, expr1)\n\nexpr1.subs(x, pi)\n\n# S converts an arbitrary expression to a type that can be used inside\n# SymPy\nexpr1.subs(x, S(1)/2)\n\n# evalf() goes from symbolic to numerical representation\n_.evalf()\n\n# Create a function that evaluates SymPy expressions on NumPy arrays\nf = lambdify(x, expr1)\nf(np.linspace(-2., 2., 5))",
"Pandas\n<img src=\"images/pandaslogo.jpg\">\n\nThis is one of the 100 recipes of the IPython Cookbook, the definitive guide to high-performance scientific computing and data science in Python.",
"# Load a CSV file into a Pandas DataFrame\nimport pandas as pd\nurl = \"http://donnees.ville.montreal.qc.ca/storage/f/2014-01-20T20%3A48%3A50.296Z/2013.csv\"\ndf = pd.read_csv(url, index_col='Date', parse_dates=True, dayfirst=True)\n\ndf.head(2)\n\ndf.describe()",
"Learn more!\n\nSciPy.org Website - A good index of all the SciPy stack documentation and more."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
karst87/ml
|
dev/pyml/datacamp/kaggle-python-tutorial-on-machine-learning/01_getting-started-with-python.ipynb
|
mit
|
[
"getting-started-with-python\nhttps://campus.datacamp.com/courses/kaggle-python-tutorial-on-machine-learning/getting-started-with-python?ex=1\n1. How it works\nhttps://campus.datacamp.com/courses/kaggle-python-tutorial-on-machine-learning/getting-started-with-python?ex=1\nWelcome to our Kaggle Machine Learning Tutorial. In this tutorial, you will explore how to tackle Kaggle Titanic competition using Python and Machine Learning. In case you're new to Python, it's recommended that you first take our free Introduction to Python for Data Science Tutorial. Furthermore, while not required, familiarity with machine learning techniques is a plus so you can get the maximum out of this tutorial.\n\nIn the editor on the right, you should type Python code to solve the exercises. When you hit the 'Submit Answer' button, every line of code is interpreted and executed by Python and you get a message whether or not your code was correct. The output of your Python code is shown in the console in the lower right corner. Python makes use of the # sign to add comments; these lines are not run as Python code, so they will not influence your result.\n\nYou can also execute Python commands straight in the console. This is a good way to experiment with Python code, as your submission is not checked for correctness.\n\nInstructions\nIn the editor to the right, you see some Python code and annotations. This is what a typical exercise will look like.\n\nTo complete the exercise and see how the interactive environment works add the code to compute y and hit the Submit Answer button. Don't forget to print the result.",
"#Compute x = 4 * 3 and print the result\nx = 4 * 3\nprint(x)\n\n#Compute y = 6 * 9 and print the result\ny = 6 * 9\nprint(y)",
"2. Get the Data with Pandas\nhttps://campus.datacamp.com/courses/kaggle-python-tutorial-on-machine-learning/getting-started-with-python?ex=2\nWhen the Titanic sank, 1502 of the 2224 passengers and crew were killed. One of the main reasons for this high level of casualties was the lack of lifeboats on this self-proclaimed \"unsinkable\" ship.\n\nThose that have seen the movie know that some individuals were more likely to survive the sinking (lucky Rose) than others (poor Jack). In this course, you will learn how to apply machine learning techniques to predict a passenger's chance of surviving using Python.\n\nLet's start with loading in the training and testing set into your Python environment. You will use the training set to build your model, and the test set to validate it. The data is stored on the web as csv files; their URLs are already available as character strings in the sample code. You can load this data with the read_csv() method from the Pandas library.\n\nInstructions\nFirst, import the Pandas library as pd.\n\nLoad the test data similarly to how the train data is loaded.\n\nInspect the first couple rows of the loaded dataframes using the .head() method with the code provided.",
"# Import the Pandas library\nimport pandas as pd\n\nkaggle_path = \"http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/\"\n\n# Load the train and test datasets to create two DataFrames\ntrain_url = kaggle_path + \"train.csv\"\ntrain = pd.read_csv(train_url)\n\ntest_url = kaggle_path + \"test.csv\"\ntest = pd.read_csv(test_url)\n\n#Print the `head` of the train and test dataframes\nprint(train.head())\nprint(test.head())",
"3.Understanding your data\nhttps://campus.datacamp.com/courses/kaggle-python-tutorial-on-machine-learning/getting-started-with-python?ex=3\nBefore starting with the actual analysis, it's important to understand the structure of your data. Both test and train are DataFrame objects, the way pandas represent datasets. You can easily explore a DataFrame using the .describe() method. .describe() summarizes the columns/features of the DataFrame, including the count of observations, mean, max and so on. Another useful trick is to look at the dimensions of the DataFrame. This is done by requesting the .shape attribute of your DataFrame object. (ex. your_data.shape)\n\nThe training and test set are already available in the workspace, as train and test. Apply .describe() method and print the .shape attribute of the training set. Which of the following statements is correct?\n\nPossible Answers\nThe training set has 891 observations and 12 variables, count for Age is 714.\nThe training set has 418 observations and 11 variables, count for Age is 891.\nThe testing set has 891 observations and 11 variables, count for Age is 891.\nThe testing set has 418 observations and 12 variables, count for Age is 714.",
"train.describe()\n\ntest.describe()\n\ntrain.shape\n\ntest.shape",
"4. Rose vs Jack, or Female vs Male\nhttps://campus.datacamp.com/courses/kaggle-python-tutorial-on-machine-learning/getting-started-with-python?ex=4\nHow many people in your training set survived the disaster with the Titanic? To see this, you can use the value_counts() method in combination with standard bracket notation to select a single column of a DataFrame:\n\n# absolute numbers\ntrain[\"Survived\"].value_counts()\n# percentages\ntrain[\"Survived\"].value_counts(normalize = True)\n\nIf you run these commands in the console, you'll see that 549 individuals died (62%) and 342 survived (38%). A simple way to predict heuristically could be: \"majority wins\". This would mean that you will predict every unseen observation to not survive.\n\nTo dive in a little deeper we can perform similar counts and percentage calculations on subsets of the Survived column. For example, maybe gender could play a role as well? You can explore this using the .value_counts() method for a two-way comparison on the number of males and females that survived, with this syntax:\n\ntrain[\"Survived\"][train[\"Sex\"] == 'male'].value_counts()\ntrain[\"Survived\"][train[\"Sex\"] == 'female'].value_counts()\n\nTo get proportions, you can again pass in the argument normalize = True to the .value_counts() method.\n\nInstructions\nCalculate and print the survival rates in absolute numbers using values_counts() method.\n\nCalculate and print the survival rates as proportions by setting the normalize argument to True.\n\nRepeat the same calculations but on subsets of survivals based on Sex",
"# absoulte numbers\ntrain['Survived'].value_counts()\n\n# percentages\ntrain['Survived'].value_counts(normalize=True)\n\ntrain['Survived'][train['Sex']=='male'].value_counts()\n\ntrain['Survived'][train['Sex'] =='female'].value_counts()\n\n# Passengers that survived vs passengers that passed away\nprint(train['Survived'].value_counts())\n\n# As proportions\nprint(train['Survived'].value_counts(normalize=True))\n\n# Males that survived vs males that passed away\nprint(train['Survived'][train['Sex']=='male'].value_counts())\n\n# Females that survived vs Females that passed away\nprint(train['Survived'][train['Sex']=='female'].value_counts())\n\n# Normalized male survival\nprint(train['Survived'][train['Sex']=='male'].value_counts(normalize=True))\n\n# Normalized female survival\nprint(train['Survived'][train['Sex']=='female'].value_counts(normalize=True))\n",
"5.Does age play a role?\nhttps://campus.datacamp.com/courses/kaggle-python-tutorial-on-machine-learning/getting-started-with-python?ex=5\nAnother variable that could influence survival is age; since it's probable that children were saved first. You can test this by creating a new column with a categorical variable Child. Child will take the value 1 in cases where age is less than 18, and a value of 0 in cases where age is greater than or equal to 18.\n\nTo add this new variable you need to do two things (i) create a new column, and (ii) provide the values for each observation (i.e., row) based on the age of the passenger.\n\nAdding a new column with Pandas in Python is easy and can be done via the following syntax:\nyour_data[\"new_var\"] = 0\n\nThis code would create a new column in the train DataFrame titled new_var with 0 for each observation.\n\nTo set the values based on the age of the passenger, you make use of a boolean test inside the square bracket operator. With the []-operator you create a subset of rows and assign a value to a certain variable of that subset of observations. For example,\ntrain[\"new_var\"][train[\"Fare\"] > 10] = 1\n\nwould give a value of 1 to the variable new_var for the subset of passengers whose fares greater than 10. Remember that new_var has a value of 0 for all other values (including missing values).\n\nA new column called Child in the train data frame has been created for you that takes the value NaN for all observations.\n\nInstructions\nSet the values of Child to 1 is the passenger's age is less than 18 years.\n\nThen assign the value 0 to observations where the passenger is greater than or equal to 18 years in the new Child column.\n\nCompare the normalized survival rates for those who are <18 and those who are older. Use code similar to what you had in the previous exercise.",
"# Create the column Child and assign to 'NaN'\ntrain[\"Child\"] = float('NaN')\n\n# Assign 1 to passengers under 18, 0 to those 18 or older. Print the new column.\n# train['Child'][train['Age'] >= 18] = 0\n# train['Child'][train['Age'] < 18] = 1\ntrain.loc[train['Age'] >= 18, 'Child'] = 0\ntrain.loc[train['Age'] < 18, 'Child'] = 1\nprint(train['Child'])\n\n# Print normalized Survival Rates for passengers under 18\nprint(train[\"Survived\"][train[\"Child\"] == 1].value_counts(normalize = True))\n\n# Print normalized Survival Rates for passengers 18 or older\nprint(train[\"Survived\"][train[\"Child\"] == 0].value_counts(normalize = True))",
"6.First Prediction\nhttps://campus.datacamp.com/courses/kaggle-python-tutorial-on-machine-learning/getting-started-with-python?ex=6\nIn one of the previous exercises you discovered that in your training set, females had over a 50% chance of surviving and males had less than a 50% chance of surviving. Hence, you could use this information for your first prediction: all females in the test set survive and all males in the test set die.\n\nYou use your test set for validating your predictions. You might have seen that contrary to the training set, the test set has no Survived column. You add such a column using your predicted values. Next, when uploading your results, Kaggle will use this variable (= your predictions) to score your performance.\n\nInstructions\nCreate a variable test_one, identical to dataset test\nAdd an additional column, Survived, that you initialize to zero.\nUse vector subsetting like in the previous exercise to set the value of Survived to 1 for observations whose Sex equals \"female\".\nPrint the Survived column of predictions from the test_one dataset.",
"# Create a copy of test: test_one\ntest_one = test\n\n# Initialize a Survived column to 0\ntest_one['Survived'] = 0\n\n# Set Survived to 1 if Sex equals \"female\" and print the `Survived` column from `test_one`\n# test_one['Survived'][test_one['Sex'] == 'female'] = 1\ntest_one.loc[test_one['Sex'] == 'female', 'Survived'] = 1\nprint(test_one['Survived'])"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
amirziai/learning
|
probabilistic-graphical-models/bayes-net.ipynb
|
mit
|
[
"import numpy as np\nimport pandas as pd\nfrom itertools import product\nfrom functools import reduce\n\nvariables = {\n 'I': ['h', 'vh'],\n 'G': ['A', 'B'],\n 'GPA': ['[0,3)', '[3,3.5)', '[3.5,4.0]']\n}\n\ndf = pd.DataFrame([\n dict(zip(variables.keys(), values))\n for values in product(*variables.values())\n])\ndf['prob'] = np.random.randint(1, 10, len(df))\ndf['prob'] /= df['prob'].sum()\ndf\n\ndef joint_probability(df, y, xs):\n df_y = df[df['G'] == y]\n p_y = df_y['prob'].sum()\n \n df_ = df_y\n l = []\n for col in xs:\n df_ = df_[df_[col] == xs[col]]\n p = df_['prob'].sum()\n l.append(p)\n \n return p_y * reduce(lambda x, y: x * y, l)\n\ndef joint_probability_with_cond_ind(df, y, xs):\n df_y = df[df['G'] == y]\n p_y = df_y['prob'].sum()\n \n l = [\n df_y[df_y[col] == xs[col]]['prob'].sum()\n for col in xs\n ]\n \n return p_y * reduce(lambda x, y: x * y, l)\n\ndef naive_bayes(df, y, xs):\n df_y = df[df['G'] == y]\n p_y = df_y['prob'].sum()\n \n l = [\n df_y[df_y[col] == xs[col]]['prob'].sum()\n for col in xs\n ]\n \n num = p_y * reduce(lambda x, y: x * y, l)\n den = sum([\n reduce(lambda x, y: x * y, [\n df[(df['G'] == y_val) & (df[col] == xs[col])]['prob'].sum()\n for col in xs\n ])\n for y_val in df['G'].unique()\n ])\n \n return num / den",
"Joint distribution\n$$p(Y, X_1, ..., X_N)=p(Y)p(X_1|Y)\\prod_{i=2}^{n}(X_i|X_1, ..., X_{i-1}, y)$$",
"joint_probability(df, 'A', {'GPA': '[3.5,4.0]', 'I': 'vh'})\n\njoint_probability(df, 'A', {'GPA': '[3.5,4.0]', 'I': 'h'})\n\njoint_probability(df, 'A', {'GPA': '[3,3.5)', 'I': 'h'})\n\njoint_probability(df, 'A', {'GPA': '[3,3.5)', 'I': 'vh'})",
"With conditional independence\n$X_1, ..., X_n$ conditionally independent given $Y$\n$$p(Y, X_1, ..., X_N)=p(Y)\\prod_{i=1}^{n}(X_i|Y)$$\nNeed 2n + 1 parameters: 1 for Y~Ber(p) and 2 for each variable because there are two possible values of Y",
"joint_probability_with_cond_ind(df, 'A', {'GPA': '[3.5,4.0]', 'I': 'vh'})\n\njoint_probability_with_cond_ind(df, 'A', {'GPA': '[3.5,4.0]', 'I': 'h'})",
"Naive Bayes",
"naive_bayes(df, 'A', {'GPA': '[3.5,4.0]', 'I': 'vh'})\n\nnaive_bayes(df, 'B', {'GPA': '[3.5,4.0]', 'I': 'vh'})\n\nnaive_bayes(df, 'A', {'GPA': '[3.5,4.0]', 'I': 'h'})\n\nnaive_bayes(df, 'B', {'GPA': '[3.5,4.0]', 'I': 'h'})\n\nnaive_bayes(df, 'A', {'GPA': '[0,3)', 'I': 'h'})\n\nnaive_bayes(df, 'B', {'GPA': '[0,3)', 'I': 'h'})"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
edinburghlivinglab/D4I
|
viz/python/Simplify_Bin_Sensor_Data.ipynb
|
apache-2.0
|
[
"How to get a simple dataset from a big CSV file\nThis notebook goes through some steps to convert a large unwieldy file from the Council's Litter Bin Sensor Project into a simpler dataset for visualisation. We use a number of functions provided by the Python pandas library.\nWe assume that we've already downloaded the file to our local file system. So our first step is to import the CSV file as a pandas DataFrame:",
"import pandas as pd\ntable = pd.read_csv(\"../data/binsensors.csv\")",
"Let's have a look at the column labels:",
"list(table.columns.values)",
"Suppose we just want to select a couple of columns, we can use the column labels like this:",
"table[['ID', 'Address']]",
"But a couple of interesting columns (for the collection date and the weight measured by the sensor) have very complicated labels, so let's simplify them.\nFirst, we'll just make a list of all the labels, then we'll bind the relevant string values to a couple of variables. This means that we don't have to worry about mis-typing things like 'Date & Time of bin collection (Europe/London)!",
"l = list(table.columns.values)\ndate = l[8]\nfill = l[10]\ndate, fill",
"Now that we've got short variables date and time in place of the long strings, let's go ahead and replace those labels with something simpler:",
"table = table.rename(columns={date: 'Date', fill: 'Fill_level'})",
"Now we'll make a new table with just four columns:",
"table1 = table[['ID', 'Address', 'Date', 'Fill_level']]",
"And we'll just take the first 30 rows:",
"tabletop = table1.head(30)\n\ntabletop",
"Finally, we'll write the result to a JSON formatted file.",
"tabletop.to_json('../data/binsensorsimple.json', orient=\"records\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
gfeiden/Notebook
|
Daily/20151123_agb_inner_boundary.ipynb
|
mit
|
[
"RHD Model Atmosphere Inner Boundary\nExploring the properties of RHD model atmosphere inner boundaries for AGB stars. Liljegren finds that some models fail to converge due to a temperature instability. Temperatures at the inner boundary increase rapidly before finally reaching a temperature that is outside of the pre-computed opacity grid.\nThe question becomes, is the temperature instability the result of missing physics near the inner boundary? We suspect that the lack of a description of convection for the inner layers of the atmosphere model are causing a false build-up of heat, leading to the runaway temperature instability. To explore whether this is the case, we'll need to first take a look at the thermodynamic state of the gas in the envelope.\nThermodynamics\nRHD models adopted by Liljegren are based on the RHD model atmosphere developed by Höfner et al. (2003, A&A, 399, 589). For simplicity, and to allow for better comparison with previous studies, the RHD models adopt a perfect gas equation of state with $\\gamma = 5/3$ and $\\mu = 1.26$. The mean molecular weight is that for a non-interacting (i.e., perfect) gas without consideration of ionization states, which effectively equates the mean molecular weight with the mean atomic molecular weight.\nLiljegren provided the following properties of the gas near the inner boundary prior to the temperature instability: $P_{\\rm gas} = 1680\\ {\\rm Ba}$ with $T = 5600\\ {\\rm K}$. Using the perfect gas equation of state, one can derive a density for the gas\n\\begin{equation}\n \\rho = \\frac{\\mu m_p}{k}\\frac{P_{\\rm gas}}{T_{\\rm gas}}\n\\end{equation}\nwhere $k$ is the Boltzmann constant and $m_p$ is the mass of a proton. We therefore find,",
"rho = (1.26*1.6726219e-24/1.3806488e-16)*(1680./5600.)\n\nprint \"Density of the gas [g/cm**3] = {:11.5e}.\".format(rho)",
"We can also easily estimate the thermodynamic properties of the gas under the assumption of a perfect gas. We know that $\\gamma = c_P / c_V = 5/3$. Of importance for our hypothesis is knowing the various thermodynamic temperature gradients, $\\nabla_{\\rm ad}$ and $\\nabla_{\\rm rad}$. The former can be expressed generally as\n\\begin{equation}\n \\nabla_{\\rm ad} = \\frac{P_{\\rm gas} \\delta}{\\rho T_{\\rm gas} c_P},\n\\end{equation}\nwhere $\\delta = - (\\partial\\ln\\rho / \\partial\\ln T_{\\rm gas})P$ (coefficient of thermal expansion) and $c_P$ is the specific heat at constant pressure. The latter can be evaluated using the general thermodynamic relation,\n\\begin{equation}\n c_P - c_V = \\frac{P{\\rm gas} \\delta^2}{\\rho T_{\\rm gas} \\alpha},\n\\end{equation}\nwith $\\alpha = (\\partial\\ln\\rho / \\partial\\ln P_{\\rm gas})T$ (compressibility coefficient). Under the assumption that the gas particles are non-interacting and neglecting ionization, we have $\\alpha = \\delta = 1$. Therefore we can write\n\\begin{equation}\n c_P = \\frac{5}{2} \\frac{P{\\rm gas}}{\\rho T_{\\rm gas}},\n\\end{equation}\nmeaning\n\\begin{equation}\n \\nabla_{\\rm ad} = \\frac{2}{5} = 0.4.\n\\end{equation}\nConvective Stability\nNow, to understand whether convection may or may not be important under these conditions, we can consider a simple comparison of the adiabatic and radiative temperature gradients. Adiabatic temperature gradient is a limiting case for the temperature gradient if the gas is undergoing convection, but should provide an estimate of the super-adiabaticity of the temperature gradient. \nThe radiative temperature gradient is \n\\begin{equation}\n \\nabla_{\\rm rad} = \\left(\\frac{d\\ln T_{\\rm gas}}{d\\ln P_{\\rm gas}}\\right){\\rm rad} = \\frac{3}{16\\pi acG}\\frac{\\kappa P{\\rm gas}}{T_{\\rm gas}^4}\\frac{L_r}{M_r},\n\\end{equation}\nwhere $X_r$ are quantities defined at a radius $r$. For our purposes, we can assume $L_r = L_{\\star}$ and $M_r = M_{\\star}$. This is justified as there are no energy generation sources in the outer envelope of an AGB star and the outer envelope contains a negliglbe fraction of the total mass. We can esitmate the Rosseland mean opacity from low temperature opacity sources (e.g., Ferguson et al. 2005), which yield an opacity with the range of $10^{-2}$ - $10^{-1}\\ {\\rm cm}^2\\, {\\rm g}^{-1}$. Thus,",
"def del_rad(opac, P, T, L, M):\n return 7.62586e9*opac*P*L/(T**4*M)\n\nP = 1680.0 # Ba\nT = 5600.0 # K\nM = 1.5 # Msun\nL = 6953.0 # Rstar = 412 Rsun; Teff = 2600 K\n\nprint \"{:11.5e} < Del_rad < {:11.5e}\".format(del_rad(1.0e-2, P, T, L, M), del_rad(1.0e-1, P, T, L, M))",
"In the case where the radiative opacity is close to $10^{-2}\\ {\\rm cm}^2\\, {\\rm g}^{-1}$, the radiative gradient may exceed the adiabatic temperature gradient, suggesting that layers may be unstable to convective instabilities. It is therefore quite possible that thermodynamic conditions developing at the inner boundary of the RHD atmosphere models are ammenable to convective energy transport.\nSince the inner boundary ignores convection and ionization, a temperature purturbation may lead to a growing instability in the temperature as radiation is becomes less able to transport the flux (at a given temperature) and ionization stages of hydrogen do not exist to alter the specific heat when the temperature begins to rapidly increase toward $8\\,000 - 10\\,000\\ {\\rm K}$. \nA temperature instability is all inevitable once conditions become ammenable to convection (at least within the framework of adiabaic convection). Radiation will be unable to efficiently transport flux and the neglect of physics related to convective flux transport will force the local temperature gradient (and thus the deep interior temperature) to force the flux through the gas using only radiation. \nWe may be able to estimate what temperature is required, assuming the pressure is defined by the condition for hydrostatic equilibrium. The approximate conditions required for radiation to carry all of the flux through an ideal gas (neglecting ionization) is \n\\begin{equation}\n \\nabla_{\\rm rad} = \\nabla_{\\rm ad} = \\frac{2}{5}.\n\\end{equation}\nTherefore, the local temperature can be approximated at the inner boundary\n\\begin{equation}\n T_{\\rm gas} = \\left(\\frac{15}{32\\pi acG} \\kappa P_{\\rm gas} \\frac{L_r}{M_r}\\right )^{1/4}.\n\\end{equation}\nFor conditions at the inner boundary, the temperature must exceed",
"def T_rad(opac, P, L, M):\n return (1.906465e10*opac*P*L/M)**0.25\n\nprint \"{:11.5e} < T < {:11.5e} K\".format(T_rad(1.0e-2, 1680., L, M), T_rad(1.0e-1, 1680., L, M))",
"which produces a density",
"rho1 = (1.26*1.6726219e-24/1.3806488e-16)*(1680./6207.)\nrho2 = (1.26*1.6726219e-24/1.3806488e-16)*(1680./11038.)\n\nprint \"{:11.5e} < Density < {:11.5e} [g cm**-3].\".format(rho1, rho2)",
"Thus, the density could be forced to decrease by a factor of two compared to predictions from a non-ideal gas if the local conditions become favorable to convective instability, but the models neglect physics related to convective flux transport.\nWe have seen that increasing the local temperature will decrease the local density, if the layer is to maintain a constant pressure. Opacity in cool, low density environments is an increasing function of temperature and a decreasing function of density. Therefore, an increase in the local temperature and decrease in the local density will cause a dramatic increase in the opacity."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
jobovy/misc-notebooks
|
inference/ABC-examples.ipynb
|
bsd-3-clause
|
[
"import time\nimport numpy\nfrom galpy.util import bovy_plot\n%pylab inline\nnumpy.random.seed(1)",
"Examples of ABC inference\nCoin flip with two flips\nWe've flipped a coin twice and gotten heads twice. What is the probability for getting heads?",
"data= ['H','H']\noutcomes= ['T','H']\n\ndef coin_ABC():\n while True:\n h= numpy.random.uniform()\n flips= numpy.random.binomial(1,h,size=2)\n if outcomes[flips[0]] == data[0] \\\n and outcomes[flips[1]] == data[1]:\n yield h\n\nhsamples= []\nstart= time.time()\nfor h in coin_ABC():\n hsamples.append(h)\n if time.time() > start+2.: break\nprint \"Obtained %i samples\" % len(hsamples)\n\ndum= hist(hsamples,bins=31,lw=2.,normed=True,zorder=0)\nplot(numpy.linspace(0.,1.,1001),numpy.linspace(0.,1.,1001)**2.*3.,lw=3.)\nxlabel(r'$h$')\nylabel(r'$p(h|D)$')",
"Coin flip with 10 flips\nSame with 10 flips, still matching the entire sequence:",
"data= ['T', 'H', 'H', 'T', 'T', 'H', 'H', 'T', 'H', 'H']\n\ndef coin_ABC_10flips():\n while True:\n h= numpy.random.uniform()\n flips= numpy.random.binomial(1,h,size=len(data))\n if outcomes[flips[0]] == data[0] \\\n and outcomes[flips[1]] == data[1] \\\n and outcomes[flips[2]] == data[2] \\\n and outcomes[flips[3]] == data[3] \\\n and outcomes[flips[4]] == data[4] \\\n and outcomes[flips[5]] == data[5] \\\n and outcomes[flips[6]] == data[6] \\\n and outcomes[flips[7]] == data[7] \\\n and outcomes[flips[8]] == data[8] \\\n and outcomes[flips[9]] == data[9]:\n yield h\n\nhsamples= []\nstart= time.time()\nfor h in coin_ABC_10flips():\n hsamples.append(h)\n if time.time() > start+2.: break\nprint \"Obtained %i samples\" % len(hsamples)\n\ndum= hist(hsamples,bins=31,lw=2.,normed=True,zorder=0)\nxs= numpy.linspace(0.,1.,1001)\nys= xs**numpy.sum([d == 'H' for d in data])*(1.-xs)**numpy.sum([d == 'T' for d in data])\nys/= numpy.sum(ys)*(xs[1]-xs[0])\nplot(xs,ys,lw=3.)\nxlabel(r'$h$')\nylabel(r'$p(h|D)$')",
"Using a sufficient statistic instead:",
"sufficient_data= numpy.sum([d == 'H' for d in data])\ndef coin_ABC_10flips_suff():\n while True:\n h= numpy.random.uniform()\n flips= numpy.random.binomial(1,h,size=len(data))\n if numpy.sum(flips) == sufficient_data:\n yield h\n\nhsamples= []\nstart= time.time()\nfor h in coin_ABC_10flips_suff():\n hsamples.append(h)\n if time.time() > start+2.: break\nprint \"Obtained %i samples\" % len(hsamples)\n\ndum= hist(hsamples,bins=31,lw=2.,normed=True,zorder=0)\nxs= numpy.linspace(0.,1.,1001)\nys= xs**numpy.sum([d == 'H' for d in data])*(1.-xs)**numpy.sum([d == 'T' for d in data])\nys/= numpy.sum(ys)*(xs[1]-xs[0])\nplot(xs,ys,lw=3.)\nxlabel(r'$h$')\nylabel(r'$p(h|D)$')",
"Variance of a Gaussian with zero mean\nNow we infer the variance of a Gaussian with zero mean using ABC:",
"data= numpy.random.normal(size=100)\n\ndef Var_ABC(threshold=0.05):\n while True:\n v= numpy.random.uniform()*4\n sim= numpy.random.normal(size=len(data))*numpy.sqrt(v)\n d= numpy.fabs(numpy.var(sim)-numpy.var(data))\n if d < threshold:\n yield v\n\nvsamples= []\nstart= time.time()\nfor v in Var_ABC(threshold=0.05):\n vsamples.append(v)\n if time.time() > start+2.: break\nprint \"Obtained %i samples\" % len(vsamples)\n\nh= hist(vsamples,range=[0.,2.],bins=51,normed=True)\nxs= numpy.linspace(0.001,2.,1001)\nys= xs**(-len(data)/2.)*numpy.exp(-1./xs/2.*len(data)*(numpy.var(data)+numpy.mean(data)**2.))\nys/= numpy.sum(ys)*(xs[1]-xs[0])\nplot(xs,ys,lw=2.)",
"If we raise the threshold too much, we sample simply from the prior:",
"vsamples= []\nstart= time.time()\nfor v in Var_ABC(threshold=1.5):\n vsamples.append(v)\n if time.time() > start+2.: break\nprint \"Obtained %i samples\" % len(vsamples)\n\nh= hist(vsamples,range=[0.,2.],bins=51,normed=True)\nxs= numpy.linspace(0.001,2.,1001)\nys= xs**(-len(data)/2.)*numpy.exp(-1./xs/2.*len(data)*(numpy.var(data)+numpy.mean(data)**2.))\nys/= numpy.sum(ys)*(xs[1]-xs[0])\nplot(xs,ys,lw=2.)",
"And if we make the threshold too small, we don't get many samples:",
"vsamples= []\nstart= time.time()\nfor v in Var_ABC(threshold=0.001):\n vsamples.append(v)\n if time.time() > start+2.: break\nprint \"Obtained %i samples\" % len(vsamples)\n\nh= hist(vsamples,range=[0.,2.],bins=51,normed=True)\nxs= numpy.linspace(0.001,2.,1001)\nys= xs**(-len(data)/2.)*numpy.exp(-1./xs/2.*len(data)*(numpy.var(data)+numpy.mean(data)**2.))\nys/= numpy.sum(ys)*(xs[1]-xs[0])\nplot(xs,ys,lw=2.)"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
AlbanoCastroSousa/RESSPyLab
|
examples/Old_RESSPyLab_Parameter_Calibration_Orientation_Notebook.ipynb
|
mit
|
[
"Import modules",
"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\nfrom IPython.core.interactiveshell import InteractiveShell\nInteractiveShell.ast_node_interactivity = \"all\"\n\nimport RESSPyLab",
"1 - Load an experiment\nMake a list of pandas dataframes with (clean) experimental data from a csv file. This is done with the pandas package from data in csv files. Two columns should be included in the csv file with true strain (\"e_true\") and true stress (\"Sigma_true\").",
"\ntestFileNames=['example_1.csv']\n\n\nlistCleanTests=[]\n\nfor testFileName in testFileNames:\n \n test=pd.read_csv(testFileName)\n \n listCleanTests.append(test)\n\n",
"2 - Determine Voce and Chaboche material parameters with either VCopt_SVD or VCopt_J\nThere are two arguments to VCopt: an initial starting point for the parameters (\"x_0\") and the list of tests previously assembled.\nThe parameters are gathered in list in the following order:\n[E, sy0, Qinf, b, C_1, gamma_1, C_2, gamma_2, ..., ..., C_k, gamma_k]\nA recommended initial point is an elastic perfectly plastic model with the nominal values of the elastic modulus and the yield stress. All other values are, therefore, set to zero. For numerical purposes a minimum 1e-1 is used.\nThe examples herein are from an S355J2 steel. Nominal values are therefore: E=200e3MPa sy0=355MPa",
"x_0=[200e3,355,1e-1,1e-1,1e-1,1e-1]\n\nsol=RESSPyLab.VCopt_SVD(x_0,listCleanTests)\n\nprint(sol)\n\nx_0=[200e3,355,1e-1,1e-1,1e-1,1e-1]\n\nsol=RESSPyLab.VCopt_J(x_0,listCleanTests)\n\nprint(sol)",
"3 - Use the solution point to plot experiment vs simulation",
"simCurve=RESSPyLab.VCsimCurve(sol,test)\n\nplt.plot(test['e_true'],test['Sigma_true'],c='r',label='Test')\nplt.plot(simCurve['e_true'],simCurve['Sigma_true'],c='k',label='RESSPyLab')\nplt.legend(loc='best')\nplt.xlabel('True strain')\nplt.ylabel('True stress')",
"4 - Example with multiple tests",
"\ntestFileNames=['example_1.csv','example_2.csv']\n\n\nlistCleanTests=[]\n\nfor testFileName in testFileNames:\n \n test=pd.read_csv(testFileName)\n \n listCleanTests.append(test)\n\n\n\nx_0=[200e3,355,1e-1,1e-1,1e-1,1e-1]\n\nsol=RESSPyLab.VCopt_SVD(x_0,listCleanTests)\n\nprint(sol)\n\nx_0=[200e3,355,1e-1,1e-1,1e-1,1e-1]\n\nsol=RESSPyLab.VCopt_J(x_0,listCleanTests)\n\nprint(sol)\n\ntest=pd.read_csv('example_1.csv')\n\nsimCurve=RESSPyLab.VCsimCurve(sol,test)\n\nplt.plot(test['e_true'],test['Sigma_true'],c='r',label='Test')\nplt.plot(simCurve['e_true'],simCurve['Sigma_true'],c='k',label='RESSPyLab')\nplt.legend(loc='best')\nplt.xlabel('True strain')\nplt.ylabel('True stress')\n\ntest=pd.read_csv('example_2.csv')\n\nsimCurve=RESSPyLab.VCsimCurve(sol,test)\n\nplt.plot(test['e_true'],test['Sigma_true'],c='r',label='Test')\nplt.plot(simCurve['e_true'],simCurve['Sigma_true'],c='k',label='RESSPyLab')\nplt.legend(loc='best')\nplt.xlabel('True strain')\nplt.ylabel('True stress')"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
hetaodie/hetaodie.github.io
|
assets/media/uda-ml/fjd/ica/独立成分分析/Independent Component Analysis Lab [SOLUTION]-zh.ipynb
|
mit
|
[
"独立成分分析 Lab\n在此 notebook 中,我们将使用独立成分分析方法从三个观察结果中提取信号,每个观察结果都包含不同的原始混音信号。这个问题与 ICA 视频中解释的问题一样。\n数据集\n首先看看手头的数据集。我们有三个 WAVE 文件,正如我们之前提到的,每个文件都是混音形式。如果你之前没有在 python 中处理过音频文件,没关系,它们实际上就是浮点数列表。\n首先加载第一个音频文件 ICA_mix_1.wav [点击即可聆听该文件]:",
"import numpy as np\nimport wave\n\n# Read the wave file\nmix_1_wave = wave.open('ICA_mix_1.wav','r')",
"我们看看该 wave 文件的参数,详细了解该文件",
"mix_1_wave.getparams()",
"该文件只有一个声道(因此是单声道)。帧率是 44100,表示每秒声音由 44100 个整数组成(因为文件是常见的 PCM 16 位格式,所以是整数)。该文件总共有 264515 个整数/帧,因此时长为:",
"264515/44100",
"我们从该 wave 文件中提取帧,这些帧将属于我们将运行 ICA 的数据集:",
"# Extract Raw Audio from Wav File\nsignal_1_raw = mix_1_wave.readframes(-1)\nsignal_1 = np.fromstring(signal_1_raw, 'Int16')",
"signal_1 现在是一个整数列表,表示第一个文件中包含的声音。",
"'length: ', len(signal_1) , 'first 100 elements: ',signal_1[:100]",
"如果将此数组绘制成线形图,我们将获得熟悉的波形:",
"import matplotlib.pyplot as plt\n\nfs = mix_1_wave.getframerate()\ntiming = np.linspace(0, len(signal_1)/fs, num=len(signal_1))\n\n\nplt.figure(figsize=(12,2))\nplt.title('Recording 1')\nplt.plot(timing,signal_1, c=\"#3ABFE7\")\nplt.ylim(-35000, 35000)\nplt.show()",
"现在我们可以按照相同的方式加载另外两个 wave 文件 ICA_mix_2.wav 和 ICA_mix_3.wav",
"\nmix_2_wave = wave.open('ICA_mix_2.wav','r')\n\n#Extract Raw Audio from Wav File\nsignal_raw_2 = mix_2_wave.readframes(-1)\nsignal_2 = np.fromstring(signal_raw_2, 'Int16')\n\n\nmix_3_wave = wave.open('ICA_mix_3.wav','r')\n\n#Extract Raw Audio from Wav File\nsignal_raw_3 = mix_3_wave.readframes(-1)\nsignal_3 = np.fromstring(signal_raw_3, 'Int16')\n\n\nplt.figure(figsize=(12,2))\nplt.title('Recording 2')\nplt.plot(timing,signal_2, c=\"#3ABFE7\")\nplt.ylim(-35000, 35000)\nplt.show()\n\n\nplt.figure(figsize=(12,2))\nplt.title('Recording 3')\nplt.plot(timing,signal_3, c=\"#3ABFE7\")\nplt.ylim(-35000, 35000)\nplt.show()\n",
"读取所有三个文件后,可以通过 zip 运算创建数据集。\n\n通过将 signal_1、signal_2 和 signal_3 组合成一个列表创建数据集 X",
"X = list(zip(signal_1, signal_2, signal_3))\n\n# Let's peak at what X looks like\nX[:10]",
"现在准备运行 ICA 以尝试获取原始信号。\n\n导入 sklearn 的 FastICA 模块\n初始化 FastICA,查看三个成分\n使用 fit_transform 对数据集 X 运行 FastICA 算法",
"# TODO: Import FastICA\nfrom sklearn.decomposition import FastICA\n\n# TODO: Initialize FastICA with n_components=3\nica = FastICA(n_components=3)\n\n# TODO: Run the FastICA algorithm using fit_transform on dataset X\nica_result = ica.fit_transform(X)\n\nica_result.shape",
"我们将其拆分为单独的信号并查看这些信号",
"result_signal_1 = ica_result[:,0]\nresult_signal_2 = ica_result[:,1]\nresult_signal_3 = ica_result[:,2]",
"我们对信号进行绘制,查看波浪线的形状",
"# Plot Independent Component #1\nplt.figure(figsize=(12,2))\nplt.title('Independent Component #1')\nplt.plot(result_signal_1, c=\"#df8efd\")\nplt.ylim(-0.010, 0.010)\nplt.show()\n\n# Plot Independent Component #2\nplt.figure(figsize=(12,2))\nplt.title('Independent Component #2')\nplt.plot(result_signal_2, c=\"#87de72\")\nplt.ylim(-0.010, 0.010)\nplt.show()\n\n# Plot Independent Component #3\nplt.figure(figsize=(12,2))\nplt.title('Independent Component #3')\nplt.plot(result_signal_3, c=\"#f65e97\")\nplt.ylim(-0.010, 0.010)\nplt.show()\n\n",
"某些波浪线看起来像音乐波形吗?\n确认结果的最佳方式是聆听生成的文件。另存为 wave 文件并进行验证。在此之前,我们需要:\n\n将它们转换为整数(以便另存为 PCM 16 位 Wave 文件),否则只有某些媒体播放器能够播放它们\n将值映射到 int16 音频的相应范围内。该范围在 -32768 到 +32767 之间。基本的映射方法是乘以 32767。\n音量有点低,我们可以乘以某个值(例如 100)来提高音量",
"from scipy.io import wavfile\n\n# Convert to int, map the appropriate range, and increase the volume a little bit\nresult_signal_1_int = np.int16(result_signal_1*32767*100)\nresult_signal_2_int = np.int16(result_signal_2*32767*100)\nresult_signal_3_int = np.int16(result_signal_3*32767*100)\n\n\n# Write wave files\nwavfile.write(\"result_signal_1.wav\", fs, result_signal_1_int)\nwavfile.write(\"result_signal_2.wav\", fs, result_signal_2_int)\nwavfile.write(\"result_signal_3.wav\", fs, result_signal_3_int)",
"现在生成的文件包括:[注意:确保先调低音响设备的音量,以防某些问题导致文件听起来像静态的]\n\nresult_signal_1.wav\nresult_signal_2.wav\nresult_signal_3.wav\n\n音乐:\n\n钢琴 - The Carnival of the Animals - XIII. The Swan(钢琴曲独奏版)。演奏者:Markus Staab\n大提琴 - Cello Suite no. 3 in C, BWV 1009 - I. Prelude。演奏者:European Archive"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
DillmannFrench/Intro-PYTHON
|
Cours03_DILLMANN_ISEP2016.ipynb
|
gpl-3.0
|
[
"1 Variables\n1.1 Qu'est ce qu'une Variable ?\n1.1.1 Contenu\nLe contenu d'une variable est une valeur. Il s'agit forcément d'une instance d'un objet , stockée en mémoire. Celle-ci est donc reliée à une classe. Le type de l'instance est le nom de la classe.",
"print(type(42))",
"Suposons que l'on veux calculer l'aire d'un cercle $A=\\pi.r^2$, où $r$ est le rayon, qui peux être variable et $\\pi=3.14159$ pour simplifier.",
"pi=3.14159\nr=11.2\nA=pi*r**2\n\nprint(\"L'aire du disque de rayon {0} m, est A={1} m^2\".format(r,A))\nprint(\"l'adresse du rayon est {0}, celle de l'aire est {1}\".format(id(r),id(A)))\n\nr=14.3\nA=pi*r**2\n######## On à changé le rayon\nprint(\"L aire du disque de rayon {0} m, est A={1} m^2\".format(r,A))\nprint(\"l'adresse du rayon est {0}, celle de l'aire est {1}\".format(id(r),id(A)))",
"1.1.2 Contenant\nLe contenant n'est rien d'autre que l'association par adressage, d'un nom et d'un pointeur vers le contennu, soit la valeur associé à ce nom.\nL'affectation est l'opération qui permet d'associer un contenu (opérande de droite) à un contennant (opérande de gauche) et donc d'associer un nom avec un pointeur vers une valeur.\nL'unique moyen de suprimer cette association entre contennu et contennant est de supprimer m'asociation entre le nom et le pointeur.",
"del(r)\nr=15e-3\nA=pi*r**2\nprint(\"L aire du disque de rayon {0} m, est A={1} m^2\".format(r,A))\nprint(\"l'adresse du rayon est {0}, celle de l'aire est {1}\".format(id(r),id(A)))",
"1.2 Les variables on une etendue limitée\n\n\nPar leur place dans le programme\nPar leur valeur, au moment de déclarer leur contennu\nPar leur type, au moment de déclarer leur contennant \nAu moment de déclarer leur nom (regles strictes sur les termes réservés par Python)\n\n\n1.2.1 Les Keywords ou noms réservés (30) + True, False et ...None\n\n\nand \nas \nassert \nbreak \nclass \ncontinue\ndef\ndel \nelif\nelse \nexcept \nfinally\nfor\nfrom \nglobal\nif \nimport\nin \nis \nlambda \nnonlocal \nnot \nor\npass\nraise\nreturn \ntry \nwhile \nwith \nyield \n\n\n1.2.2 Modes de modification d'une variable",
"#Variables permetant la reafectation\na=(1,)\nprint(\"le Type de la variable a est {}\".format(type(a)))\nprint(\"Son identifiant est {}\".format(id(a)))\n\na+=(2,)\nprint(\"le Type de la variable a est {}\".format(type(a)))\nprint(\"Son identifiant est {}\".format(id(a)))\n\n#Variables permetant le changement de place sans réaffectation\na=[1]\nprint(\"le Type de la variable a est {}\".format(type(a)))\nprint(\"Son identifiant est {}\".format(id(a)))\n\na+=[2]\nprint(\"le Type de la variable a est {}\".format(type(a)))\nprint(\"Son identifiant est {}\".format(id(a)))",
"1.3 Visibilité des variables\nSur une console, toute variable déclarée par une affectation dans une instruction indépendante est accessible depuis n'importe quel endroit.",
"# Ici on relance le Kernel, ce qui néttoie toute affectation\n\n%reset\n\nMy_Variable_Number=42\nglobals()",
"Certains blocs introduisent un nouvel espace de nomage",
"# On déclare une variable\nMy_Variable_Fetiche_Number=42\n# Puis on cree une fonction ou cette même variable est déclarée\n\n\ndef f():\n My_Variable_Fetiche_Number=4200000000000000000000\n print('Dans la fonction la valeur de ma variable fétiche est {}'.format(My_Variable_Fetiche_Number))\n\n \n# On execute la fonction\nf()\n# On veux savoir quelle est la valeur de la variable\nprint('La valeur de ma variable fétiche est {}'.format(My_Variable_Fetiche_Number))",
"2 Fonctions\nLes fonctions sont indispensables en programmation pour créer des entrées-sorties\n2.1 Déclaration des Fonctions\nUtilisation du mot clef def",
"# Here a function that prints something visual\n# This function has no output variable\n\ndef do_twice(f):\n \"\"\"\n This function has a docstring\n It executes twice the function f\n The program it is used by functions:\n \n - do_eight()\n - print_posts()\n - print_beams()\n\n \"\"\"\n f()\n f()\n\ndef do_eight(f):\n \"\"\"\n Usage:\n do_eight(function)\n It executes eight times the passed function\n \n Input:\n The function f is passed as a variable\n No global variables are passed \n \n Local variables\n No local variables are needed\n \n Dependencies\n The program it is used by functions:\n \n -print_grid()\n\n\n \"\"\"\n do_twice(f)\n do_twice(f)\n do_twice(f)\n do_twice(f)\n \n\ndef print_beam():\n print('| - - - -',end=\" \"),\n\ndef print_post():\n print('| ',end=\" \"),\n\ndef print_beams():\n do_twice(print_beam)\n print('|',end=\"\\n\")\n\ndef print_posts():\n do_twice(print_post)\n print('|',end=\"\\n\")\n\ndef print_row():\n print_beams()\n \ndef print_grid():\n print_row()\n do_eight(print_posts)\n\nTitre1='Premier'\nTitre2='Second'\nprint('| '+Titre1+' | '+Titre2+' |') \nprint_grid()",
"| Premier | Second |\n| - - - - | - - - - |\n| | |\n| | |\n| | |\n| | |",
"## On appelle de 'Doctring' de la fonction do_eight\n## \nhelp(do_eight)\n\ndef My_function_scare_of_n(n):\n n *= n\n return n\n #print(n)\n\nn=5 \nCarre=My_function_scare_of_n(n)\nprint(\"Le carré de {0} est {1}\".format(n,Carre))\n\n# Voici une fonction qui rend le carré de n\n\ndef My_function_scare_of_n(n):\n square = n*n \n return square\n\n# Je l'execute\nMy_number_to_scare = 5\nprint(\"le resultat de {0} au carré est {1}\" \\\n .format(My_number_to_scare,My_function_scare_of_n(My_number_to_scare)))",
"Attention return est indispensable pour que cette fonction rende quelque chose",
"# Voici une fonction ne rend rien mais c'est prévu\n# “Pass\" means this method has not been implemented yet, but this will be the place to do it\ndef update_agent(agent):\n s='Congratulate '+agent.upper()+' he doubles his salary'\n return s\n\ndef time_step(agents):\n for agent in agents:\n s=update_agent(agent)\n print('Case of Agent {}:'.format(agent))\n print(s) \n #On appelle cette fonction à l'intérieur\n #Mais pour le moment je ne sais pas \n #ce que je vais faire avec les agents\n\nagents=['bob','jim','joe']\ntime_step(agents)\n\n# Voici une fonction ne rend rien mais c'est une ERREUR DE PROGRAMMATION\n# “Pass\" means this method has not been implemented yet, but this will be the place to do it\ndef My_function(self):\n My_Variable_Fetiche_Number=4200000000\n \nprint('La valeur de ma variable fétiche est {}'.format(My_Variable_Fetiche_Number))",
"2.2 Utilisation des fonction pour le jeu du Pendu",
"import random\n\nwords = ['chicken', 'dog', 'cat', 'mouse', 'frog','horse','pig']\n\ndef pick_a_word():\n\tword_position = random.randint(0, len(words) - 1)\n\treturn words[word_position]\n\nword=pick_a_word()\nprint('The secret word is \"{}\" '.format(word))\n\nlives_remaining = 14 #this will become a global variable\nguessed_letters = ''",
"~~~ Python\nHere are the first STUBS\ndef get_guess(word):\n pass\ndef process_guess(guess, word):\n pass\n\ndef play():\n word = pick_a_word()\n while True:\n guess = get_guess(word)\n if process_guess(guess, word):\n print('You win! Well Done!')\n break\n if lives_remaining == 0:\n print('You are Hung!')\n print('The word was: ' + word)\n break\nplay()\n~~~",
"import random\n\nwords = ['chicken', 'dog', 'cat', 'mouse', 'frog']\nlives_remaining = 14\n\n############A do-always-wrong STUB\ndef get_guess(word):\n\treturn 'a'\n############################\n \ndef play():\n\tword = pick_a_word()\n\twhile True:\n\t\tguess = get_guess(word)\n\t\tif process_guess(guess, word):\n\t\t\tprint('You win! Well Done!')\n\t\t\tbreak\n\t\tif lives_remaining == 0:\n\t\t\tprint('You are Hung!')\n\t\t\tprint('The word was: ' + word)\n\t\t\tbreak\n\ndef pick_a_word():\n\tword_position = random.randint(0, len(words) - 1)\n\treturn words[word_position]\n\n\ndef process_guess(guess, word):\n\tglobal lives_remaining\n\tlives_remaining = lives_remaining -1\n\treturn False\n\t\nplay()",
"~~~ Python\n\ndef print_word_with_blanks(word):\n pass\n\ndef get_guess(word):\n print_word_with_blanks(word)\n print('Lives Remaining: ' + str(lives_remaining))\n guess = input(' Guess a letter or whole word?')\n return guess\nget_guess(word)\n~~~",
"#04_06_hangman_get_guess\n\nimport random\n\nwords = ['chicken', 'dog', 'cat', 'mouse', 'frog']\nlives_remaining = 14\n\ndef play():\n\tword = pick_a_word()\n\twhile True:\n\t\tguess = get_guess(word)\n\t\tif process_guess(guess, word):\n\t\t\tprint('You win! Well Done!')\n\t\t\tbreak\n\t\tif lives_remaining == 0:\n\t\t\tprint('You are Hung!')\n\t\t\tprint('The word was: ' + word)\n\t\t\tbreak\n\ndef pick_a_word():\n\tword_position = random.randint(0, len(words) - 1)\n\treturn words[word_position]\n\t\ndef get_guess(word):\n\tprint_word_with_blanks(word)\n\tprint('Lives Remaining: ' + str(lives_remaining))\n\tguess = input(' Guess a letter or whole word?')\n\treturn guess\n\ndef process_guess(guess, word):\n\tglobal lives_remaining\n\tlives_remaining = lives_remaining -1\n\treturn False\n\t\ndef print_word_with_blanks(word):\n\tprint('print_word_with_blanks: not done yet')\n\t\nplay()\n\nguessed_letters=''\n\ndef print_word_with_blanks(word):\n\tdisplay_word = ''\n\tfor letter in word:\n\t\tif guessed_letters.find(letter) > -1:\n\t\t\t# letter found\n\t\t\tdisplay_word = display_word + letter\n\t\telse:\n\t\t\t# letter not found\n\t\t\tdisplay_word = display_word + '-'\n\tprint(display_word)\n",
"Pour le moment process_guess est toujours un stub\n~~~ Python\n# STUB\ndef single_letter_guess(guess, word):\n pass\ndef whole_word_guess(guess, word):\n pass\n\ndef process_guess(guess, word):\n if len(guess) > 1 and len(guess) == len(word):\n return whole_word_guess(guess, word)\n else:\n return single_letter_guess(guess, word)\n~~~",
"#04_07_hangman_print_word\n\nimport random\n\nwords = ['chicken', 'dog', 'cat', 'mouse', 'frog']\nlives_remaining = 14\nguessed_letters = ''\n\ndef play():\n\tword = pick_a_word()\n\twhile True:\n\t\tguess = get_guess(word)\n\t\tif process_guess(guess, word):\n\t\t\tprint('You win! Well Done!')\n\t\t\tbreak\n\t\tif lives_remaining == 0:\n\t\t\tprint('You are Hung!')\n\t\t\tprint('The word was: ' + word)\n\t\t\tbreak\n\ndef pick_a_word():\n\tword_position = random.randint(0, len(words) - 1)\n\treturn words[word_position]\n\t\ndef get_guess(word):\n\tprint_word_with_blanks(word)\n\tprint('Lives Remaining: ' + str(lives_remaining))\n\tguess = input(' Guess a letter or whole word?')\n\treturn guess\n\ndef process_guess(guess, word):\n\tglobal lives_remaining\n\tglobal guessed_letters\n\tlives_remaining = lives_remaining - 1\n\tguessed_letters = guessed_letters + guess\n\treturn False\n\t\ndef print_word_with_blanks(word):\n\tdisplay_word = ''\n\tfor letter in word:\n\t\tif guessed_letters.find(letter) > -1:\n\t\t\t# letter found\n\t\t\tdisplay_word = display_word + letter\n\t\telse:\n\t\t\t# letter not found\n\t\t\tdisplay_word = display_word + '-'\n\tprint(display_word)\n\t\nplay()",
"On n'a pas encore gagné... et en plus on voudrait que le joueur puisse entrer une lettre ou un mot complet...\nOn va maintenat ecrire les fonction single_letter_guess et whole_world_guess",
"def whole_word_guess(guess, word):\n\tglobal lives_remaining\n\tif guess.lower() == word.lower():\n\t\treturn True\n\telse:\n\t\tlives_remaining = lives_remaining - 1\n\t\treturn False\n\ndef single_letter_guess(guess, word):\n\tglobal guessed_letters\n\tglobal lives_remaining\n\tif word.find(guess) == -1:\n\t\t# letter guess was incorrect\n\t\tlives_remaining = lives_remaining - 1\n\tguessed_letters = guessed_letters + guess.lower()\n\tif all_letters_guessed(word):\n\t\treturn True\n\treturn False\n\ndef all_letters_guessed(word):\n\tfor letter in word:\n\t\tif guessed_letters.find(letter.lower()) == -1:\n\t\t\treturn False\n\treturn True\n",
"Maintenant le jeu est complet",
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 20 22:11:59 2016\n\"\"\"\n\nimport random\n\nwords = ['chicken', 'dog', 'cat', 'mouse', 'frog']\nlives_remaining = 14\nguessed_letters = ''\n\n\n\ndef play():\n\tword = pick_a_word()\n\twhile True:\n\t\tguess = get_guess(word)\n\t\tif process_guess(guess, word):\n\t\t\tprint('You win! Well Done!')\n\t\t\tbreak\n\t\tif lives_remaining == 0:\n\t\t\tprint('You are Hung!')\n\t\t\tprint('The word was: ' + word)\n\t\t\tbreak\n\t\ndef pick_a_word():\n\tword_position = random.randint(0, len(words) - 1)\n\treturn words[word_position]\n\t\ndef get_guess(word):\n\tprint_word_with_blanks(word)\n\tprint('Lives Remaining: ' + str(lives_remaining))\n\tguess = input(' Guess a letter or whole word?')\n\treturn guess\n\ndef print_word_with_blanks(word):\n\tdisplay_word = ''\n\tfor letter in word:\n\t\tif guessed_letters.find(letter) > -1:\n\t\t\t# letter found\n\t\t\tdisplay_word = display_word + letter\n\t\telse:\n\t\t\t# letter not found\n\t\t\tdisplay_word = display_word + '-'\n\tprint(display_word)\n\t\t\t\ndef process_guess(guess, word):\n\tif len(guess) > 1 and len(guess) == len(word):\n\t\treturn whole_word_guess(guess, word)\n\telse:\n\t\treturn single_letter_guess(guess, word)\n\n\t\t\t\ndef whole_word_guess(guess, word):\n\tglobal lives_remaining\n\tif guess.lower() == word.lower():\n\t\treturn True\n\telse:\n\t\tlives_remaining = lives_remaining - 1\n\t\treturn False\n\ndef single_letter_guess(guess, word):\n\tglobal guessed_letters\n\tglobal lives_remaining\n\tif word.find(guess) == -1:\n\t\t# letter guess was incorrect\n\t\tlives_remaining = lives_remaining - 1\n\tguessed_letters = guessed_letters + guess.lower()\n\tif all_letters_guessed(word):\n\t\treturn True\n\treturn False\n\ndef all_letters_guessed(word):\n\tfor letter in word:\n\t\tif guessed_letters.find(letter.lower()) == -1:\n\t\t\treturn False\n\treturn True\n\t\nplay()\n\n# Here a function that prints something visual\n# This function has no output variable\n\ndef do_twice(f):\n \"\"\"\n Usage: It executes twice the function f\n Inputs: It requires a function as argument\n Links: The program it is used by functions:\n \n - do_eight()\n - print_posts()\n - print_beams()\n\n \"\"\"\n f()\n f()\n\ndef do_eight(f):\n \"\"\"\n This function has a docstring\n It executes eight times the function\n \n The program it is used by functions:\n \n -print_grid()\n\n\n \"\"\"\n do_twice(f)\n do_twice(f)\n do_twice(f)\n do_twice(f)\n \n\ndef print_beam():\n print('| - - - -',end=\" \"),\n\ndef print_post():\n print('| ',end=\" \"),\n\ndef print_beams():\n do_twice(print_beam)\n print('|',end=\"\\n\")\n\ndef print_posts():\n do_twice(print_post)\n print('|',end=\"\\n\")\n\ndef print_row():\n print_beams()\n \ndef print_grid():\n print_row()\n do_eight(print_posts)\n\nTitre1='Premier'\nTitre2='Second'\nprint('| '+Titre1+' | '+Titre2+' |') \nprint_grid()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
cloudera/ibis
|
docs/source/tutorial/06-Advanced-Topics-ComplexFiltering.ipynb
|
apache-2.0
|
[
"Advanced Topics: Additional Filtering\nThe filtering examples we've shown to this point have been pretty simple, either comparisons between columns or fixed values, or set filter functions like isin and notin. \nIbis supports a number of richer analytical filters that can involve one or more of:\n\nAggregates computed from the same or other tables\nConditional aggregates (in SQL-speak these are similar to \"correlated subqueries\")\n\"Existence\" set filters (equivalent to the SQL EXISTS and NOT EXISTS keywords)\n\nSetup",
"import os\nimport ibis\n\nibis.options.interactive = True\n\nconnection = ibis.sqlite.connect(os.path.join('data', 'geography.db'))",
"Using scalar aggregates in filters",
"countries = connection.table('countries')\ncountries.limit(5)",
"We could always compute some aggregate value from the table and use that in another expression, or we can use a data-derived aggregate in the filter. Take the average of a column. For example the average of countries size:",
"countries.area_km2.mean()",
"You can use this expression as a substitute for a scalar value in a filter, and the execution engine will combine everything into a single query rather than having to access the database multiple times. For example, we want to filter European countries larger than the average country size in the world. See how most countries in Europe are smaller than the world average:",
"cond = countries.area_km2 > countries.area_km2.mean()\nexpr = countries[(countries.continent == 'EU') & cond]\nexpr",
"Conditional aggregates\nSuppose that we wish to filter using an aggregate computed conditional on some other expressions holding true.\nFor example, we want to filter European countries larger than the average country size, but this time of the average in Africa. African countries have an smaller size compared to the world average, and France gets into the list:",
"conditional_avg = countries[countries.continent == 'AF'].area_km2.mean()\ncountries[(countries.continent == 'EU') & (countries.area_km2 > conditional_avg)]",
"\"Existence\" filters\nSome filtering involves checking for the existence of a particular value in a column of another table, or amount the results of some value expression. This is common in many-to-many relationships, and can be performed in numerous different ways, but it's nice to be able to express it with a single concise statement and let Ibis compute it optimally.\nAn example could be finding all countries that had any year with a higher GDP than 3 trillion US dollars:",
"gdp = connection.table('gdp')\ngdp\n\ncond = ((gdp.country_code == countries.iso_alpha3) &\n (gdp.value > 3e12)).any()\n\ncountries[cond]['name']",
"Note how this is different than a join between countries and gdp, which would return one row per year. The method .any() is equivalent to filtering with a subquery.\nFiltering in aggregations\nSuppose that you want to compute an aggregation with a subset of the data for only one of the metrics / aggregates in question, and the complete data set with the other aggregates. Most aggregation functions are thus equipped with a where argument. Let me show it to you in action:",
"arctic = countries.name.isin(['United States',\n 'Canada',\n 'Finland',\n 'Greenland',\n 'Iceland',\n 'Norway',\n 'Russia',\n 'Sweden'])\n\nmetrics = [countries.count().name('# countries'),\n countries.population.sum().name('total population'),\n countries.population.sum(where=arctic).name('population arctic countries')]\n\n(countries.groupby(countries.continent)\n .aggregate(metrics))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
sailuh/perceive
|
Notebooks/CVE_Details/cve_details_introduction.ipynb
|
gpl-2.0
|
[
"Introduction\nLet's start our exploration of how CVE Details create types by examining this sample table of one of CVE Details 13 types. Specifically, this table refers to all entries labeled SQL Injection in 2016.",
"#using panda \nimport pandas as pd\nimport csv\nimport numpy as np\nimport math\n\n#We specify the cwe_id column type is str, otherwise pandas will infer it as a float adding a misleading decimal.\ncved_df = pd.read_csv(\"cve_details_sql_injection_2016.csv\",dtype={'cwe_id': str})\ncved_df['month'] = [d[5:7] for d in cved_df['published_date']]\ncved_df\n#np.unique(cved_df['vulnerability_type'])",
"Notice that the column vulnerability_type not only includes sql injection. It may also cite other identified types out of the 13. For instance, row 4 value is SQL XSS, indicating the entry is both of SQL and XSS types. \nImportant: Entries labeled with multiple types WILL appear, accordingly, on the tables. A combination of tables must be done carefully when collecting textual descriptions, to avoid redundance! \nAnalysis\nTo begin the analysis, let's consider how cwe ids, which by definition group vulnerabilities, are distributed across the type for this year and SQL Injection. Notice a cwe_id may or not occur, but the type is always defined. cve_id's, always occur, hence defining the upper bound a vulnerability type could had.",
"vulnerability_type_histogram = cved_df.groupby(by=['vulnerability_type'])['cwe_id','cve_id'].count()\nvulnerability_type_histogram\n",
"We can note some combinations of type occur much more frequently than others.\nLet's explore further the vulnerability types proposed by CVE Details, by not only counting the number of cwe id's per vulnerability type, but also what are the cwe_id'per type. Out of curiosity, let's also include the number of exploits that were reported.",
"vulnerability_type_histogram = cved_df.groupby(by=['vulnerability_type','cwe_id'])['cve_id','n_exploits'].count()\nprint(vulnerability_type_histogram)\nvulnerability_list = np.unique(cved_df['vulnerability_type'])\nvulnerability_by_month = cved_df.groupby(by=['vulnerability_type','month'])['cve_id'].count()\n\n",
"A pattern emerges in the construction of the types: For vulnerability types with a higher number of cwe entries, this higher number is led by a single cwe id. This is the case for 3 vulnerability types in the table above: Dir.Trav. being led by cwe_id 22, Exec Code Sql being led by cwe_id 89, and vulnerability type SQL on cwe_id 89. \nAdditionally, we should remember that the column vulnerability type can be a combination of 1 or more types of the 13 proposed by CVE Details. Specifically, Exec Code Sql actually refers to the type Code Execution and the type SQL. \nWe can also observe just the distribution of cwe_id's reported for SQL table for 2016.",
"vulnerability_histogram = cved_df.groupby(by=['cwe_id'])['cve_id'].count()\nvulnerability_histogram",
"Visualization\nVulnerability type histogram",
"#imports for histogram\nimport numpy as np\nimport pandas as pd\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import Range1d\nfrom bokeh.io import output_notebook\nfrom bokeh.charts import Bar\nimport matplotlib.pyplot as plot\nfrom datetime import datetime\noutput_notebook() \n\n#creating a histogram for vulnerability types by creating a dictionary\nvulnerability_type_histogram = cved_df.groupby(by=['vulnerability_type'])['cve_id'].count()\ndata = {}\ndata['Entries'] = vulnerability_type_histogram\n#saving in dictionary for sorting and visualising\ndf_data = pd.DataFrame(data).sort_values(by='Entries', ascending=True)\nseries = df_data.loc[:,'Entries']\n\np = figure(width=800, y_range=series.index.tolist(), title=\"Histogram of Vulnerability Types from CVE Details\")\n\np.xaxis.axis_label = 'Frequency'\np.xaxis.axis_label_text_font_size = '10pt'\np.xaxis.major_label_text_font_size = '8pt'\n\np.yaxis.axis_label = 'Vulnerability Type Combinations of the 13 Types'\np.yaxis.axis_label_text_font_size = '10pt'\np.yaxis.major_label_text_font_size = '8pt'\n\nj = 1\nfor k,v in series.iteritems():\n \n #Print fields, values, orders\n #print (k,v,j) \n p.rect(x=v/2, y=j, width=abs(v), height=0.4,\n width_units=\"data\", height_units=\"data\")\n j += 1\n",
"The histogram below represents vulnerability types as mentioned in the Cve Details database. The vulnerability types are explained below:\n\nDir. Trav. stands for Directory Traversal\nDir. Trav. Bypass stands for Directory Traversal Bypass\nDir. Trav. File Inclusion stands for Directory Traversal File Inclusion\nDoS Sql stands for Denial of Service attack using Structured Query Language\nExec Code Dir. Trav. stands for Execution of arbitrary code causing a Directory Traversal vulnerability\nExec code Sql stands for Execution of arbitrary code causing a Structured Query Language vulnerability\nExec Code Sql +Info stands for Execution of arbitrary code causing a Structured Query Language vulnerability or access to sensitive information\nSql stands for Structured Query language vulnerability\nSql +Info stands for a Structured Query Language vulnerability or access to sensitive information\nSql Bypass +Info stands for a Structured Query Language Bypass vulnerability or access to sensitive information\nSql XSS stands for Cross-site scripting(XSS) vulnerability in SQL",
"show(p)",
"We created this histogram to gain insights into the the number of occurances of each of the vulnerability types. On analysis we can see that Exec. code Sql is the most frequent type of attack, followed by Dir. Trav. and Sql types which qualify to the three most frequent vulnerability types. \nCWE ID count histogram",
"#creating a histogram for cwe ID types by creating a dictionary\ndata = {}\ndata['Entries'] = vulnerability_histogram\n#saving in dictionary for sorting and visualising\ndf_data = pd.DataFrame(data).sort_values(by='Entries', ascending=True)\nseries = df_data.loc[:,'Entries']\n\np = figure(width=800, y_range=series.index.tolist(), title=\"Histogram of CWE IDs from CVE Details\")\n\np.xaxis.axis_label = 'Frequency'\np.xaxis.axis_label_text_font_size = '10pt'\np.xaxis.major_label_text_font_size = '8pt'\n\np.yaxis.axis_label = 'CWE ID'\np.yaxis.axis_label_text_font_size = '10pt'\np.yaxis.major_label_text_font_size = '8pt'\n\nj = 1\nfor k,v in series.iteritems():\n \n #Print fields, values, orders\n #print (k,v,j) \n p.rect(x=v/2, y=j, width=abs(v), height=0.4,\n width_units=\"data\", height_units=\"data\")\n j += 1\nshow(p)",
"The histogram above shows the frequency of CWE IDs of the CVE Details database. The most frequent CWE IDs are 89 and 22 which account for more than 90% of the entries.",
"color_map = {\n'+Priv Dir. Trav.': 'red',\n'Dir. Trav.': 'green',\n'Dir. Trav. +Info': 'yellow',\n'Dir. Trav. Bypass': 'violet',\n'Dir. Trav. File Inclusion': 'indigo',\n'DoS Sql': 'brown',\n'Exec Code Dir. Trav.': 'black',\n'Exec Code Sql': 'blue',\n'Exec Code Sql +Info': 'orange',\n'Sql': 'olive',\n'Sql +Info' : 'navy',\n'Sql Bypass +Info' : 'firebrick',\n'Sql XSS' : '#F4A582'\n}\n\ndef create_multi_line(vul):\n map_data = {}\n for v in vul.items():\n tuple_0 = v[0]\n if tuple_0[0] in map_data:\n map_data[tuple_0[0]].append((tuple_0[1],v[1]))\n else:\n map_data[tuple_0[0]] =[]\n map_data[tuple_0[0]].append((tuple_0[1],v[1]))\n vul_plot = plot.subplot(111)\n for k,v in map_data.items():\n vul_plot.plot([int(i[0]) for i in v],[int(j[1]) for j in v] , label=k)\n vul_plot.set_ylabel('count')\n vul_plot.set_xlabel('month')\n\n vul_plot.legend(bbox_to_anchor=(1.05, 1),loc=2, borderaxespad=0.)\n vul_plot.set_title(\"Count of Vulnerability type per month for the year 2016\")\n vul_plot.set_autoscaley_on(False)\n vul_plot.set_ylim([0,25])\n vul_plot.set_autoscalex_on(False)\n vul_plot.set_xlim([1,12])\n vul_plot.xaxis.set_ticks([1,2,3,4,5,6,7,8,9,10,11,12])\n plot.show()\n\n\n#We specify the cwe_id column type is str, otherwise pandas will infer it as a float adding a misleading decimal.\ncved_df = pd.read_csv(\"cve_details_sql_injection_2016.csv\")\ncved_df['month'] = [d[5:7] for d in cved_df['published_date']]\nvulnerability_type_histogram = cved_df.groupby(by=['vulnerability_type','cwe_id'])['cve_id','n_exploits'].count()\nvulnerability_type_histogram\ndata = cved_df.groupby(by=['vulnerability_type','month'])['cve_id'].count()\n\n\ncreate_multi_line(data)\n\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
fossdevil/Assignments
|
PRNN/Assignment/Assignment3/word2Vec.ipynb
|
mit
|
[
"import numpy as np\nimport csv\nimport pandas as pd\nfrom sklearn import svm\nfrom nltk.corpus import stopwords\nimport re\nimport nltk\nfrom nltk.tokenize import *\nfrom nltk.data import load",
"To run this file download To run download https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM/edit?usp=sharing File name : \"GoogleNews-vectors-negative300.bin\"\nCan also be found here: https://code.google.com/archive/p/word2vec/\nReading Files Line by Line",
"def sentenceExtractionForTraining(dirName, fileName, classes):\n sentencesClass = []\n for i in range(0,len(classes)):\n sentences = readFile(dirName+fileName[i])\n sentencesClass.append(sentences)\n return sentencesClass\n\ndef readFile(fileName):\n f = open(fileName,\"r+\")\n sentences = []\n for line in f.readlines():\n line = line.strip()\n if len(line)==0 :\n continue\n if \"TIMEX3\" in line:\n continue \n sentences.append(line.lower().rstrip())\n f.close()\n return sentences\n\ndef createVocab(sentencesClass):\n vocab = set()\n for i in range(0, len(sentencesClass)):\n for j in range(0,len(sentencesClass[i])):\n words = sentencesClass[i][j].split()\n for w in words:\n vocab.add(w)\n return vocab",
"Removing fancy characters",
"def removeFancyChars(sentences):\n lengthPhrase = len(sentences)\n for i in range(lengthPhrase):\n sentences[i] = re.sub(r'([^\\s\\w]|_)+', '', sentences[i])\n return sentences\n\ndef removeFC(sentencesClass):\n for i in range(0, len(sentencesClass)):\n sentencesClass[i] = removeFancyChars(sentencesClass[i])\n return sentencesClass",
"word2vec.",
"def load_bin_vec(fname, vocab):\n \"\"\"\n Loads 300x1 word vecs from Google (Mikolov) word2vec\n \"\"\"\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * layer1_size\n for line in xrange(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word)\n break\n if ch != '\\n':\n word.append(ch) \n if word in vocab:\n word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32') \n else:\n f.read(binary_len)\n return word_vecs\n\ndef add_unknown_words(word_vecs, vocab, min_df=1, k=300):\n for word in vocab:\n if word not in word_vecs:\n word_vecs[word] = np.random.uniform(-0.25,0.25,k)\n\ndef initializeWordVecs(sentencesClass):\n vocab = createVocab(sentencesClass)\n w2vFile = \"GoogleNews-vectors-negative300.bin\"\n \n w2v = load_bin_vec(w2vFile, vocab)\n add_unknown_words(w2v, vocab)\n \n return w2v",
"For Parts Of Speech",
"def POSForSentence(sentence):\n text = word_tokenize(sentence)\n posSentence = nltk.pos_tag(text)\n posSentence = [y for x, y in posSentence]\n return posSentence\n\ndef getUniquePOS():\n tagdict = load('help/tagsets/upenn_tagset.pickle')\n return len(tagdict), tagdict.keys()",
"For w2v",
"def totalSentences(sentencesClass):\n size = 0\n for i in range(0, len(sentencesClass)):\n size += len(sentencesClass[i])\n return size;\n\ndef defineW2V(sentencesClass, w2v, dim = 300):\n n = totalSentences(sentencesClass)\n mat = np.zeros((n, dim))\n labels = np.zeros(n)\n k = 0\n for i in range(0, len(sentencesClass)):\n for j in range(0, len(sentencesClass[i])):\n words = sentencesClass[i][j].split()\n d = np.zeros(300)\n ind = 0.0\n for w in words:\n if w not in w2v:\n w2v[w] = np.random.uniform(-0.25,0.25,dim) \n d += w2v[w]\n ind += 1.0\n d /= ind\n mat[k] = d\n labels[k] = i+1\n k+=1\n return mat, labels\n\ndef defineW2VPOS(originalSentencesClass, sentencesClass, w2v, dim = 300):\n n = totalSentences(sentencesClass)\n labels = np.zeros(n)\n sizePOS, POSList = getUniquePOS()\n mat = np.zeros((n, dim + sizePOS))\n matFromW2V, labels = defineW2V(sentencesClass, w2v)\n \n for i in range(0, n):\n mat[i,:dim] = matFromW2V[i]\n \n k = 0\n for i in range(0, len(originalSentencesClass)):\n for j in range(0, len(originalSentencesClass[i])):\n pos = POSForSentence(originalSentencesClass[i][j])\n for p in pos:\n mat[k, dim + POSList.index(p)] = 1.0\n k+=1\n \n return mat, labels",
"Saving to file",
"def savew2vToFile(w2v):\n fileName = \"word2VecDict.npy\"\n np.save(fileName, w2v)\n\ndef finalFeaturesLabel(X,y):\n n, d = X.shape\n finalMat = np.zeros((n,d+1))\n for i in range(0, n):\n finalMat[i, 0] = y[i]\n finalMat[i, 1:] = X[i]\n return finalMat\n\ndef saveW2V(fileName, finalMat):\n np.save(fileName, finalMat)",
"Loading from file",
"def loadW2V():\n w2v = np.load('word2VecDict.npy').item()\n \n i = 0\n for key, value in w2v.iteritems():\n if i>10:\n break\n print key, value\n i = i + 1\n\ndef main():\n dirName = \"Email-classification_dataset/\"\n classes = [1,2,3,4,5]\n fileName = [\"RD-positive-800.txt\", \"meetings-positive-800.txt\", \"negative-800.txt\", \"fyi-positive-800.txt\", \"tp-positive-500.txt\",]\n originalsentencesClass = sentenceExtractionForTraining(dirName, fileName, classes)\n sentencesClass = removeFC(originalsentencesClass)\n w2v = initializeWordVecs(sentencesClass)\n \n savew2vToFile(w2v)\n \n XW2V, yW2V = defineW2V(sentencesClass, w2v)\n \n XW2VPOS, yW2VPOS = defineW2VPOS(originalsentencesClass, sentencesClass, w2v)\n \n finalMatW2V = finalFeaturesLabel(XW2V, yW2V)\n finalMatW2VPOS = finalFeaturesLabel(XW2VPOS, yW2VPOS)\n \n saveW2V(\"w2v.npy\",finalMatW2V)\n saveW2V(\"w2vpos.npy\",finalMatW2VPOS)\n\nif __name__==\"__main__\":\n main()"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
autism-research-centre/Autism-Gradients
|
Gradients.ipynb
|
gpl-3.0
|
[
"Created on Mon Dec 01 15:05:56 2016\n@author: Richard\nRequired packages:\n\npySTATIS\nnumpy\nmapalign\nnibabel\nsklearn\ncluster_roi\n\nsuggested file struture:\n\nmain/\nmain/cpac/filt_noglobal/rois_cc400/ > for data files\nmain/Affn/ > for adjacency matrices\nmain/Embs/ > for diffusion embedding files\n\ndownload ABIDE data:\nhttp://preprocessed-connectomes-project.org/abide/download.html\npython download_abide_preproc.py -d rois_cc400 -p cpac -s filt_noglobal -o data/ -x 'M' -gt 18 -lt 55",
"## lets start with some actual script\n# import useful things\nimport numpy as np\nimport os\nimport nibabel as nib\nfrom sklearn.metrics import pairwise_distances\n\n# get a list of inputs\nfrom os import listdir\nfrom os.path import isfile, join\nimport os.path\n\n# little helper function to return the proper filelist with the full path\ndef listdir_nohidden(path):\n for f in os.listdir(path):\n if not f.startswith('.'):\n yield f\n\ndef listdir_fullpath(d):\n return [os.path.join(d, f) for f in listdir_nohidden(d)]\n# and create a filelist\nonlyfiles = listdir_fullpath(\"data/Outputs/cpac/filt_noglobal/rois_cc400\")",
"Check all files to see if any have missing nodal information and create a selection list based on the ones that are 100% complete.",
"# check to see which files contains nodes with missing information\nmissingarray = []\nfor i in onlyfiles:\n# load timeseries\n filename = i\n ts_raw = np.loadtxt(filename)\n\n# check zero columns\n missingn = np.where(~ts_raw.any(axis=0))[0]\n missingarray.append(missingn)\n\n# select the ones that don't have missing data\nids = np.where([len(i) == 0 for i in missingarray])[0]\nselected = [onlyfiles[i] for i in ids]\n# could be useful to have one without pathnames later one\nselected2 = [os.path.basename(onlyfiles[i]) for i in ids]\nprint(len(selected))",
"run the diffusion embedding",
"# run the diffusion embedding\nfrom mapalign import embed\n\nfor i in selected:\n # load timeseries\n #print i\n filename = i\n ts = np.loadtxt(filename)\n # create correlation matrix\n dcon = np.corrcoef(ts.T)\n dcon[np.isnan(dcon)] = 0\n\n # Get number of nodes\n N = dcon.shape[0]\n\n # threshold\n perc = np.array([np.percentile(x, 90) for x in dcon])\n\n for ii in range(dcon.shape[0]):\n #print \"Row %d\" % ii\n dcon[ii, dcon[ii,:] < perc[ii]] = 0\n\n # If there are any left then set them to zero\n dcon[dcon < 0] = 0\n\n # compute the pairwise correctionlation distances\n aff = 1 - pairwise_distances(dcon, metric = 'cosine')\n\n # start saving\n savename = os.path.basename(filename)\n np.save(\"./data/Outputs/Affn/\"+savename+\"_cosine_affinity.npy\", aff)\n # get the diffusion maps\n emb, res = embed.compute_diffusion_map(aff, alpha = 0.5)\n # Save results\n np.save(\"./data/Outputs/Embs/\"+savename+\"_embedding_dense_emb.npy\", emb)\n np.save(\"./data/Outputs/Embs/\"+savename+\"_embedding_dense_res.npy\", res)\n\n X = res['vectors']\n X = (X.T/X[:,0]).T[:,1:] \n \n np.save(\"./data/Outputs/Embs/\"+savename+\"_embedding_dense_res_veconly.npy\", X) #store vectors only",
"Run Statis to back-project the grouped embeddings",
"%%capture\nfrom pySTATIS import statis\n\n#load vectors\nnames = list(xrange(392))\nX = [np.load(\"./data/Outputs/Embs/\"+ os.path.basename(filename)+\"_embedding_dense_res_veconly.npy\") for filename in selected2]\nout = statis.statis(X, names, fname='statis_results.npy')\nstatis.project_back(X, out['Q'], path = \"./data/Outputs/Regs/\",fnames = selected2)\nnp.save(\"Mean_Vec.npy\",out['F'])\n\n# saving everything in one dump\nimport pickle\nwith open('output.pickle' ,'w') as f:\n pickle.dump([selected, out],f)",
"plotting\nplot to surface for inspection\nthis cell in only necessary for plotting below",
"%matplotlib inline\nimport matplotlib.pylab as plt\nimport nilearn\nimport nilearn.plotting\n\nimport numpy as np\nimport nibabel as nib\n\ndef rebuild_nii(num):\n\n data = np.load('Mean_Vec.npy')\n a = data[:,num].copy()\n nim = nib.load('cc400_roi_atlas.nii')\n imdat=nim.get_data()\n imdat_new = imdat.copy()\n\n for n, i in enumerate(np.unique(imdat)):\n if i != 0:\n imdat_new[imdat == i] = a[n-1] * 10000 # scaling factor. Could also try to get float values in nifti...\n\n nim_out = nib.Nifti1Image(imdat_new, nim.get_affine(), nim.get_header())\n nim_out.set_data_dtype('float32')\n # to save:\n nim_out.to_filename('Gradient_'+ str(num) +'_res.nii')\n\n nilearn.plotting.plot_epi(nim_out)\n return(nim_out)\n\nfor i in range(10):\n nims = rebuild_nii(i)",
"Output everything to an excel file",
"import pandas as pd\n# read in csv\ndf_phen = pd.read_csv('Phenotypic_V1_0b_preprocessed1.csv')\n# add a column that matches the filename\nfor i in df_phen:\n df_phen['filename'] = join(df_phen['FILE_ID']+\"_rois_cc400.1D\")\n df_phen['filenamelpy'] = join(df_phen['FILE_ID']+\"_rois_cc400.1D.npy\")\n\ndf_phen['selec'] = np.where(df_phen['filename'].isin((selected2)), 1, 0)",
"Compare the slopes across subjects",
"from scipy import stats\ngrdnt_slope = []\nfor i in selected2:\n # load gradients\n # print i\n filename = i\n grdnt = np.load(\"./data/Outputs/Regs/\" + filename + \".npy\")\n # do we need a specific ordering of the nodes??\n y = list(xrange(392))\n temp = []\n for ii in range(10):\n x = sorted(grdnt[:,ii]) # just sort in ascending order?\n slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)\n temp.append(slope)\n \n grdnt_slope.append(temp)\ngrdnt_slope = np.array(grdnt_slope)\n# make it into a dataframe\ndata_grdnt = pd.DataFrame(grdnt_slope)\ndata_grdnt['file'] = selected2",
"And write them to an excel file",
"data = df_phen.loc[df_phen[\"selec\"] == 1]\ndata['filenamelow'] = data['filename'].str.lower()\ndata = data.sort(['filenamelow'])\n\noutput = data.merge(data_grdnt, left_on='filename',right_on='file',how='outer')\noutput.to_csv('Combined.csv', sep='\\t')",
"Plot some stuff",
"## numpy is used for creating fake data\n%matplotlib inline\nimport numpy as np \nimport matplotlib as mpl \n\n## agg backend is used to create plot as a .png file\nmpl.use('agg')\n\nimport matplotlib.pyplot as plt \ndf = pd.DataFrame(output, columns = ['DX_GROUP', 0,1,2,3,4,5,6,7,8,9])\nASC = df['DX_GROUP'] == 2\nNT = df['DX_GROUP'] == 1\nG1 = df[ASC]\nG2 = df[NT]\n\n# some plotting options\nfs = 10 # fontsize\nflierprops = dict(marker='o', markerfacecolor='green', markersize=12,\n linestyle='none')\n\n## combine the groups collections into a list \nGrd0 = [G1[0], G2[0]]\nGrd1 = [G1[1], G2[1]]\nGrd2 = [G1[2], G2[2]]\nGrd3 = [G1[3], G2[3]]\nGrd4 = [G1[4], G2[4]]\nGrd5 = [G1[5], G2[5]]\nGrd6 = [G1[6], G2[6]]\nGrd7 = [G1[7], G2[7]]\nGrd8 = [G1[8], G2[8]]\nGrd9 = [G1[9], G2[9]]\n\nfig, axes = plt.subplots(nrows=2, ncols=5, figsize=(6, 6), sharey=True)\n\naxes[0, 0].boxplot(Grd0, patch_artist=True)\naxes[0, 0].set_title('G0', fontsize=fs)\n\naxes[0, 1].boxplot(Grd1, patch_artist=True)\naxes[0, 1].set_title('G1', fontsize=fs)\n\naxes[0, 2].boxplot(Grd2, patch_artist=True)\naxes[0, 2].set_title('G2', fontsize=fs)\n\naxes[0, 3].boxplot(Grd3, patch_artist=True)\naxes[0, 3].set_title('G3', fontsize=fs)\n\naxes[0, 4].boxplot(Grd4, patch_artist=True)\naxes[0, 4].set_title('G4', fontsize=fs)\n\naxes[1, 0].boxplot(Grd5, patch_artist=True)\naxes[1, 0].set_title('G5', fontsize=fs)\n\naxes[1, 1].boxplot(Grd6, patch_artist=True)\naxes[1, 1].set_title('G6', fontsize=fs)\n\naxes[1, 2].boxplot(Grd7, patch_artist=True)\naxes[1, 2].set_title('G7', fontsize=fs)\n\naxes[1, 3].boxplot(Grd8, patch_artist=True)\naxes[1, 3].set_title('G8', fontsize=fs)\n\naxes[1, 4].boxplot(Grd9, patch_artist=True)\naxes[1, 4].set_title('G9', fontsize=fs)\n\nfig.suptitle(\"Gradient Slopes\")\nfig.subplots_adjust(hspace=0.4)",
"Permutations",
"def exact_mc_perm_test(xs, ys, nmc):\n n, k = len(xs), 0\n diff = np.abs(np.mean(xs) - np.mean(ys))\n zs = np.concatenate([xs, ys])\n for j in range(nmc):\n np.random.shuffle(zs)\n k += diff < np.abs(np.mean(zs[:n]) - np.mean(zs[n:]))\n return k / nmc\n\nprint(exact_mc_perm_test(G1[0],G2[0],1000))\nprint(exact_mc_perm_test(G1[1],G2[1],1000))",
"Some quality control",
"%matplotlib inline\n# this cell in only necessary for plotting below\nimport matplotlib.pylab as plt \nimport nilearn \nimport nilearn.plotting \n\nimport numpy as np\nimport nibabel as nib\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef rebuild_nii(num):\n \n data = np.load('Mean_Vec.npy')\n a = data[:,num].copy()\n nim = nib.load('cc400_roi_atlas.nii')\n imdat=nim.get_data()\n imdat_new = imdat.copy()\n\n for n, i in enumerate(np.unique(imdat)):\n if i != 0:\n imdat_new[imdat == i] = a[n-1] * 100000 # scaling factor. Could also try to get float values in nifti...\n\n nim_out = nib.Nifti1Image(imdat_new, nim.get_affine(), nim.get_header())\n nim_out.set_data_dtype('float32')\n # to save:\n # nim_out.to_filename('res.nii')\n\n nilearn.plotting.plot_epi(nim_out)\n\ndef rebuild_nii_individ(num):\n \n onlyfiles = [f for f in listdir_nohidden('./data/Outputs/Regs/') if isfile(join('./data/Outputs/Regs/', f))]\n for index in range(178):\n \n sub = onlyfiles[index]\n print(sub)\n data = np.load('./data/Outputs/Regs/%s' % sub)\n a = data[:,num].astype('float32')\n nim = nib.load('cc400_roi_atlas.nii')\n imdat = nim.get_data().astype('float32')\n \n #print(np.unique(a))\n for i in np.unique(imdat):\n #a[a>0.1] = 0\n #a[a<-0.1] = 0\n if i != 0 and i < 392:\n imdat[imdat == i] = a[int(i)-1] # scaling factor. Could also try to get float values in nifti...\n elif i >= 392:\n imdat[imdat == i] = np.nan\n\n nim_out = nib.Nifti1Image(imdat, nim.get_affine(), nim.get_header())\n nim_out.set_data_dtype('float32')\n # to save:\n nim_out.to_filename(os.getcwd() + '/data/Outputs/individual/' + 'res' + sub + str(num) + '.nii')\n print(os.getcwd())\n # nilearn.plotting.plot_epi(nim_out)\n",
"Check all individual images",
"nims = rebuild_nii_individ(0)\n\n!fslview resCaltech_0051474_rois_cc400.1D.npy.nii"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
probml/pyprobml
|
notebooks/misc/poly_regression_torch.ipynb
|
mit
|
[
"Polynomial regression in 1d.\nBased on sec 4.4 of\n http://d2l.ai/chapter_multilayer-perceptrons/underfit-overfit.html",
"import numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(seed=1)\nimport math\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils import data\nfrom IPython import display\n\n!mkdir figures # for saving plots\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\")\n\n# For reproducing the results on different runs\ntorch.backends.cudnn.deterministic = True\ntorch.manual_seed(hash(\"by removing stochasticity\") % 2**32 - 1)\ntorch.cuda.manual_seed_all(hash(\"so runs are repeatable\") % 2**32 - 1)",
"Data\nMake some data using this function:\n$$y = 5 + 1.2x - 3.4\\frac{x^2}{2!} + 5.6 \\frac{x^3}{3!} + \\epsilon \\text{ where }\n\\epsilon \\sim \\mathcal{N}(0, 0.1^2).$$",
"np.random.seed(42)\nmax_degree = 20 # Maximum degree of the polynomial\nn_train, n_test = 100, 100 # Training and test dataset sizes\ntrue_w = np.zeros(max_degree) # Allocate lots of empty space\ntrue_w[0:4] = np.array([5, 1.2, -3.4, 5.6])\n\nfeatures = np.random.normal(size=(n_train + n_test, 1))\nnp.random.shuffle(features)\npoly_features = np.power(features, np.arange(max_degree).reshape(1, -1))\nfor i in range(max_degree):\n poly_features[:, i] /= math.gamma(i + 1) # `gamma(n)` = (n-1)!\n# Shape of `labels`: (`n_train` + `n_test`,)\nlabels = np.dot(poly_features, true_w)\nlabels += np.random.normal(scale=0.1, size=labels.shape)\n\n# Convert from NumPy ndarrays to tensors\ntrue_w, features, poly_features, labels = [\n torch.tensor(x, dtype=torch.float32) for x in [true_w, features, poly_features, labels]\n]\n\nprint(true_w)",
"Train/eval loop",
"class Accumulator:\n \"\"\"For accumulating sums over `n` variables.\"\"\"\n\n def __init__(self, n):\n self.data = [0.0] * n\n\n def add(self, *args):\n self.data = [a + float(b) for a, b in zip(self.data, args)]\n\n def reset(self):\n self.data = [0.0] * len(self.data)\n\n def __getitem__(self, idx):\n return self.data[idx]\n\n\nclass Animator:\n \"\"\"For plotting data in animation.\"\"\"\n\n def __init__(\n self,\n xlabel=None,\n ylabel=None,\n legend=None,\n xlim=None,\n ylim=None,\n xscale=\"linear\",\n yscale=\"linear\",\n fmts=(\"-\", \"m--\", \"g-.\", \"r:\"),\n nrows=1,\n ncols=1,\n figsize=(3.5, 2.5),\n ):\n # Incrementally plot multiple lines\n if legend is None:\n legend = []\n # d2l.use_svg_display()\n display.set_matplotlib_formats(\"svg\")\n self.fig, self.axes = plt.subplots(nrows, ncols, figsize=figsize)\n if nrows * ncols == 1:\n self.axes = [\n self.axes,\n ]\n # Use a lambda function to capture arguments\n self.config_axes = lambda: set_axes(self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)\n self.X, self.Y, self.fmts = None, None, fmts\n\n def add(self, x, y):\n # Add multiple data points into the figure\n if not hasattr(y, \"__len__\"):\n y = [y]\n n = len(y)\n if not hasattr(x, \"__len__\"):\n x = [x] * n\n if not self.X:\n self.X = [[] for _ in range(n)]\n if not self.Y:\n self.Y = [[] for _ in range(n)]\n for i, (a, b) in enumerate(zip(x, y)):\n if a is not None and b is not None:\n self.X[i].append(a)\n self.Y[i].append(b)\n self.axes[0].cla()\n for x, y, fmt in zip(self.X, self.Y, self.fmts):\n self.axes[0].plot(x, y, fmt)\n self.config_axes()\n display.display(self.fig)\n display.clear_output(wait=True)\n\n# Incrementally update loss metrics during training\ndef evaluate_loss(net, data_iter, loss):\n \"\"\"Evaluate the loss of a model on the given dataset.\"\"\"\n metric = Accumulator(2) # Sum of losses, no. of examples\n for X, y in data_iter:\n out = net(X)\n y = y.reshape(out.shape)\n l = loss(out, y)\n metric.add(l.sum(), l.numel())\n return metric[0] / metric[1]\n\n\ndef load_array(data_arrays, batch_size, is_train=True):\n \"\"\"Construct a PyTorch data iterator.\"\"\"\n dataset = data.TensorDataset(*data_arrays)\n return data.DataLoader(dataset, batch_size, shuffle=is_train)\n\n\ndef set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):\n \"\"\"Set the axes for matplotlib.\"\"\"\n axes.set_xlabel(xlabel)\n axes.set_ylabel(ylabel)\n axes.set_xscale(xscale)\n axes.set_yscale(yscale)\n axes.set_xlim(xlim)\n axes.set_ylim(ylim)\n if legend:\n axes.legend(legend)\n axes.grid()\n\n\ndef accuracy(y_hat, y):\n \"\"\"Compute the number of correct predictions.\"\"\"\n if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:\n y_hat = d2l.argmax(y_hat, axis=1)\n cmp = y_hat.to(y.dtype) == y\n return float(torch.sum(cmp.to(y.dtype)))\n\ndef train_epoch(net, train_iter, loss, updater):\n \"\"\"The training loop defined in Chapter 3.\"\"\"\n # Set the model to training mode\n if isinstance(net, torch.nn.Module):\n net.train()\n # Sum of training loss, sum of training accuracy, no. 
of examples\n metric = Accumulator(3)\n for X, y in train_iter:\n # Compute gradients and update parameters\n y_hat = net(X)\n l = loss(y_hat, y)\n\n # Using PyTorch in-built optimizer & loss criterion\n updater.zero_grad()\n l.backward()\n updater.step()\n metric.add(float(l) * len(y), accuracy(y_hat, y), y.numel())\n\n # Return training loss and training accuracy\n return metric[0] / metric[2], metric[1] / metric[2]\n\n# SGD optimization\n\n\ndef train(train_features, test_features, train_labels, test_labels, num_epochs=400, batch_size=50):\n loss = nn.MSELoss()\n input_shape = train_features.shape[-1]\n # Switch off the bias since we already catered for it in the polynomial\n # features\n net = nn.Sequential(nn.Linear(input_shape, 1, bias=False))\n batch_size = min(batch_size, train_labels.shape[0])\n train_iter = load_array((train_features, train_labels.reshape(-1, 1)), batch_size)\n test_iter = load_array((test_features, test_labels.reshape(-1, 1)), batch_size, is_train=False)\n trainer = torch.optim.SGD(net.parameters(), lr=0.01)\n animator = Animator(\n xlabel=\"epoch\", ylabel=\"loss\", yscale=\"log\", xlim=[1, num_epochs], ylim=[1e-3, 1e2], legend=[\"train\", \"test\"]\n )\n for epoch in range(num_epochs):\n train_epoch(net, train_iter, loss, trainer)\n if epoch == 0 or (epoch + 1) % 20 == 0:\n animator.add(epoch + 1, (evaluate_loss(net, train_iter, loss), evaluate_loss(net, test_iter, loss)))\n print(\"weight:\", net[0].weight.data.numpy())",
"Degree 3 (matches true function)\nTrain and test loss are similar (no over or underfitting),\nLoss is small, since matches true function. Estimated parameters are close to the true ones.",
"# Pick the first four dimensions, i.e., 1, x, x^2/2!, x^3/3! from the\n# polynomial features\ntrain(poly_features[:n_train, :4], poly_features[n_train:, :4], labels[:n_train], labels[n_train:])",
"Degree 1 (underfitting)",
"# Pick the first two dimensions, i.e., 1, x, from the polynomial features\ntrain(poly_features[:n_train, :2], poly_features[n_train:, :2], labels[:n_train], labels[n_train:])",
"Degree 20 (overfitting)\nAccording to the D2L book, the test loss is higher than training loss.\nHowever, SGD itself has a regularizing effect (even in full batch mode),\nso I cannot reproduce overfitting (even though it would occur using a second order optimizer).",
"# Pick all the dimensions from the polynomial features\ntrain(\n poly_features[:n_train, :],\n poly_features[n_train:, :],\n labels[:n_train],\n labels[n_train:],\n num_epochs=2000,\n batch_size=n_train,\n)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
turbomanage/training-data-analyst
|
courses/machine_learning/deepdive/03_tensorflow/labs/e_ai_platform.ipynb
|
apache-2.0
|
[
"<h1> Scaling up ML using Cloud AI Platform</h1>\n\nIn this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud AI Platform. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates how to package up a TensorFlow model to run it within Cloud AI Platform. \nLater in the course, we will look at ways to make a more effective machine learning model.\n<h2> Environment variables for project and bucket </h2>\n\nNote that:\n<ol>\n<li> Your project id is the *unique* string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: <b>Project ID:</b> cloud-training-demos </li>\n<li> Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket. </li>\n</ol>\n<b>Change the cell below</b> to reflect your Project ID and bucket name.",
"import os\nPROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID\nBUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME\nREGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1\n\n# for bash\nos.environ['PROJECT'] = PROJECT\nos.environ['BUCKET'] = BUCKET\nos.environ['REGION'] = REGION\nos.environ['TFVERSION'] = '1.14' # Tensorflow version\n\n%%bash\ngcloud config set project $PROJECT\ngcloud config set compute/region $REGION",
"Allow the Cloud AI Platform service account to read/write to the bucket containing training data.",
"%%bash\nPROJECT_ID=$PROJECT\nAUTH_TOKEN=$(gcloud auth print-access-token)\nSVC_ACCOUNT=$(curl -X GET -H \"Content-Type: application/json\" \\\n -H \"Authorization: Bearer $AUTH_TOKEN\" \\\n https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \\\n | python -c \"import json; import sys; response = json.load(sys.stdin); \\\n print(response['serviceAccount'])\")\n\necho \"Authorizing the Cloud AI Platform account $SVC_ACCOUNT to access files in $BUCKET\"\ngsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET\ngsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored\ngsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET",
"<h2> Packaging up the code </h2>\n\nTake your code and put into a standard Python package structure. <a href=\"taxifare/trainer/model.py\">model.py</a> and <a href=\"taxifare/trainer/task.py\">task.py</a> containing the Tensorflow code from earlier (explore the <a href=\"taxifare/trainer/\">directory structure</a>).",
"%%bash\n## check whether there are anymore TODOs \n## exit with 0 to avoid notebook process error\ngrep TODO taxifare/trainer/*.py; rc=$?\n\ncase $rc in \n 0) ;;\n 1) echo \"No more TODOs!\"; exit 0;;\nesac",
"<h2> Find absolute paths to your data </h2>\n\nNote the absolute paths below. /content is mapped in Datalab to where the home icon takes you",
"%%bash\necho $PWD\nrm -rf $PWD/taxi_trained\nhead -1 $PWD/taxi-train.csv\nhead -1 $PWD/taxi-valid.csv",
"<h2> Running the Python module from the command-line </h2>",
"%%bash\nrm -rf taxifare.tar.gz taxi_trained\nexport PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare\npython -m trainer.task \\\n --train_data_paths=\"${PWD}/taxi-train*\" \\\n --eval_data_paths=${PWD}/taxi-valid.csv \\\n --output_dir=${PWD}/taxi_trained \\\n --train_steps=100 --job-dir=./tmp\n\n%%bash\nls $PWD/taxi_trained/export/exporter/\n\n%%writefile ./test.json\n{\"pickuplon\": -73.885262,\"pickuplat\": 40.773008,\"dropofflon\": -73.987232,\"dropofflat\": 40.732403,\"passengers\": 2}\n\n%%bash\nmodel_dir=$(ls ${PWD}/taxi_trained/export/exporter)\ngcloud ai-platform local predict \\\n --model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \\\n --json-instances=./test.json",
"Monitor training with TensorBoard\nTo activate TensorBoard within the JupyterLab UI navigate to \"<b>File</b>\" - \"<b>New Launcher</b>\". Then double-click the 'Tensorboard' icon on the bottom row.\nTensorBoard 1 will appear in the new tab. Navigate through the three tabs to see the active TensorBoard. The 'Graphs' and 'Projector' tabs offer very interesting information including the ability to replay the tests.\nYou may close the TensorBoard tab when you are finished exploring.\n<h2> Running locally using gcloud </h2>",
"%%bash\nrm -rf taxifare.tar.gz taxi_trained\ngcloud ai-platform local train \\\n --module-name=trainer.task \\\n --package-path=${PWD}/taxifare/trainer \\\n -- \\\n --train_data_paths=${PWD}/taxi-train.csv \\\n --eval_data_paths=${PWD}/taxi-valid.csv \\\n --train_steps=1000 \\\n --output_dir=${PWD}/taxi_trained ",
"When I ran it (due to random seeds, your results will be different), the average_loss (Mean Squared Error) on the evaluation dataset was 187, meaning that the RMSE was around 13.",
"!ls $PWD/taxi_trained",
"<h2> Submit training job using gcloud </h2>\n\nFirst copy the training data to the cloud. Then, launch a training job.\nAfter you submit the job, go to the cloud console (http://console.cloud.google.com) and select <b>AI Platform | Jobs</b> to monitor progress. \n<b>Note:</b> Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job.",
"%%bash\necho $BUCKET\ngsutil -m rm -rf gs://${BUCKET}/taxifare/smallinput/\ngsutil -m cp ${PWD}/*.csv gs://${BUCKET}/taxifare/smallinput/\n\n%%bash\nOUTDIR=gs://${BUCKET}/taxifare/smallinput/taxi_trained\nJOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)\necho $OUTDIR $REGION $JOBNAME\ngsutil -m rm -rf $OUTDIR\ngcloud ai-platform jobs submit training $JOBNAME \\\n --region=$REGION \\\n --module-name=trainer.task \\\n --package-path=${PWD}/taxifare/trainer \\\n --job-dir=$OUTDIR \\\n --staging-bucket=gs://$BUCKET \\\n --scale-tier=BASIC \\\n --runtime-version=$TFVERSION \\\n -- \\\n --train_data_paths=\"gs://${BUCKET}/taxifare/smallinput/taxi-train*\" \\\n --eval_data_paths=\"gs://${BUCKET}/taxifare/smallinput/taxi-valid*\" \\\n --output_dir=$OUTDIR \\\n --train_steps=10000",
"Don't be concerned if the notebook appears stalled (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. \n<b>Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.</b>\n<h2> Deploy model </h2>\n\nFind out the actual name of the subdirectory where the model is stored and use it to deploy the model. Deploying model will take up to <b>5 minutes</b>.",
"%%bash\ngsutil ls gs://${BUCKET}/taxifare/smallinput/taxi_trained/export/exporter\n\n%%bash\nMODEL_NAME=\"taxifare\"\nMODEL_VERSION=\"v1\"\nMODEL_LOCATION=$(gsutil ls gs://${BUCKET}/taxifare/smallinput/taxi_trained/export/exporter | tail -1)\necho \"Run these commands one-by-one (the very first time, you'll create a model and then create a version)\"\n#gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME}\n#gcloud ai-platform models delete ${MODEL_NAME}\ngcloud ai-platform models create ${MODEL_NAME} --regions $REGION\ngcloud ai-platform versions create ${MODEL_VERSION} --model ${MODEL_NAME} --origin ${MODEL_LOCATION} --runtime-version $TFVERSION",
"<h2> Prediction </h2>",
"%%bash\ngcloud ai-platform predict --model=taxifare --version=v1 --json-instances=./test.json\n\nfrom googleapiclient import discovery\nfrom oauth2client.client import GoogleCredentials\nimport json\n\ncredentials = GoogleCredentials.get_application_default()\napi = discovery.build('ml', 'v1', credentials=credentials,\n discoveryServiceUrl='https://storage.googleapis.com/cloud-ml/discovery/ml_v1_discovery.json')\n\nrequest_data = {'instances':\n [\n {\n 'pickuplon': -73.885262,\n 'pickuplat': 40.773008,\n 'dropofflon': -73.987232,\n 'dropofflat': 40.732403,\n 'passengers': 2,\n }\n ]\n}\n\nparent = 'projects/%s/models/%s/versions/%s' % (PROJECT, 'taxifare', 'v1')\nresponse = api.projects().predict(body=request_data, name=parent).execute()\nprint (\"response={0}\".format(response))",
"<h2> Train on larger dataset </h2>\n\nI have already followed the steps below and the files are already available. <b> You don't need to do the steps in this comment. </b> In the next chapter (on feature engineering), we will avoid all this manual processing by using Cloud Dataflow.\nGo to http://bigquery.cloud.google.com/ and type the query:\n<pre>\nSELECT\n (tolls_amount + fare_amount) AS fare_amount,\n pickup_longitude AS pickuplon,\n pickup_latitude AS pickuplat,\n dropoff_longitude AS dropofflon,\n dropoff_latitude AS dropofflat,\n passenger_count*1.0 AS passengers,\n 'nokeyindata' AS key\nFROM\n [nyc-tlc:yellow.trips]\nWHERE\n trip_distance > 0\n AND fare_amount >= 2.5\n AND pickup_longitude > -78\n AND pickup_longitude < -70\n AND dropoff_longitude > -78\n AND dropoff_longitude < -70\n AND pickup_latitude > 37\n AND pickup_latitude < 45\n AND dropoff_latitude > 37\n AND dropoff_latitude < 45\n AND passenger_count > 0\n AND ABS(HASH(pickup_datetime)) % 1000 == 1\n</pre>\n\nNote that this is now 1,000,000 rows (i.e. 100x the original dataset). Export this to CSV using the following steps (Note that <b>I have already done this and made the resulting GCS data publicly available</b>, so you don't need to do it.):\n<ol>\n<li> Click on the \"Save As Table\" button and note down the name of the dataset and table.\n<li> On the BigQuery console, find the newly exported table in the left-hand-side menu, and click on the name.\n<li> Click on \"Export Table\"\n<li> Supply your bucket name and give it the name train.csv (for example: gs://cloud-training-demos-ml/taxifare/ch3/train.csv). Note down what this is. Wait for the job to finish (look at the \"Job History\" on the left-hand-side menu)\n<li> In the query above, change the final \"== 1\" to \"== 2\" and export this to Cloud Storage as valid.csv (e.g. gs://cloud-training-demos-ml/taxifare/ch3/valid.csv)\n<li> Download the two files, remove the header line and upload it back to GCS.\n</ol>\n\n<p/>\n<p/>\n\n<h2> Run Cloud training on 1-million row dataset </h2>\n\nThis took 60 minutes and uses as input 1-million rows. The model is exactly the same as above. The only changes are to the input (to use the larger dataset) and to the Cloud MLE tier (to use STANDARD_1 instead of BASIC -- STANDARD_1 is approximately 10x more powerful than BASIC). At the end of the training the loss was 32, but the RMSE (calculated on the validation dataset) was stubbornly at 9.03. So, simply adding more data doesn't help.",
"%%bash\n\nXXXXX this takes 60 minutes. if you are sure you want to run it, then remove this line.\n\nOUTDIR=gs://${BUCKET}/taxifare/ch3/taxi_trained\nJOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)\nCRS_BUCKET=cloud-training-demos # use the already exported data\necho $OUTDIR $REGION $JOBNAME\ngsutil -m rm -rf $OUTDIR\ngcloud ai-platform jobs submit training $JOBNAME \\\n --region=$REGION \\\n --module-name=trainer.task \\\n --package-path=${PWD}/taxifare/trainer \\\n --job-dir=$OUTDIR \\\n --staging-bucket=gs://$BUCKET \\\n --scale-tier=STANDARD_1 \\\n --runtime-version=$TFVERSION \\\n -- \\\n --train_data_paths=\"gs://${CRS_BUCKET}/taxifare/ch3/train.csv\" \\\n --eval_data_paths=\"gs://${CRS_BUCKET}/taxifare/ch3/valid.csv\" \\\n --output_dir=$OUTDIR \\\n --train_steps=100000",
"Challenge Exercise\nModify your solution to the challenge exercise in d_trainandevaluate.ipynb appropriately. Make sure that you implement training and deployment. Increase the size of your dataset by 10x since you are running on the cloud. Does your accuracy improve?\nCopyright 2016 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
as595/AllOfYourBases
|
MISC/Faraday.ipynb
|
gpl-3.0
|
[
"[171109 - AMS] Created notebook\nThis notebook re-creates Fig 1 from Brentjens & de Bruyn (2005)",
"# this embeds plots in the notebook\n%matplotlib inline \n\nimport numpy as np # for arrays\nimport pylab as pl # for plotting",
"Make a function for the Galactic foreground:\n$$\nF_{\\rm gal}(\\phi) = (2\\phi_{\\rm fg})^{-1}~~~~-\\phi_{\\rm fg} \\lt \\phi \\lt \\phi_{\\rm fg}\n$$\nand zero elsewhere. Therefore:\n$$\nP_{\\rm gal}(\\lambda^2) = \\frac{\\sin (2\\phi_{\\rm fg} \\lambda^2)}{2\\phi_{\\rm fg}\\lambda^2}\n$$\nMake an array of $\\lambda^2$ values:",
"lam_sq = np.arange(0.01,1,0.01)",
"We're going to specify that $\\phi_{\\rm fg}= 2\\,{\\rm rad\\,m^{-2}}$. We can then compute the Galactic contribution at each value of $\\lambda^2$:",
"phi_fg = 2.\nP_gal = np.sin(2*phi_fg*lam_sq)/(2*phi_fg*lam_sq) + 0*1j",
"Now make a function for the radio galaxy lobe:\n$$\nF_{\\rm rg}(\\phi) = 0.25\\delta(\\phi - \\phi_1)\n$$\ntherefore:\n$$\nP_{\\rm rg}(\\lambda^2) = 0.25 \\exp (2i\\phi_1 \\lambda^2) \n$$\nwhich is equivalent to:\n$$\nP_{\\rm rg}(\\lambda^2) = 0.25 \\cos (2\\phi_1 \\lambda^2) + 0.25 i \\sin (2\\phi_1 \\lambda^2)\n$$\nso,\n$$\nQ_{\\rm rg}(\\lambda^2) = 0.25 \\cos (2\\phi_1 \\lambda^2) \n$$\nand \n$$\nU_{\\rm rg}(\\lambda^2) = 0.25 \\sin (2\\phi_1 \\lambda^2) \n$$\nWe're going to specify that $\\phi_1= 10\\,{\\rm rad\\,m^{-2}}$. We can then compute the contribution from the radio galaxy lobe at each value of $\\lambda^2$:",
"phi_1 = 10.\nP_rg = 0.25*np.cos(2*phi_1*lam_sq) + 1j*0.25*np.sin(2*phi_1*lam_sq)",
"The total polarized signal will be the sum of the radio galaxy contribution and the Galactic contribution:",
"P_tot = P_gal + P_rg",
"Now let's re-create Fig. 1 from Brentjens & de Bruyn (2005; https://arxiv.org/pdf/astro-ph/0507349.pdf)\nFirst let's plot $Q_{\\rm gal}$ (called $Q_{\\rm fg}$ in the paper):",
"pl.subplot(111)\npl.plot(lam_sq,P_gal.real,ls='--')\npl.xlabel(r\"$\\lambda^2$ [m$^2$]\")\npl.ylabel(\"Flux [Jy]\")\npl.axis([0,1,-0.2,1.4])\npl.show()",
"Now let's plot on the magnitude of the total polarization as well:",
"pl.subplot(111)\npl.plot(lam_sq,P_gal.real,ls='--')\npl.plot(lam_sq,np.absolute(P_tot),ls=':')\npl.xlabel(r\"$\\lambda^2$ [m$^2$]\")\npl.ylabel(\"Flux [Jy]\")\npl.axis([0,1,-0.2,1.4])\npl.show()",
"Now let's calculate the polarization angle:\n$$\n\\chi = 0.5\\tan^{-1}\\left(\\frac{U}{Q}\\right)\n$$\nwhere $U$ is the imaginary part of the complex polarization, $P$, and $Q$ is the real part.",
"chi = 0.5*np.arctan2(P_tot.imag,P_tot.real)\nchi*= (180./np.pi) # convert radians to degrees\n\n# hack to unwrap the arctangent [-pi/2,pi/2] wrap:\nfor i in range(1,len(chi)):\n delta_chi = np.abs(chi[i]-chi[i-1])\n if (delta_chi>45.):\n chi[i:]+=180.\n\npl.subplot(111)\npl.plot(lam_sq,chi)\npl.xlabel(r\"$\\lambda^2$ [m$^2$]\")\npl.ylabel(r\"$\\chi$ [deg]\")\npl.axis([0,1,-50,350])\npl.show()",
"Now plot it all together:",
"fig, ax1 = pl.subplots()\nln1 = ax1.plot(lam_sq, chi, 'b-',label=r\"$\\chi$\")\nax1.set_xlabel(r\"$\\lambda^2$ [m$^2$]\")\nax1.set_ylabel(r\"$\\chi$ [deg]\")\nax1.set_ylim(-50, 350)\n\nax2 = ax1.twinx()\nln2 = ax2.plot(lam_sq,np.absolute(P_tot),ls=':',label=r\"$|P|$\")\nln3 = ax2.plot(lam_sq,P_gal.real,ls='--',label=r\"$Q_{\\rm fg}$\")\nax2.set_ylabel(\"Flux [Jy]\")\nax2.set_ylim(-0.2, 1.4)\n\n# figure legend:\nlns = ln1+ln2+ln3\nlabs = [l.get_label() for l in lns]\nax2.legend(lns, labs, loc=1)\n\nfig.tight_layout()\npl.savefig(\"Fig1.png\")\npl.show()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
vicente-gonzalez-ruiz/YAPT
|
02-basics/14-commenting_code.ipynb
|
cc0-1.0
|
[
"The Docstring system\nA docstring is a string literal that occurs as the first statement in a package, module or function (method). These structures (Python objects) can be properly commented and the documentation processed/retrieved by tools such as <!-- Docutils,--> Sphinx and help().\n1. Example: commenting a class",
"class ClassExample():\n '''This first line is a summary of this class. \\\nThere must be ONLY one logical line in this overview.\n \n However (notice that must be an empty line between the summary and this\n block of text), the rest of this doctrings can be placed in several lines.\n These lines usually provides extended information about the functionality\n of the class. The last line of the docstring should contain only\n three simple quotation marks (as follows).\n '''\n \n x = 1\n \n def __init__(self):\n '''A summary about the constructor. This is an example of a one-line docstring.'''\n print(self.x)\n \n def set(self, x):\n '''A summary about \\\"set\\\".\n \n Arguments:\n x: an integer that ... bla, bla, bla.\n (Notice the indentation of this/these lines)\n \n Returns:\n Nothing.\n '''\n self.x = x\n \n def get(self):\n '''A summary about \\\"get\\\".\n \n Arguments:\n None.\n \n Returns:\n The value of \\\"x\\\".\n '''\n return self.x",
"2. Using help()\nGetting help for a whole class.",
"help(ClassExample)",
"A different way of getting help (in Ipython):",
"ClassExample?",
"Getting help for a single member function:",
"help(ClassExample.get)",
"help() only prints the __doc__ variable:\nAny function, class or module starting with a string literal has a non-empty __doc__ attribute which can be printed to get information.",
"print(ClassExample.get.__doc__)\n\nprint(type(ClassExample.get.__doc__))",
"The same queries can be carried out with an instance.",
"a = ClassExample()\n\nhelp(a)\n\nhelp(a.set)\n\nprint(a.__doc__)\n\nprint(a.get.__doc__)",
"3. Documenting Your Project Using Sphinx\n3.1 Installation\n$ pip install Sphinx # Necessary only once\n3.2 Configuration of the project (example)\n```\n$ pwd\n/Users/vruiz/YAPT\n$ sphinx-quickstart # Necessary only once by project\n\nRoot path for the documentation [.]: <enter>\nSeparate source and build directories (y/n) [n]: <enter>\nName prefix for templates and static dir [_]: <enter>\n\n\n\nProject name: My ClassExample Python Project\nAuthor name(s): Your Name Here\nProject version []: 1.0\n\n\n\nProject release [1.0]: <enter>\nProject language [en]: <enter>\nSource file suffix [.rst]: <enter>\nName of your master document (without suffix) [index]: <enter>\nDo you want to use the epub builder (y/n) [n]: <enter>\n\n\n\nautodoc: automatically insert docstrings from modules (y/n) [n]: y\n\n\n\ndoctest: automatically test code snippets in doctest blocks (y/n) [n]: <enter>\nintersphinx: link between Sphinx documentation of different projects (y/n) [n]: <enter>\ntodo: write \"todo\" entries that can be shown or hidden on build (y/n) [n]: <enter>\ncoverage: checks for documentation coverage (y/n) [n]: <enter>\nimgmath: include math, rendered as PNG or SVG images (y/n) [n]: <enter>\nmathjax: include math, rendered in the browser by MathJax (y/n) [n]: <enter>\nifconfig: conditional inclusion of content based on config values (y/n) [n]: <enter>\nviewcode: include links to the source code of documented Python objects (y/n) [n]: <enter>\ngithubpages: create .nojekyll file to publish the document on GitHub pages (y/n) [n]: <enter>\nCreate Makefile? (y/n) [y]: <enter>\n\n\n\nCreate Windows command file? (y/n) [y]: n\n\n\n$ # Only to see how to add new (sub-)pages ...\n$ cat > extra_documentation.rst << EOF\nExtra Documentation\n===================\nYour extra documentation which has not been included in the code could\nbe placed here.\nEOF\n$ # You need to select which modules will be processed. \n$ cat > code.rst << EOF\nCode Documentation\n==================\n.. automodule:: ClassExample\n :members:\nEOF\n$ # Your index.rst file should show like (use a text editor):\n$ cat index.rst\n.. My ClassExample Python Project documentation master file, created by\n sphinx-quickstart on Thu Dec 8 17:46:01 2016.\n You can adapt this file completely to your liking, but it should at least\n contain the root toctree directive.\nWelcome to My ClassExample Python Project's documentation!\nTesting Docstrings and Sphinx!\n.. toctree::\n :maxdepth: 2\n :caption: Contents:\ncode\n extra_documentation\nIndices and tables\n\n:ref:genindex\n:ref:modindex\n:ref:search\n```\n\n3.3 Compilation of the documentation\nMakes sure to include the path to your modules in in the sys.path in the conf.py file previously generated.\n$ make clean\n$ make html\n3.4 Seeing the results\n$ firefox _build/html/index.html"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ruchika05/demo
|
Notebook/Anomaly-detection-DSWB.ipynb
|
epl-1.0
|
[
"Introduction\nThis Notebook will help you to identify anomalies in your historical timeseries data (IoT data) in simple steps. Also, derive the threshold value for your historical data. This threshold value can be used to set rules in Watson IoT Platform, such that you get an alert when your IoT device reports an abnormal reading in the future.\nAccepted file format\nNote that, this Notebook accepts the CSV file in one of the following file formats: \n2 column format: <Date and time in DD/MM/YYYY or MM/DD/YYYY format, Numeric value>\n1 column format: <Numeric value>\n\nSample data\nIn case if you don’t have any file, try downloading the sample file from this link. The sample file contains a temperature data updated for ever 15 minutes. Also, the sample data contains spikes to demonstrate the danger situation.\nLoad data\nDrag and drop your CSV file into this Notebook. Once the file is uploaded successfully, you can see the file in the Recent Data section. Also, expand the file name and click on Insert Path link to get the location of the file. It must be like, /resources/file-name.\nThe next step is to create the SQL DataFrame from the CSV file. Instead of specifying the schema for a Spark DataFrame programmatically, you can use the pyspark-csv module. It is an external PySpark module and works like the pandas read_csv function.\nEnter the following lines of code into your Notebook to create Spark SQL DataFrame from the given CSV file. Modify the path of the file if its different and click Run. And observe that it prints the schema.",
"from pyspark.sql import SQLContext\n# adding the PySpark module to SparkContext\nsc.addPyFile(\"https://raw.githubusercontent.com/seahboonsiew/pyspark-csv/master/pyspark_csv.py\")\nimport pyspark_csv as pycsv\n\n# you may need to modify this line if the filename or path is different.\nsqlContext = SQLContext(sc)\ndata = sc.textFile(\"/resources/sample-data.csv\")\n\ndef skip_header(idx, iterator):\n if (idx == 0):\n next(iterator)\n return iterator\n\nbody = data.mapPartitionsWithIndex(skip_header)\nheader = data.first()\nheader_list = header.split(\",\")\n# create Spark DataFrame using pyspark-csv\ndata_df = pycsv.csvToDataFrame(sqlContext, body, sep=\",\", columns=header_list)\ndata_df.cache()\ndata_df.printSchema()",
"Enter the following command in the next cell to look at the first record and click Run",
"# retrieve the first row\ndata_df.take(1)",
"Enter the following command in the next cell to get the number of rows in the CSV file (DataFrame) and click Run,",
"# retrieve the number of rows\ndata_df.count()",
"Create Pandas DataFrame\nEnter the following commands in the next cell to create a Pandas DataFrame from the Spark SQL DataFrame and click Run. This line prints the schema of the newly created Pandas DataFrame which will be same as the Spark SQL DataFrame. \nThe Python Data Analysis Library (a.k.a. pandas) provides high-performance, easy-to-use data structures and data analysis tools that are designed to make working with “relational” or “labeled” data both easy and intuitive. Also, plotting is very easy with Pandas DataFrame.",
"# create a pandas dataframe from the SQL dataframe\nimport pprint\nimport pandas as pd\npandaDF = data_df.toPandas()\n#Fill NA/NaN values to 0\npandaDF.fillna(0, inplace=True)\npandaDF.columns",
"Enter the following commands in the next cell to set timestamp as the index if its present and click Run,",
"# change index to time if its present\nvalueHeaderName = 'value'\ntimeHeaderName = 'null'\nif (len(header_list) == 2):\n timeHeaderName = header_list[0]\n valueHeaderName = header_list[1]\nelse:\n valueHeaderName = header_list[0]\n \n# Drop the timestamp column as the index is replaced with timestamp now\nif (len(header_list) == 2):\n pandaDF.index = pandaDF[timeHeaderName]\n pandaDF = pandaDF.drop([timeHeaderName], axis=1)\n # Also, sort the index with the timestamp\n pandaDF.sort_index(inplace=True)\n \npandaDF.head(n=5)",
"Calculate z-score\nWe detect the anomaly events using z-score, aka, a standard score indicating how many standard deviations an element is from the mean.\nEnter the following commands to calculate z-score for each of the values and add it as a new column in the same DataFrame,",
"# calculate z-score and populate a new column\npandaDF['zscore'] = (pandaDF[valueHeaderName] - pandaDF[valueHeaderName].mean())/pandaDF[valueHeaderName].std(ddof=0)\npandaDF.head(n=5) ",
"Plot Anomalies\nWhen we work in notebooks, we can decide how to present your anlysis results and derived information. So far, we have used normal print functions, which are informative. However, we can also show the results in a visual way by using the popular matplotlib package to create plots.\nEnter the following snippet of the code in the next cell to view the anomaly events in your data and click Run. Observe that the values for which the z-score is above 3 or below -3, marked as abnormal events in the graph shown below,",
"# ignore warnings if any\nimport warnings\nwarnings.filterwarnings('ignore')\n\n# render the results as inline charts:\n%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nThis function detects the spike and dip by returning a non-zero value \nwhen the z-score is above 3 (spike) and below -3(dip). Incase if you \nwant to capture the smaller spikes and dips, lower the zscore value from \n3 to 2 in this function.\n'''\ndef spike(row):\n if(row['zscore'] >=3 or row['zscore'] <=-3):\n return row[valueHeaderName]\n else:\n return 0\n \npandaDF['spike'] = pandaDF.apply(spike, axis=1)\n# select rows that are required for plotting\nplotDF = pandaDF[[valueHeaderName,'spike']]\n#calculate the y minimum value\ny_min = (pandaDF[valueHeaderName].max() - pandaDF[valueHeaderName].min()) / 10\nfig, ax = plt.subplots(num=None, figsize=(14, 6), dpi=80, facecolor='w', edgecolor='k')\nax.set_ylim(plotDF[valueHeaderName].min() - y_min, plotDF[valueHeaderName].max() + y_min)\nx_filt = plotDF.index[plotDF.spike != 0]\nplotDF['xyvaluexy'] = plotDF[valueHeaderName]\ny_filt = plotDF.xyvaluexy[plotDF.spike != 0]\n#Plot the raw data in blue colour\nline1 = ax.plot(plotDF.index, plotDF[valueHeaderName], '-', color='blue', animated = True, linewidth=1)\n#plot the anomalies in red circle\nline2 = ax.plot(x_filt, y_filt, 'ro', color='red', linewidth=2, animated = True)\n#Fill the raw area\nax.fill_between(plotDF.index, (pandaDF[valueHeaderName].min() - y_min), plotDF[valueHeaderName], interpolate=True, color='blue',alpha=0.6)\n\n# Label the axis\nax.set_xlabel(\"Sequence\",fontsize=20)\nax.set_ylabel(valueHeaderName,fontsize=20)\n\nplt.tight_layout()\nplt.legend()\nplt.show()",
"As shown, the red marks are the unexpected spikes and dips whose z-score value is greater than 3 or less than -3. Incase if you want to detect the lower spikes, modify the value to 2 or even lower and run. Similarly, if you want to detect only the higher spikes, try increasing the z-score value from 3 to 4 and beyond.\nDerive thresholds\nEnter the following command into the next cell to derive the Spike threshold value corresponding to z-score value 3 and click Run.",
"# calculate the value that is corresponding to z-score 3\n(pandaDF[valueHeaderName].std(ddof=0) * 3) + pandaDF[valueHeaderName].mean()",
"Similarly, Enter the following command into the next cell to derive the dip threshold value corresponding to z-score value -3.",
"# calculate the value that is corresponding to z-score -3\n(pandaDF[valueHeaderName].std(ddof=0) * -3) + pandaDF[valueHeaderName].mean()",
"This threshold value can be used to create a rule in the Watson IoT Platform to detect anomalies in the current IoT device events in realtime. This will create an alert in realtime when the current sensor reading crosses the threshold value.\nSummary\nThis Notebook showed how to use the z-score to detect anomalies in the historical timeseries data in simple steps. Also, showed how one can derive the threshold values for the given historical data to create rules accordingly in IBM Watson IoT Platform. Developers can consider this Notebook as a template for detecting anomalies in their historical IoT data and modify the python code depending upon the use case."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
sdpython/ensae_teaching_cs
|
_doc/notebooks/td2a/td2a_cenonce_session_2E.ipynb
|
mit
|
[
"2A.i - Sérialisation\nCharger un dataframe depuis un fichier texte prend du temps car il faut convertir le texte en nombre. La sérialisation permet de copier le contenu depuis la mémoire vers le disque. A la prochaine utilisation, Python a juste besoin de recopier le bloc depuis le disque et de le copier sans trop le modifier en mémoire. Sérialiser un dataframe permet de le récupérer beaucoup plus vite.",
"from jyquickhelper import add_notebook_menu\nadd_notebook_menu()",
"Sérialisation\nLa sérialisation désigne l'action de sauvegarder un objet dans un fichier telle qu'il est représentée dans la mémoire de l'ordinateur. De cette façon, la relecture de l'objet en question est plus rapide. La difficulté réside dans la sérialisation d'objets composites comme une liste qui contient un dictionnaire qui contient une liste d'autres listes. Sans rentrer dans le détail de l'implémentation, la plupart des objets en Python sont sérialisables ainsi qu'un objet composé de ces objets. Cela s'effectue avec le module pickle.",
"import pickle\nl = [ {3:\"4\"}, \"4\", -5.5, [6, None]]\nwith open(\"objet_serialise.bin\", \"wb\") as f :\n pickle.dump(l, f)",
"Puis on récupère les données :",
"with open(\"objet_serialise.bin\", \"rb\") as f :\n obj = pickle.load(f)\nobj",
"DataFrame\nIl existe une méthode spécifique pour les DataFrame : to_pickle qu'on relie avec la méthode read_pickle.",
"import pandas\ndf = pandas.DataFrame( [ {\"name\":\"xavier\", \"school\":\"ENSAE\"},\n {\"name\":\"antoine\", \"school\":\"ENSAE\"} ] )\ndf.to_pickle(\"df_serialize.bin\")",
"Puis on relit le fichier :",
"df2 = pandas.read_pickle(\"df_serialize.bin\")\ndf2",
"Exercice 1 : sérialisation d'un gros dataframe\nOn veut comparer le temps de chargement du même dataframe depuis un fichier texte et depuis un contenant le dataframe sérialisé. Dans un premier temps, on génère un gros dataframe qu'on sauve sous fichier texte puis on le sérialise. On compare ensuite les temps de chargement.\nExercice 2 : sérialisation json\nLe module pickle produit des fichiers binaires qui ne sont pas lisible autrement que par Python. Et le format peut changer d'une version de Python à l'autre. On lui préfère souvent un format texte comme json. Reprendre l'exercice 1 avec le module jsonpickle.\nQuestion\nQue veut dire un message comme celui-ci présent dans la documentation de jsonpickle ?\nWarning jsonpickle can execute arbitrary Python code. Do not load jsonpickles from untrusted / unauthenticated sources.\nAutre option : dill\nLe module dill étend un peu les fonctionnalité de pickle. Ce dernier a quelques soucis pour récupérer des objets sérialisés avec d'autres versions de Python. dill est encore à l'état de développement mais il devrait être plus robuste dans ce cas particulier."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
palrogg/foundations-homework
|
07/Homework7.ipynb
|
mit
|
[
"Contents:\n[These anchors doesn't work on GitHub.com, although they are relative - sorry]\n* 1 Cats and dogs\n* 2 Millionaires\n* 3 Train stations\n1 Cats and dogs",
"import pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\ndf = pd.read_csv(\"07-hw-animals.csv\")\n\ndf.columns\n\ndf.head(3)\n\ndf.sort_values(by='length', ascending=False).head(3)\n\ndf['animal'].value_counts()\n\ndogs = df[df['animal']=='dog']\ndogs\n\ndf[df['length'] > 40]\n\ndf['inches'] = .393701 * df['length']\n\ndf\n\ncats = df[df['animal']=='cat']\ndogs = df[df['animal']=='dog']\n\n\n# Display all of the animals that are cats and above 12 inches long.\n# First do it using the \"cats\" variable, then do it using your normal dataframe.\n\ncats[cats['inches'] > 12]\ndf[(df['animal'] == 'cat') & (df['inches'] > 12)]\n\ncats['length'].describe()[['mean']]\n\ndogs['length'].describe()[['mean']]\n\nanimals = df.groupby( [ \"animal\"] )\nanimals['length'].mean()\n\nplt.style.use('ggplot')\ndogs['length'].hist()\n\nlabels = dogs['name']\nsizes = dogs['length']\nexplode = (0.1, 0.2, 0.2) # fun\n\nplt.pie(sizes, explode=explode, labels=labels,\n autopct='%1.2f%%', shadow=True, startangle=30)\n\n#cf: recent.head().plot(kind='pie', y='networthusbillion', labels=recent['name'].head(), legend=False)\n\n#Make a horizontal bar graph of the length of the animals, with their name as the label\ndf.plot(kind='barh', x='name', y='length', legend=False)\n\n#Make a sorted horizontal bar graph of the cats, with the larger cats on top.\ncats.sort_values(by='length').plot(kind='barh', x='name', y='length', legend=False)\n",
"2 Millionaires\nWhat country are most billionaires from? For the top ones, how many billionaires per billion people?",
"df2 = pd.read_excel(\"richpeople.xlsx\")\n\ndf2.keys()\n\ntop_countries = df2['citizenship'].value_counts().head(10)\n\ndf_top_countries = pd.DataFrame.from_dict(top_countries)\ndf_top_countries['Country Name'] = df_top_countries.index\ndf_top_countries\n\n# population: data from http://data.worldbank.org/indicator/SP.POP.TOTL\ndf_pop = pd.read_csv(\"world_pop.csv\", header=2)\n\n\ndf_pop_mini = pd.DataFrame.from_records(df_pop[['Country Name','2014']])\n\ndfx = df_top_countries.merge(df_pop_mini, on='Country Name', how='left')\n\npd.set_option('display.float_format', lambda x: '%.2f' % x) # to get rid of Pandas automatic scientific notation\ndfx['Billionaires per million of habitants'] = (dfx['citizenship']*1000000) / dfx['2014']\n\ndfx\n",
"What's the average wealth of a billionaire? Male? Female?",
"print(\"The average wealth of a billionaire (in billions) is:\", df2['networthusbillion'].describe()['mean'])\n\nprint(\"The average wealth of a male billionaire is:\", df2[df2['gender'] == 'male']['networthusbillion'].describe()['mean'])\n\nprint(\"The average wealth of a female billionaire is:\", df2[df2['gender'] == 'female']['networthusbillion'].describe()['mean'])\n\n",
"Who is the poorest billionaire? Who are the top 10 poorest billionaires?",
"print('The poorest billionaire is:', df2.get_value(df2.sort_values('networthusbillion', ascending=True).index[0],'name'))\n\ndf2.sort_values('networthusbillion', ascending=True).head(10)",
"What is 'relationship to company'? And what are the most common relationships?",
"#relationship_values = set\nrelationship_list = df2['relationshiptocompany'].tolist()\nrelationship_set = set(relationship_list)\nrelationship_set = [s.strip() for s in relationship_set if s == s] # to remove a naughty NaN and get rid of dumb whitespaces\n\nprint(\"The relationships are: “\" + str.join('”, “', relationship_set) + \"”.\")\n\nprint('\\nThe five most common relationships are:')\n\n# DataFrame from dict: to make it pretty\npd.DataFrame.from_dict(df2['relationshiptocompany'].value_counts().head(5))",
"Most common source of wealth? Male vs. female?",
"print(\"The three most common sources of wealth are:\\n\" + str(df2['typeofwealth'].value_counts().head(3)))\n\nprint(\"\\nFor men, they are:\\n\" + str(df2[df2['gender'] == 'male']['typeofwealth'].value_counts().head(3)))\n\nprint(\"\\nFor women, they are:\\n\" + str(df2[df2['gender'] == 'female']['typeofwealth'].value_counts().head(3)))",
"Given the richest person in a country, what % of the GDP is their wealth?",
"per_country = df2.groupby(['citizenship'])\n#per_country['networthusbillion'].max()\n#per_country['networthusbillion'].idxmax() # DataFrame.max(axis=None, skipna=None, level=None, numeric_only=None, **kwargs)\n# per_country['gdpcurrentus']\n\ndf2['percofgdp'] = (100*1000000000*df2['networthusbillion']) / (df2['gdpcurrentus'])\n\n\n#pd.Series([\"{0:.2f}%\".format(percofgdp)])\n\nprint(\"NB: most countries doesn't have their GDP in the 'gdpcurrentus' column.\")\n\ndf2.loc[per_country['networthusbillion'].idxmax()][['name', 'networthusbillion', 'percofgdp']]\n\n",
"3 Train stations",
"df_trains = pd.read_csv(\"stations.csv\", delimiter=';')\nprint('A dataset about train stations and their accessibility in Switzerland:')\ndf_trains"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
opencb/opencga
|
opencga-client/src/main/python/notebooks/user-training/pyopencga_clinical_queries.ipynb
|
apache-2.0
|
[
"Clinical Queries\nSetup the Client and Login into pyopencga\nConfiguration and Credentials \nLet's assume we already have pyopencga installed in our python setup (all the steps described on pyopencga_first_steps.ipynb).\nYou need to provide at least a host server URL in the standard configuration format for OpenCGA as a python dictionary or in a json file.",
"## Step 1. Import pyopencga dependecies\nfrom pyopencga.opencga_config import ClientConfiguration # import configuration module\nfrom pyopencga.opencga_client import OpencgaClient # import client module\nfrom pprint import pprint\nfrom IPython.display import JSON\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\n## Step 2. User credentials\nuser = 'demouser'\n####################################\n\n## Step 3. Create the ClientConfiguration dict\nhost = 'http://bioinfo.hpc.cam.ac.uk/opencga-prod'\nconfig_dict = {'rest': {\n 'host': host\n }\n }\n\n## Step 4. Create the ClientConfiguration and OpenCGA client\nconfig = ClientConfiguration(config_dict)\noc = OpencgaClient(config)\n\n## Step 5. Login to OpenCGA using the OpenCGA client- add password when prompted\noc.login(user)\n\nprint('Logged succesfuly to {}, your token is: {} well done!'.format(host, oc.token))\n",
"Define some common variables\nHere you can define some variables that will be used repeatedly over the notebook.",
"# Define the study id\nstudy = 'reanalysis:rd38'\n\n# Define a clinicalCaseId\ncase_id = 'OPA-10044-1'\n\n# Define a interpretationId\ninterpretation_id = 'OPA-10044-1__2'",
"1. Comon Queries for Clinical Analysis\nRetrieve cases in a study\n\nThe query below retrieves the cases in a study. For performance reasons, we have limited the number of results retrieved in the query.\nYou can change the parameter limit to controle the number of cases you want to retrieve for the query. \nYou can also control the information you want to retrieve and print from the cases with the parameters include and fields.",
"## Query using the clinical search web service\ncases_search = oc.clinical.search(study=study, include='id,type,proband,description,panels,interpretation', limit=5)\ncases_search.print_results(title='Cases found for study {}'.format(study), fields='id,type,proband.id,panels.id,interpretation.id')\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(cases_search.get_results())",
"Proband information: List of disorders and HPO terms from proband of a case\n\nThe proband field from a case contains all the information related to a proband, including phenotypes and disorders.\nYou can retrieve all the phenotypes and disorders of a proband from a case by inspecting the information at the proband level. We'll use the random case_id defined above:",
"## Query using the clinical info web service\ndisorder_search = oc.clinical.search(study=study, include='id,type,proband', limit=5)\ndisorder_search.print_results(title='Disorders and phenotypes', fields='id,type,proband.id')\n\ndisorder_object = disorder_search.get_results()[0]['proband']\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(disorder_object)",
"Check the interpretation id of a case\n\nYou can find theinterpretation id from a case. This is useful to perform subsequent queries for that interpretation.\nNote that you can control the fields that are printed by the function print_results with the parameter fields. To see the whole clinical analysis object, you can use the interactive JSON viewer below.",
"# Query using the clinical info web service\nclinical_info = oc.clinical.info(clinical_analysis=case_id, study=study)\nclinical_info.print_results(fields='id,interpretation.id,type,proband.id')\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(clinical_info.get_results()[0]['interpretation'])",
"Inspect the Interpretation object\n\nHere you will retrieve many useful information from a case interpretation.",
"## Query using the clinical info_interpretation web service\ninterpretation_object = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', study=study).get_results()\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(interpretation_object)",
"Check Reported pathogenic variants in a case interpretation and list the variant tier\n\nRun the cell below to retrieve the interpretation stats, including the pathogenic variants reported in a case.",
"## Query using the clinical info_interpretation web service\ninterpretation_stats = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', include='stats', study=study).get_results()[0]['stats']['primaryFindings']\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(interpretation_stats)",
"Retrieve the annotation for the reported variants\n\nRun the cell below to retrieve the annotation for the variants obtained",
"## Query using the clinical info_interpretation web service\nvariant_annotation = oc.clinical.info_interpretation(interpretations='OPA-12120-1__2', include='primaryFindings.annotation', study=study).get_results()[0]['primaryFindings']\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(variant_annotation)",
"PanelApp panels applied in the original analysis\n\nObtain the list of genes that were in the panel at the time of the original analysis",
"cases_search = oc.clinical.search(study=study, include='id,panels', limit= 5)\ncases_search.print_results(title='Cases found for study {}'.format(study), fields='id,panels.id')\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(cases_search.get_results())",
"2. Use Case\nSituation: I want to retrieve a case, check whether the case has a reported pathogenic variant. Retriev the annotation information about these variants, if available.\nFinally, I want to come up with the list of tier 1, 2 and 3 variants for the sample.\n1. Search Cases in the study and select one random case.\n\nFirst you need to perform the query of searching over all the cases in a study. Uncomment the second line to have a look at the JSON with all the cases in the study.\n\nNote that this query can take time because there is plenty of information. it is recommended to restrict the search to a number of cases with the parameter limit as below:",
"## Search the cases\ncases_search = oc.clinical.search(study=study, limit=3)\n## Uncomment next line to display an interactive JSON viewer\n# JSON(cases_search.get_results())",
"Now you can select one random case id for the subsequent analysis",
"## Define an empty list to keep the case ids:\ncase_ids = []\n\n## Iterate over the cases and retrieve the ids:\nfor case in oc.clinical.search(study=study, include='id').result_iterator():\n case_ids.append(case['id'])\n\n## Uncomment for printing the list with all the case ids\n# print(case_ids)\n\n## Select a random case from the list\nimport random\nif case_ids != []:\n print('There are {} cases in study {}'.format(len(case_ids), study))\n selected_case = random.choice(case_ids)\n print('Case selected for analysis is {}'.format(selected_case))\nelse:\n print('There are no cases in the study', study)",
"2. Retrieve the interpretation id/s from the seleted case",
"## Query using the clinical info web service\ninterpretation_info = oc.clinical.info(clinical_analysis=selected_case, study=study)\ninterpretation_info.print_results(fields='id,interpretation.id,type,proband.id')\n\n## Select interpretation object \ninterpretation_object = interpretation_info.get_results()[0]['interpretation']\n\n## Select interpretation id \ninterpretation_id = interpretation_info.get_results()[0]['interpretation']['id']\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(interpretation_object)\n\nprint('The interpretation id for case {} is {}'.format(selected_case, interpretation_object['id'] ))",
"3. Retrieve reported variants and the annotation, including tiering\n\nObtain the interpretation stats from the case",
"## Query using the clinical info_interpretation web service\ninterpretation_stats = oc.clinical.info_interpretation(interpretations=interpretation_id, include='stats', study=study).get_results()[0]['stats']['primaryFindings']\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(interpretation_stats)",
"Obtain annotation from variants reported in a interpretation from a case as a JSON object",
"## Query using the clinical info_interpretation web service\nprimary_findings = oc.clinical.info_interpretation(interpretations=interpretation_id, study=study).get_results()[0]['primaryFindings']\n\n## Uncomment next line to display an interactive JSON viewer\n# JSON(primary_findings)",
"Obtain tiering: variant ids, genes, and tier from a case interpretation",
"## Perform the query\nvariants_reported = oc.clinical.info_interpretation(interpretations=interpretation_id, study=study)\n\n## Define empty list to store the variants, genes and the tiering\nvariant_list = []\ngene_id_list=[]\ngenename_list=[]\ntier_list =[]\n\n\nfor variant in variants_reported.get_results()[0]['primaryFindings']:\n variant_id = variant['id']\n variant_list.append(variant_id)\n gene_id = variant['evidences'][0]['genomicFeature']['id']\n gene_id_list.append(gene_id)\n gene_name = variant['evidences'][0]['genomicFeature']['geneName']\n genename_list.append(gene_name)\n tier = variant['evidences'][0]['classification']['tier']\n tier_list.append(tier)\n \n## Construct a Dataframe and return the first 5 rows\ndf = pd.DataFrame(data = {'variant_id':variant_list, 'gene_id':gene_id_list, 'gene_name':genename_list, 'tier': tier_list})\ndf.head()\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
fabiencampillo/systemes_dynamiques_agronomie
|
Partie6_Control_Obs_Sat.ipynb
|
gpl-3.0
|
[
"Partie 6: Contrôle de systèmes dynamiques II\n\nObservateurs\nSaturations\n\nLes parties sur les observateurs sont tirées du livre édité en 2008 par Denis Dochain et intitulé \"Automatic Control of Bioprocesses\".",
"# -*- coding: utf-8 -*-\nfrom IPython.display import HTML\n\nHTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide();\n } else {\n $('div.input').show();\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\nPour afficher le code python, cliquer sur le bouton: \n<button onclick=\"javascript:code_toggle()\">Afficher code python</button>\n''')\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom ipywidgets import interact, fixed\n#from IPython.html.widgets import interact, fixed\nimport scipy.integrate as scint\nfrom matplotlib import patches as pat",
"<a name=\"Obs\">Observateurs</a>\nLes lois de commande qui sont construites (PID ou autres types) dépendent de certaines variables du système (variables d'état, d'entrée ou de sortie). \nPar exemple, pour le contrôle de la concentration du susbtrat $S$ dans un réacteur continu, exemple que l'on a considéré précédemment, la commande PID:\n$$ Q(t)=V\\mu(S^\\ast)+K_p(S^\\ast-S^m(t))+K_i\\int_0^t(S^\\ast-S^m(s))ds+K_d \\frac{d(S^\\ast-S^m)}{dt}$$\ndépend de la concentration de substrat ou plus exactement de sa mesure $S^m$.\nOr, cette quantité n'est pas forcément mesurée, et d'autant moins en temps réel. Pour appliquer la commande, il va donc falloir que l'on estime la valeur de $S$. C'est ce que vont permettre de faire les observateurs.\nLe principe de l'observateur est donné sur la figure suivante:\n\nDéfinition: Un observateur pour le système:\n$$\\left{\n\\begin{array}{rcl}\n \\dot{x}&=&f(x,u)\\\ny&=&h(x,u)\n\\end{array}\n\\right. \n$$\nest un système d'équations de la forme:\n$$ \\left{\n\\begin{array}{rcl}\n \\dot{z}&=&\\hat{f}(z,y,u)\\\n\\hat{x}&=&\\hat{h}(z,y,u)\n\\end{array}\n\\right. $$\ntel que $\\hat{x}$ donne une \"bonne\" estimation de $x$, ce que l'on peut traduire par:\n$$ \\lim_{t\\rightarrow \\infty} \\left\\Vert x-\\hat{x} \\right\\Vert = 0$$\nParmi tous les observateurs possibles, on va chercher ceux qui ont les deux propriétés intéressantes suivantes:\n\nl'observateur doit converger plus rapidement que la dynamique du système\nsi $\\hat{x}(0)=x(0)$ alors $\\hat{x}(t)=x(t)$ pour tout $t>0$, ce qui veut dire que si on part de la bonne valeur de l'état, alors on doit pouvoir estimer parfaitement l'état aux temps suivants\n\nUne forme d'observateur couramment utilisée, qui peut avoir les propriétés recherchées, est la suivante:\n$$ \\left{\n\\begin{array}{rcl}\n \\dot{z}&=&f(z,u)+C\\left[h(z,u)-y\\right]\\\n\\hat{x}&=&z\n\\end{array}\n\\right. $$\nL'équation en $z$ est constitué d'un premier terme qui copie la dynamique du système à observer, et d'un second terme correctif qui dépend de l'erreur entre l'estimation donnée par l'observateur et la sortie (mesurée) $y$ du système.\n\nExemple: Croissance d'une population dans un réacteur continu (suite)\nUn modèle du système est donné par:\n$$\n\\left{\n\\begin{array}{crl}\n\\frac{dB}{dt}= & \\mu(S)B &-\\frac{Q}{V}B\\\n\\frac{dS}{dt}= & -k\\mu(S)B&+\\frac{Q}{V}(S_0-S) \\\n\\end{array}\n\\right.\n$$\nSupposons que l'on ne mesure que le dégagement de gaz représenté par la quantité $\\mu(S)B$. Alors on a:\n$$ y = \\mu(S)B =: h(S,B)$$\nUn observateur du système est donné par:\n$$\n\\left{\n\\begin{array}{crl}\n\\frac{d\\hat{B}}{dt}= & \\mu(\\hat{S})\\hat{B} &-\\frac{Q}{V}\\hat{B}+C_1\\left[\\mu(\\hat{S})\\hat{B}-y\\right]\\\n\\frac{d\\hat{S}}{dt}= & -k\\mu(\\hat{S})\\hat{B}&+\\frac{Q}{V}(S_0-\\hat{S})+C_2\\left[\\mu(\\hat{S})\\hat{B}-y\\right]\\\n\\end{array}\n\\right.\n$$",
"def reacteur_obs(x,t,k,muast,KS,KI,Qin,V,S0,Sast,control_type,coeffcontrol,obs,Cobs,disturb):\n B = x[0] #biomass\n S = x[1] #substrat\n \n if control_type in ['PI','PID']:\n indObs = 3\n Shat = x[indObs+1]\n dx = np.zeros(6)\n dx[2] = Sast-S\n dx[5] = Sast-Shat\n else: \n dx = np.zeros(4)\n indObs = 2\n \n if obs == 0:\n xest = x[0:indObs]\n elif obs == 1:\n xest = x[indObs:]\n \n Q = fonction_u(t,xest,Sast,k,muast,KI,KS,V,S0,control_type,coeffcontrol)*(1+disturb)\n \n mu = muast*S/(KS+S+S**2/KI)\n dx[0] = mu*B-Q/V*B\n dx[1] = -k*mu*B+Q/V*(S0-S)\n \n Bhat = x[indObs]\n Shat = x[indObs+1]\n muhat = muast*Shat/(KS+Shat+Shat**2/KI)\n dx[indObs] = muhat*Bhat-Q/V*Bhat+Cobs[0]*(muhat*Bhat-mu*B)\n dx[indObs+1] = -k*muhat*Bhat+Q/V*(S0-Shat)+Cobs[1]*(muhat*Bhat-mu*B)\n return dx\n\ndef culture_cont2(Sast,control_type,coeffcontrol,obs,coeffobs,disturb):\n tmax = 100\n temps = np.linspace(0,tmax,2000) # vecteur temps\n\n k = 0.6; muast = 2.3; KS = 10; KI = 0.1; S0 = 3.2; B0 = 9; Qin = 0.01; V = 0.5;\n\n B0obs = coeffobs[0]\n S0obs = coeffobs[1]\n Cobs = coeffobs[2:]\n \n # integration numerique de l'EDO\n if control_type in ['PI','PID']: \n x0 = np.array([B0,S0,0,B0obs,S0obs,0])\n indObs = 3\n else: \n x0 = np.array([B0,S0,B0obs,S0obs])\n indObs = 2\n x = scint.odeint(reacteur_obs,x0,temps,args=(k,muast,KS,KI,Qin,V,S0,Sast, \\\n control_type,coeffcontrol,obs,Cobs,disturb))\n if obs == 0:\n u = fonction_u(temps,x[:,0:indObs],Sast,k,muast,KI,KS,V,S0,control_type,coeffcontrol)\n elif obs == 1:\n u = fonction_u(temps,x[:,indObs:],Sast,k,muast,KI,KS,V,S0,control_type,coeffcontrol)\n \n plt.figure(figsize = (10, 3))\n plt.subplots_adjust(hspace=0.4,wspace=0.4)\n plt.subplot2grid((1,2),(0,0))\n plt.plot(temps,x[:,0],'r',label='Biomasse')\n plt.plot(temps,x[:,1],'g',label='Substrat')\n plt.plot(temps,x[:,indObs],'r--',label='Biomasse estimée')\n plt.plot(temps,x[:,indObs+1],'g--',label='Substrat estimé')\n plt.plot(np.array([0,temps[-1]]),np.array([Sast,Sast]),'k--',label='S*')\n plt.legend(); plt.xlabel('time (h)')\n \n plt.subplot2grid((1,2),(0,1))\n plt.plot(temps,u,'r',label='Debit')\n plt.legend(); plt.xlabel('time (h)')\n plt.show()\n \n# Contrôle boucle fermée de la culture bactérienne dans un réacteur continu: commande proportionnelle intégrale dérivée\n# ---------------------------------------------------------------------------------------------------------------------\n\n# Loi de commande\ndef fonction_u(t,x,Sast,k,muast,KI,KS,V,S0,control_type,coeffcontrol):\n if control_type == 'BO': # BOUCLE OUVERTE\n # loi donnée par Qast=mu(Sast)*V\n Qast = muast*Sast/(KS+Sast+Sast**2/KI)*V\n if type(t)==float: # cas où t est scalaire \n valu = Qast\n else: # cas où t est un vecteur\n valu = np.ones(len(t))*Qast\n elif control_type == 'P': # BOUCLE FERMEE action PROPORTIONNELLE\n # récupération des paramètres de la loi de commande\n kprop = coeffcontrol\n # et de la valeur de S\n if type(t)==float: # cas où t est scalaire \n valS = x[1]\n else: # cas où t est un vecteur\n valS = x[:,1]\n # loi donnée par mu(Sast)*V+kprop*(Sast-S)\n valu = muast*Sast/(KS+Sast+Sast**2/KI)*V+kprop*(Sast-valS) \n elif control_type == 'PI': # BOUCLE FERMEE action PROPORTIONNELLE INTEGRALE\n # récupération des paramètres de la loi de commande\n kprop = coeffcontrol[0]\n kint = coeffcontrol[1]\n # et de la valeur de valint, qui est l'intégrale entre 0 et t de Sast-S\n if type(t)==float: # cas où t est scalaire \n valint = x[2]; valS = x[1]\n else: # cas où t est un vecteur\n valint = x[:,2]; valS = x[:,1]\n # loi donnée par 
mu(Sast)*V+kprop*(Sast-S) + kint*valint\n # où valint est l'intégrale entre 0 et t de Sast-S\n Qast = muast*Sast/(KS+Sast+Sast**2/KI)*V\n valu = Qast+kprop*(Sast-valS)+kint*valint \n elif control_type == 'PID': # BOUCLE FERMEE action PROPORTIONNELLE INTEGRALE DERIVEE\n # récupération des paramètres de la loi de commande\n kprop = coeffcontrol[0]\n kint = coeffcontrol[1]\n kderiv = coeffcontrol[2]\n # et des valeurs de valint (intégrale de Sast-S), de S et de B\n if type(t)==float: # cas où t est scalaire \n valint = x[2]; valS = x[1]; valB=x[0]\n else: # cas où t est un vecteur\n valint = x[:,2]; valS = x[:,1]; valB = x[:,0]\n # loi donnée par mu(Sast)*V+kprop*(Sast-S) + kint*valint + kderiv*(dSast/dt-dS/dt)\n mu = muast*valS/(KS+valS+valS**2/KI)\n Qast = muast*Sast/(KS+Sast+Sast**2/KI)*V\n valu = (Qast+kprop*(Sast-valS)+kint*valint+kderiv*k*mu*valB)/(1+kderiv/V*(S0-valS))\n return valu",
"Test de l'observateur, mais sans l'intégrer à la loi de commande",
"# Simulation sans utiliser les observations dans la loi de commande: on suppose l'état connu\nculture_cont2(2,'PI',np.array([0.07,0.01]),0,np.array([13,4,-2,0.1]),0.2)",
"Test du couplage de l'observateur avec la loi de commande",
"# Simulation en utilisant les observations dans la loi de commande\nculture_cont2(2,'PI',np.array([0.07,0.01]),1,np.array([13,4,-2,0.1]),0.2)",
"<a name=\"Sat\"> Saturations </a>\nDans le paragraphe précédent, on a vu que, lorsque certaines variables du système n'étaient pas mesurées, il était possible de les estimer grâce à un observateur.\nUn autre problème que l'on peut rencontrer en pratique vient des saturations sur les commandes. En effet, pour des raisons souvent physiques, on ne peut pas appliquer n'importe quelle commande à notre système. Il y a en effet des limites physiques aux valeurs de commande, que l'on peut exprimer de la manière suivante:\n$$ u_{min} ≤ u ≤ u_{max}$$\n\nExemple: Croissance d'une population dans un réacteur continu (suite)\nDans cet exemple la commande étant un débit d'entrée, celle-ci ne pourra pas être négative. Les limites physiques de la pompe impose également une valeur de débit maximale, que l'on notera $Q_{max}$ et que l'on prendra égale à $0.1 L/h$.\nOn a donc:\n$$ 0 ≤ Q ≤ Q_{max}$$\nOr, on voit sur la figure précédente que la loi de commande que l'on a utilisée propose des valeurs de $Q$ négatives et supérieures à $Q_{max}$. C'est donc une loi de commande que l'on ne pourra pas appliquer telle qu'elle au système. \n\nUn première solution à ce problème consiste à appliquer une saturation à la commande calculée $u_{calc}$.\nLa valeur $u_{réelle}$ que l'on appliquera sera déterminée à partir de $u_{calc}$ de la manière suivante:\n$$ u_{réelle} = \\left{ \\begin{array}{ll} \nu_{min} & \\text{si }u_{calc} ≤ u_{min}\\\nu_{calc} & \\text{si }u_{min} < u_{calc} < u_{max}\\\nu_{max} & \\text{si }u_{max} ≤ u_{calc}\\\n\\end{array}\\right.$$",
"def reacteur_obs(x,t,k,muast,KS,KI,Qin,V,S0,Qmax,Sast,control_type,coeffcontrol,obs,Cobs,sat,coeffsat,disturb):\n B = x[0] #biomass\n S = x[1] #substrat\n \n if control_type in ['PI','PID']:\n indObs = 3\n Shat = x[indObs+1]\n dx = np.zeros(6)\n dx[2] = Sast-S\n dx[5] = Sast-Shat\n else: \n dx = np.zeros(4)\n indObs = 2\n \n if obs == 0:\n xest = x[0:indObs]\n elif obs == 1:\n xest = x[indObs:]\n \n Q = fonction_u(t,xest,Sast,k,muast,KI,KS,V,S0,control_type,coeffcontrol)*(1+disturb)\n \n if sat == 1:\n if Q<0: Q=0\n elif Q>Qmax: Q=Qmax\n \n mu = muast*S/(KS+S+S**2/KI)\n dx[0] = mu*B-Q/V*B\n dx[1] = -k*mu*B+Q/V*(S0-S)\n \n Bhat = x[indObs]\n Shat = x[indObs+1]\n muhat = muast*Shat/(KS+Shat+Shat**2/KI)\n dx[indObs] = muhat*Bhat-Q/V*Bhat+Cobs[0]*(muhat*Bhat-mu*B)\n dx[indObs+1] = -k*muhat*Bhat+Q/V*(S0-Shat)+Cobs[1]*(muhat*Bhat-mu*B)\n return dx\n\ndef culture_cont3(Sast,control_type,coeffcontrol,obs,coeffobs,sat,coeffsat,disturb):\n tmax = 100\n temps = np.linspace(0,tmax,2000) # vecteur temps\n\n k = 0.6; muast = 2.3; KS = 10; KI = 0.1; S0 = 3.2; B0 = 9; Qin = 0.01; V = 0.5; Qmax = 0.1;\n\n B0obs = coeffobs[0]\n S0obs = coeffobs[1]\n Cobs = coeffobs[2:]\n \n # integration numerique de l'EDO\n if control_type in ['PI','PID']: \n x0 = np.array([B0,S0,0,B0obs,S0obs,0])\n indObs = 3\n else: \n x0 = np.array([B0,S0,B0obs,S0obs])\n indObs = 2\n x = scint.odeint(reacteur_obs,x0,temps,args=(k,muast,KS,KI,Qin,V,S0,Qmax,Sast, \\\n control_type,coeffcontrol,obs,Cobs,sat,coeffsat,disturb))\n if obs == 0:\n u = fonction_u(temps,x[:,0:indObs],Sast,k,muast,KI,KS,V,S0,control_type,coeffcontrol)\n elif obs == 1:\n u = fonction_u(temps,x[:,indObs:],Sast,k,muast,KI,KS,V,S0,control_type,coeffcontrol)\n \n if sat > 0: u = (u<=0)*0+(0<u)*(u<Qmax)*u+(u>Qmax)*Qmax\n\n plt.figure(figsize = (10, 3))\n plt.subplots_adjust(hspace=0.4,wspace=0.4)\n plt.subplot2grid((1,2),(0,0))\n plt.plot(temps,x[:,0],'r',label='Biomasse')\n plt.plot(temps,x[:,1],'g',label='Substrat')\n plt.plot(temps,x[:,indObs],'r--',label='Biomasse estimée')\n plt.plot(temps,x[:,indObs+1],'g--',label='Substrat estimé')\n plt.plot(np.array([0,temps[-1]]),np.array([Sast,Sast]),'k--',label='S*')\n plt.legend(); plt.xlabel('time (h)')\n \n plt.subplot2grid((1,2),(0,1))\n plt.plot(temps,u,'r',label='Debit')\n plt.legend(); plt.xlabel('time (h)')\n plt.show()\n\n# Simulation avec loi de commande PI, observateur et saturation\nculture_cont3(2,'PI',np.array([0.07,0.01]),1,np.array([13,4,-2,0.1]),1,0,0.2)",
"Imposer une saturation sur une commande peut néanmoins dégrader fortement la dynamique du système en boucle fermée, voire carrément déstabiliser le système.\nCela est essentiellement dû au fait que lorsque la commande est saturée, le terme intégral continu de grossir car il continue d'intégrer l'erreur: on parle d'effet \"wind-up\".\n Méthode anti-windup \nPour pallier ce problème, on utilise des techniques d'\"anti-wind-up\" qui consistent en fait à vider le terme intégral (c'est à dire le diminuer) lorsque la commande est saturée. Concrètement, au lieu de calculer le terme intégral \n$$I=K_i\\int_0^t(y^\\ast-y^m(s))ds$$\ncomme présenté précédemment, c'est à dire en résolvant:\n$$ \\frac{dI}{dt}=K_i(y^\\ast-y^m),$$\non va résoudre l'équation:\n$$ \\frac{dI}{dt}=K_i(y^\\ast-y^m)+K_{aw}(u_{réel}-u_{calc})$$.",
"def reacteur_obs(x,t,k,muast,KS,KI,Qin,V,S0,Qmax,Sast,control_type,coeffcontrol,obs,Cobs,sat,coeffsat,disturb):\n B = x[0] #biomass\n S = x[1] #substrat\n \n if control_type in ['PI','PID']:\n indObs = 3\n Shat = x[indObs+1]\n dx = np.zeros(6)\n dx[2] = Sast-S\n dx[5] = Sast-Shat\n else: \n dx = np.zeros(4)\n indObs = 2\n \n if obs == 0:\n xest = x[0:indObs]\n elif obs == 1:\n xest = x[indObs:]\n \n Q = fonction_u(t,xest,Sast,k,muast,KI,KS,V,S0,control_type,coeffcontrol)*(1+disturb)\n \n if sat == 1:\n if Q<0: Q=0\n elif Q>Qmax: Q=Qmax\n elif sat == 2:\n if Q<0: Qr=0\n elif Q>Qmax: Qr=Qmax\n else:\n Qr = Q\n if control_type in ['PI','PID']:\n dx[2] = Sast-S+coeffsat*(Qr-Q)\n dx[5] = Sast-Shat+coeffsat*(Qr-Q)\n Q = Qr\n \n mu = muast*S/(KS+S+S**2/KI)\n dx[0] = mu*B-Q/V*B\n dx[1] = -k*mu*B+Q/V*(S0-S)\n \n Bhat = x[indObs]\n Shat = x[indObs+1]\n muhat = muast*Shat/(KS+Shat+Shat**2/KI)\n dx[indObs] = muhat*Bhat-Q/V*Bhat+Cobs[0]*(muhat*Bhat-mu*B)\n dx[indObs+1] = -k*muhat*Bhat+Q/V*(S0-Shat)+Cobs[1]*(muhat*Bhat-mu*B)\n return dx\n\n# Simulation avec loi de commande PI, observateur et saturation et anti-windup\nculture_cont3(2,'PI',np.array([0.07,0.01]),1,np.array([7,4,-2,0.1]),2,100,0.2)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
james-prior/cohpy
|
20160708-dojo-fibonacci-unroll-for-speed.ipynb
|
mit
|
[
"This plays with optimizing a fibonacci generator function for speed.\nStudy loop unrolling.",
"from itertools import islice",
"First we start with straightforward fibonacci generator function.",
"def fibonacci():\n a, b = 0, 1\n while True:\n yield a\n a, b = b, a + b\n\nn = 45\n\nknown_good_output = tuple(islice(fibonacci(), n))\n# known_good_output\n\n%timeit sum(islice(fibonacci(), n))",
"Next, we unroll the loop. Note that there are no assignments that just move things around. There is no wasted motion inside the loop.\nIt reminds me of the\n[musical round](https://en.wikipedia.org/wiki/Round_(music)\nThree Blind Mice.",
"def fibonacci():\n a, b = 0, 1\n while True:\n yield a\n c = a + b\n yield b\n a = b + c\n yield c\n b = c + a\n\nassert(known_good_output == tuple(islice(fibonacci(), n)))\n\n%timeit sum(islice(fibonacci(), n))",
"Next, we unroll the loop more and more to see if that makes the generator faster.",
"def fibonacci():\n a, b = 0, 1\n while True:\n yield a\n c = a + b\n yield b\n a = b + c\n yield c\n b = c + a\n yield a\n c = a + b\n yield b\n a = b + c\n yield c\n b = c + a\n\nassert(known_good_output == tuple(islice(fibonacci(), n)))\n\n%timeit sum(islice(fibonacci(), n))\n\ndef fibonacci():\n a, b = 0, 1\n yield a\n yield b\n while True:\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n\nassert(known_good_output == tuple(islice(fibonacci(), n)))\n\n%timeit sum(islice(fibonacci(), n))\n\ndef fibonacci():\n a, b = 0, 1\n yield a\n yield b\n while True:\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n\nassert(known_good_output == tuple(islice(fibonacci(), n)))\n\n%timeit sum(islice(fibonacci(), n))\n\ndef fibonacci():\n a, b = 0, 1\n yield a\n yield b\n while True:\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n\nassert(known_good_output == tuple(islice(fibonacci(), n)))\n\n%timeit sum(islice(fibonacci(), n))\n\ndef fibonacci():\n a, b = 0, 1\n yield a\n yield b\n while True:\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n c = a + b\n yield c\n a = b + c\n yield a\n b = c + a\n yield b\n\nassert(known_good_output == tuple(islice(fibonacci(), n)))\n\n%timeit sum(islice(fibonacci(), n))",
"I get significantly different results each time I run the cells in the notebook, so it is unclear how much loop unrolling is good."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tiagoantao/biopython-notebook
|
notebooks/17 - Graphics including GenomeDiagram.ipynb
|
mit
|
[
"Source of the materials: Biopython cookbook (adapted)",
"#Lets load notebook's Image\nfrom IPython.core.display import Image \n\nfrom reportlab.lib import colors\nfrom reportlab.lib.units import cm\nfrom Bio.Graphics import GenomeDiagram\nfrom Bio import SeqIO",
"Graphics including GenomeDiagram\nThe Bio.Graphics module depends on the third party Python library ReportLab. Although focused on producing PDF files, ReportLab can also create encapsulated postscript (EPS) and (SVG) files. In addition to these vector based images, provided certain further dependencies such as the Python Imaging Library (PIL) are installed, ReportLab can also output bitmap images (including JPEG, PNG, GIF, BMP and PICT formats).\nGenomeDiagram\nIntroduction\nThe Bio.Graphics.GenomeDiagram module was added to Biopython 1.50, having previously been available as a separate Python module dependent on Biopython.\nAs the name might suggest, GenomeDiagram was designed for drawing whole genomes, in particular prokaryotic genomes, either as linear diagrams (optionally broken up into fragments to fit better) or as circular wheel diagrams. It proved also well suited to drawing quite detailed figures for smaller genomes such as phage, plasmids or mitochrondia.\nThis module is easiest to use if you have your genome loaded as a SeqRecord object containing lots of SeqFeature objects - for example as loaded from a GenBank file.\nDiagrams, tracks, feature-sets and features\nGenomeDiagram uses a nested set of objects. At the top level, you have a diagram object representing a sequence (or sequence region) along the horizontal axis (or circle). A diagram can contain one or more tracks, shown stacked vertically (or radially on circular diagrams). These will typically all have the same length and represent the same sequence region. You might use one track to show the gene locations, another to show regulatory regions, and a third track to show the GC percentage.\nThe most commonly used type of track will contain features, bundled together in feature-sets. You might choose to use one feature-set for all your CDS features, and another for tRNA features. This isn’t required - they can all go in the same feature-set, but it makes it easier to update the properties of just selected features (e.g. make all the tRNA features red).\nThere are two main ways to build up a complete diagram. Firstly, the top down approach where you create a diagram object, and then using its methods add track(s), and use the track methods to add feature-set(s), and use their methods to add the features. Secondly, you can create the individual objects separately (in whatever order suits your code), and then combine them.\nA top down example\nWe’re going to draw a whole genome from a SeqRecord object read in from a GenBank file. This example uses the pPCP1 plasmid from Yersinia pestis biovar Microtus (<a href=\"data/NC_005816.gb\">NC_005816.gb</a>)",
"record = SeqIO.read(\"data/NC_005816.gb\", \"genbank\")",
"We’re using a top down approach, so after loading in our sequence we next create an empty diagram, then add an (empty) track, and to that add an (empty) feature set:",
"gd_diagram = GenomeDiagram.Diagram(\"Yersinia pestis biovar Microtus plasmid pPCP1\")\ngd_track_for_features = gd_diagram.new_track(1, name=\"Annotated Features\")\ngd_feature_set = gd_track_for_features.new_set()",
"Now the fun part - we take each gene SeqFeature object in our SeqRecord, and use it to generate a feature on the diagram. We’re going to color them blue, alternating between a dark blue and a light blue.",
"for feature in record.features:\n if feature.type != \"gene\":\n #Exclude this feature\n continue\n if len(gd_feature_set) % 2 == 0:\n color = colors.blue\n else:\n color = colors.lightblue\n gd_feature_set.add_feature(feature, color=color, label=True)",
"Now we come to actually making the output file. This happens in two steps, first we call the draw method, which creates all the shapes using ReportLab objects. Then we call the write method which renders these to the requested file format. Note you can output in multiple file formats:",
"gd_diagram.draw(format=\"linear\", orientation=\"landscape\", pagesize='A4',\n fragments=4, start=0, end=len(record))\ngd_diagram.write(\"data/plasmid_linear.png\", \"png\")",
"Lets have a look at the previous one:\n<img src=\"plasmid_linear.png\">\nNotice that the fragments argument which we set to four controls how many pieces the genome gets broken up into.\nIf you want to do a circular figure, then try this:",
"gd_diagram.draw(format=\"circular\", circular=True, pagesize=(20*cm,20*cm),\n start=0, end=len(record), circle_core=0.7)\ngd_diagram.write(\"data/plasmid_circular.png\", \"PNG\")\nImage(\"data/plasmid_circular.png\")",
"These figures are not very exciting, but we’ve only just got started.\nA bottom up example\nNow let’s produce exactly the same figures, but using the bottom up approach. This means we create the different objects directly (and this can be done in almost any order) and then combine them.",
"record = SeqIO.read(\"data/NC_005816.gb\", \"genbank\")\n\n#Create the feature set and its feature objects,\ngd_feature_set = GenomeDiagram.FeatureSet()\nfor feature in record.features:\n if feature.type != \"gene\":\n #Exclude this feature\n continue\n if len(gd_feature_set) % 2 == 0:\n color = colors.blue\n else:\n color = colors.lightblue\n gd_feature_set.add_feature(feature, color=color, label=True)\n#(this for loop is the same as in the previous example)\n\n#Create a track, and a diagram\ngd_track_for_features = GenomeDiagram.Track(name=\"Annotated Features\")\ngd_diagram = GenomeDiagram.Diagram(\"Yersinia pestis biovar Microtus plasmid pPCP1\")\n\n#Now have to glue the bits together...\ngd_track_for_features.add_set(gd_feature_set)\ngd_diagram.add_track(gd_track_for_features, 1)",
"You can now call the draw and write methods as before to produce a linear or circular diagram, using the code at the end of the top-down example above. The figures should be identical.\nFeatures without a SeqFeature\nIn the above example we used a SeqRecord’s SeqFeature objects to build our diagram. Sometimes you won’t have SeqFeature objects, but just the coordinates for a feature you want to draw. You have to create minimal SeqFeature object, but this is easy:",
"from Bio.SeqFeature import SeqFeature, FeatureLocation\nmy_seq_feature = SeqFeature(FeatureLocation(50,100),strand=+1)",
"For strand, use +1 for the forward strand, -1 for the reverse strand, and None for both. Here is a short self contained example:",
"gdd = GenomeDiagram.Diagram('Test Diagram')\ngdt_features = gdd.new_track(1, greytrack=False)\ngds_features = gdt_features.new_set()\n\n#Add three features to show the strand options,\nfeature = SeqFeature(FeatureLocation(25, 125), strand=+1)\ngds_features.add_feature(feature, name=\"Forward\", label=True)\nfeature = SeqFeature(FeatureLocation(150, 250), strand=None)\ngds_features.add_feature(feature, name=\"Strandless\", label=True)\nfeature = SeqFeature(FeatureLocation(275, 375), strand=-1)\ngds_features.add_feature(feature, name=\"Reverse\", label=True)\n\ngdd.draw(format='linear', pagesize=(15*cm,4*cm), fragments=1,\n start=0, end=400)\ngdd.write(\"data/GD_labels_default.png\", \"png\")\nImage(\"data/GD_labels_default.png\")",
"The top part of the image in the next subsection shows the output (in the default feature color, pale green).\nNotice that we have used the name argument here to specify the caption text for these features. This is discussed in more detail next.\nFeature captions\nRecall we used the following (where feature was a SeqFeature object) to add a feature to the diagram:",
"gd_feature_set.add_feature(feature, color=color, label=True)",
"In the example above the SeqFeature annotation was used to pick a sensible caption for the features. By default the following possible entries under the SeqFeature object’s qualifiers dictionary are used: gene, label, name, locus_tag, and product. More simply, you can specify a name directly:",
"gd_feature_set.add_feature(feature, color=color, label=True, name=\"My Gene\")",
"In addition to the caption text for each feature’s label, you can also choose the font, position (this defaults to the start of the sigil, you can also choose the middle or at the end) and orientation (for linear diagrams only, where this defaults to rotated by 45 degrees):",
"#Large font, parallel with the track\ngd_feature_set.add_feature(feature, label=True, color=\"green\",\n label_size=25, label_angle=0)\n\n#Very small font, perpendicular to the track (towards it)\ngd_feature_set.add_feature(feature, label=True, color=\"purple\",\n label_position=\"end\",\n label_size=4, label_angle=90)\n\n#Small font, perpendicular to the track (away from it)\ngd_feature_set.add_feature(feature, label=True, color=\"blue\",\n label_position=\"middle\",\n label_size=6, label_angle=-90)",
"Combining each of these three fragments with the complete example in the previous section should give something like this:",
"gdd.draw(format='linear', pagesize=(15*cm,4*cm), fragments=1,\n start=0, end=400)\ngdd.write(\"data/GD_labels.png\", \"png\")\nImage(\"data/GD_labels.png\")",
"We’ve not shown it here, but you can also set label_color to control the label’s color.\nYou’ll notice the default font is quite small - this makes sense because you will usually be drawing many (small) features on a page, not just a few large ones as shown here.\nFeature sigils\nThe examples above have all just used the default sigil for the feature, a plain box, which was all that was available in the last publicly released standalone version of GenomeDiagram. Arrow sigils were included when GenomeDiagram was added to Biopython 1.50:",
"#Default uses a BOX sigil\ngd_feature_set.add_feature(feature)\n\n#You can make this explicit:\ngd_feature_set.add_feature(feature, sigil=\"BOX\")\n\n#Or opt for an arrow:\ngd_feature_set.add_feature(feature, sigil=\"ARROW\")\n\n#Box with corners cut off (making it an octagon)\ngd_feature_set.add_feature(feature, sigil=\"OCTO\")\n\n#Box with jagged edges (useful for showing breaks in contains)\ngd_feature_set.add_feature(feature, sigil=\"JAGGY\")\n\n#Arrow which spans the axis with strand used only for direction\ngd_feature_set.add_feature(feature, sigil=\"BIGARROW\")",
"These are shown below. Most sigils fit into a bounding box (as given by the default BOX sigil), either above or below the axis for the forward or reverse strand, or straddling it (double the height) for strand-less features. The BIGARROW sigil is different, always straddling the axis with the direction taken from the feature’s stand.\nArrow sigils\nWe introduced the arrow sigils in the previous section. There are two additional options to adjust the shapes of the arrows, firstly the thickness of the arrow shaft, given as a proportion of the height of the bounding box:",
"#Full height shafts, giving pointed boxes:\ngd_feature_set.add_feature(feature, sigil=\"ARROW\", color=\"brown\",\n arrowshaft_height=1.0)\n#Or, thin shafts: \ngd_feature_set.add_feature(feature, sigil=\"ARROW\", color=\"teal\",\n arrowshaft_height=0.2)\n#Or, very thin shafts:\ngd_feature_set.add_feature(feature, sigil=\"ARROW\", color=\"darkgreen\",\n arrowshaft_height=0.1)",
"The results are shown below:\nSecondly, the length of the arrow head - given as a proportion of the height of the bounding box (defaulting to 0.5, or 50%):",
"#Short arrow heads:\ngd_feature_set.add_feature(feature, sigil=\"ARROW\", color=\"blue\",\n arrowhead_length=0.25)\n#Or, longer arrow heads:\ngd_feature_set.add_feature(feature, sigil=\"ARROW\", color=\"orange\",\n arrowhead_length=1)\n#Or, very very long arrow heads (i.e. all head, no shaft, so triangles):\ngd_feature_set.add_feature(feature, sigil=\"ARROW\", color=\"red\",\n arrowhead_length=10000)",
"The results are shown below:\nBiopython 1.61 adds a new BIGARROW sigil which always stradles the axis, pointing left for the reverse strand or right otherwise:",
"#A large arrow straddling the axis:\ngd_feature_set.add_feature(feature, sigil=\"BIGARROW\")",
"All the shaft and arrow head options shown above for the ARROW sigil can be used for the BIGARROW sigil too.\nA nice example\nNow let’s return to the pPCP1 plasmid from Yersinia pestis biovar Microtus, and the top down approach used above, but take advantage of the sigil options we’ve now discussed. This time we’ll use arrows for the genes, and overlay them with strand-less features (as plain boxes) showing the position of some restriction digest sites.",
"record = SeqIO.read(\"data/NC_005816.gb\", \"genbank\")\n\ngd_diagram = GenomeDiagram.Diagram(record.id)\ngd_track_for_features = gd_diagram.new_track(1, name=\"Annotated Features\")\ngd_feature_set = gd_track_for_features.new_set()\n\nfor feature in record.features:\n if feature.type != \"gene\":\n #Exclude this feature\n continue\n if len(gd_feature_set) % 2 == 0:\n color = colors.blue\n else:\n color = colors.lightblue\n gd_feature_set.add_feature(feature, sigil=\"ARROW\",\n color=color, label=True,\n label_size = 14, label_angle=0)\n\n#I want to include some strandless features, so for an example\n#will use EcoRI recognition sites etc.\nfor site, name, color in [(\"GAATTC\",\"EcoRI\",colors.green),\n (\"CCCGGG\",\"SmaI\",colors.orange),\n (\"AAGCTT\",\"HindIII\",colors.red),\n (\"GGATCC\",\"BamHI\",colors.purple)]:\n index = 0\n while True:\n index = record.seq.find(site, start=index)\n if index == -1 : break\n feature = SeqFeature(FeatureLocation(index, index+len(site)))\n gd_feature_set.add_feature(feature, color=color, name=name,\n label=True, label_size = 10,\n label_color=color)\n index += len(site)\n\ngd_diagram.draw(format=\"linear\", pagesize='A4', fragments=4,\n start=0, end=len(record))\ngd_diagram.write(\"data/plasmid_linear_nice.png\", \"png\")\n\nImage(\"data/plasmid_linear_nice.png\")\n\ngd_diagram.draw(format=\"circular\", circular=True, pagesize=(20*cm,20*cm),\n start=0, end=len(record), circle_core = 0.5)\ngd_diagram.write(\"data/plasmid_circular_nice.png\", \"png\")\n\nImage(\"data/plasmid_circular_nice.png\")",
"Multiple tracks\nAll the examples so far have used a single track, but you can have more than one track – for example show the genes on one, and repeat regions on another. In this example we’re going to show three phage genomes side by side to scale, inspired by Figure 6 in Proux et al. (2002). We’ll need the GenBank files for the following three phage:\n\nNC_002703 – Lactococcus phage Tuc2009, complete genome (38347 bp)\nAF323668 – Bacteriophage bIL285, complete genome (35538 bp)\nNC_003212 – Listeria innocua Clip11262, complete genome, of which we are focussing only on integrated prophage 5 (similar length).\n\nYou can download these using Entrez if you like. For the third record we’ve worked out where the phage is integrated into the genome, and slice the record to extract it, and must also reverse complement to match the orientation of the first two phage:",
"A_rec = SeqIO.read(\"data/NC_002703.gbk\", \"gb\")\nB_rec = SeqIO.read(\"data/AF323668.gbk\", \"gb\")\n",
"The figure we are imitating used different colors for different gene functions. One way to do this is to edit the GenBank file to record color preferences for each feature - something Sanger’s Artemis editor does, and which GenomeDiagram should understand. Here however, we’ll just hard code three lists of colors.\nNote that the annotation in the GenBank files doesn’t exactly match that shown in Proux et al., they have drawn some unannotated genes.",
"from reportlab.lib.colors import red, grey, orange, green, brown, blue, lightblue, purple\n\nA_colors = [red]*5 + [grey]*7 + [orange]*2 + [grey]*2 + [orange] + [grey]*11 + [green]*4 \\\n + [grey] + [green]*2 + [grey, green] + [brown]*5 + [blue]*4 + [lightblue]*5 \\\n + [grey, lightblue] + [purple]*2 + [grey]\nB_colors = [red]*6 + [grey]*8 + [orange]*2 + [grey] + [orange] + [grey]*21 + [green]*5 \\\n + [grey] + [brown]*4 + [blue]*3 + [lightblue]*3 + [grey]*5 + [purple]*2\n",
"Now to draw them – this time we add three tracks to the diagram, and also notice they are given different start/end values to reflect their different lengths.",
"name = \"data/Proux Fig 6\"\ngd_diagram = GenomeDiagram.Diagram(name)\nmax_len = 0\nfor record, gene_colors in zip([A_rec, B_rec], [A_colors, B_colors]):\n max_len = max(max_len, len(record))\n gd_track_for_features = gd_diagram.new_track(1,\n name=record.name,\n greytrack=True,\n start=0, end=len(record))\n gd_feature_set = gd_track_for_features.new_set()\n\n i = 0\n for feature in record.features:\n if feature.type != \"gene\":\n #Exclude this feature \n continue\n gd_feature_set.add_feature(feature, sigil=\"ARROW\",\n color=gene_colors[i], label=True,\n name = str(i+1),\n label_position=\"start\",\n label_size = 6, label_angle=0)\n i+=1\n\ngd_diagram.draw(format=\"linear\", pagesize='A4', fragments=1,\n start=0, end=max_len)\ngd_diagram.write(name + \".png\", \"png\")\nImage(name + \".png\")",
"I did wonder why in the original manuscript there were no red or orange genes marked in the bottom phage. Another important point is here the phage are shown with different lengths - this is because they are all drawn to the same scale (they are different lengths).\nThe key difference from the published figure is they have color-coded links between similar proteins – which is what we will do in the next section.\nCross-Links between tracks\nBiopython 1.59 added the ability to draw cross links between tracks - both simple linear diagrams as we will show here, but also linear diagrams split into fragments and circular diagrams.\nContinuing the example from the previous section inspired by Figure 6 from Proux et al. 2002, we would need a list of cross links between pairs of genes, along with a score or color to use. Realistically you might extract this from a BLAST file computationally, but here I have manually typed them in.\nMy naming convention continues to refer to the three phage as A, B and C. Here are the links we want to show between A and B, given as a list of tuples (percentage similarity score, gene in A, gene in B).",
"#Tuc2009 (NC_002703) vs bIL285 (AF323668)\nA_vs_B = [\n (99, \"Tuc2009_01\", \"int\"),\n (33, \"Tuc2009_03\", \"orf4\"),\n (94, \"Tuc2009_05\", \"orf6\"),\n (100,\"Tuc2009_06\", \"orf7\"),\n (97, \"Tuc2009_07\", \"orf8\"),\n (98, \"Tuc2009_08\", \"orf9\"),\n (98, \"Tuc2009_09\", \"orf10\"),\n (100,\"Tuc2009_10\", \"orf12\"),\n (100,\"Tuc2009_11\", \"orf13\"),\n (94, \"Tuc2009_12\", \"orf14\"),\n (87, \"Tuc2009_13\", \"orf15\"),\n (94, \"Tuc2009_14\", \"orf16\"),\n (94, \"Tuc2009_15\", \"orf17\"),\n (88, \"Tuc2009_17\", \"rusA\"),\n (91, \"Tuc2009_18\", \"orf20\"),\n (93, \"Tuc2009_19\", \"orf22\"),\n (71, \"Tuc2009_20\", \"orf23\"),\n (51, \"Tuc2009_22\", \"orf27\"),\n (97, \"Tuc2009_23\", \"orf28\"),\n (88, \"Tuc2009_24\", \"orf29\"),\n (26, \"Tuc2009_26\", \"orf38\"),\n (19, \"Tuc2009_46\", \"orf52\"),\n (77, \"Tuc2009_48\", \"orf54\"),\n (91, \"Tuc2009_49\", \"orf55\"),\n (95, \"Tuc2009_52\", \"orf60\"), \n]",
"For the first and last phage these identifiers are locus tags, for the middle phage there are no locus tags so I’ve used gene names instead. The following little helper function lets us lookup a feature using either a locus tag or gene name:",
"def get_feature(features, id, tags=[\"locus_tag\", \"gene\"]):\n \"\"\"Search list of SeqFeature objects for an identifier under the given tags.\"\"\"\n for f in features:\n for key in tags:\n #tag may not be present in this feature \n for x in f.qualifiers.get(key, []):\n if x == id:\n return f\n raise KeyError(id)",
"We can now turn those list of identifier pairs into SeqFeature pairs, and thus find their location co-ordinates. We can now add all that code and the following snippet to the previous example (just before the gd_diagram.draw(...) line – see the finished example script <a href=\"data/Proux_et_al_2002_Figure_6.py\">Proux_et_al_2002_Figure_6.py</a> included in the Doc/examples folder of the Biopython source code) to add cross links to the figure:",
"from Bio.Graphics.GenomeDiagram import CrossLink\nfrom reportlab.lib import colors\n#Note it might have been clearer to assign the track numbers explicitly... \nfor rec_X, tn_X, rec_Y, tn_Y, X_vs_Y in [(A_rec, 2, B_rec, 1, A_vs_B)]:\n track_X = gd_diagram.tracks[tn_X]\n track_Y = gd_diagram.tracks[tn_Y]\n for score, id_X, id_Y in X_vs_Y:\n feature_X = get_feature(rec_X.features, id_X)\n feature_Y = get_feature(rec_Y.features, id_Y)\n color = colors.linearlyInterpolatedColor(colors.white, colors.firebrick, 0, 100, score)\n link_xy = CrossLink((track_X, feature_X.location.start, feature_X.location.end),\n (track_Y, feature_Y.location.start, feature_Y.location.end),\n color, colors.lightgrey)\n gd_diagram.cross_track_links.append(link_xy)\ngd_diagram.draw(format=\"linear\", pagesize='A4', fragments=1,\n start=0, end=max_len)\ngd_diagram.write(\"data/cross.png\", \"png\")\nImage(\"data/cross.png\")",
"There are several important pieces to this code. First the GenomeDiagram object has a cross_track_links attribute which is just a list of CrossLink objects. Each CrossLink object takes two sets of track-specific co-ordinates (here given as tuples, you can alternatively use a GenomeDiagram.Feature object instead). You can optionally supply a colour, border color, and say if this link should be drawn flipped (useful for showing inversions).\nYou can also see how we turn the BLAST percentage identity score into a colour, interpolating between white (0%) and a dark red (100%). In this example we don’t have any problems with overlapping cross-links. One way to tackle that is to use transparency in ReportLab, by using colors with their alpha channel set. However, this kind of shaded color scheme combined with overlap transparency would be difficult to interpret. The result:\nThere is still a lot more that can be done within Biopython to help improve this figure. First of all, the cross links in this case are between proteins which are drawn in a strand specific manor. It can help to add a background region (a feature using the ‘BOX’ sigil) on the feature track to extend the cross link. Also, we could reduce the vertical height of the feature tracks to allocate more to the links instead – one way to do that is to allocate space for empty tracks. Furthermore, in cases like this where there are no large gene overlaps, we can use the axis-straddling BIGARROW sigil, which allows us to further reduce the vertical space needed for the track. These improvements are demonstrated in the example script <a href=\"data/Proux_et_al_2002_Figure_6.py\">Proux_et_al_2002_Figure_6.py</a>.\nBeyond that, finishing touches you might want to do manually in a vector image editor include fine tuning the placement of gene labels, and adding other custom annotation such as highlighting particular regions.\nAlthough not really necessary in this example since none of the cross-links overlap, using a transparent color in ReportLab is a very useful technique for superimposing multiple links. However, in this case a shaded color scheme should be avoided.\nChromosomes\nThe Bio.Graphics.BasicChromosome module allows drawing of chromosomes. There is an example in Jupe et al. (2012) (open access) using colors to highlight different gene families.\nSimple Chromosomes\nImportant: To continue this example you have first to download a few chromosomes from Arabidopsis thaliana, the code to help you is here:\nVery important: This is slow and clogs the network, you only need to do this once (even if you close the notebook as the download will be persistent)",
"from ftplib import FTP\nftp = FTP('ftp.ncbi.nlm.nih.gov')\nprint(\"Logging in\")\nftp.login()\nftp.cwd('genomes/archive/old_genbank/A_thaliana/OLD/')\nprint(\"Starting download - This can be slow!\")\nfor chro, name in [\n (\"CHR_I\", \"NC_003070.fna\"), (\"CHR_I\", \"NC_003070.gbk\"),\n (\"CHR_II\", \"NC_003071.fna\"), (\"CHR_II\", \"NC_003071.gbk\"),\n (\"CHR_III\", \"NC_003072.fna\"), (\"CHR_III\", \"NC_003072.gbk\"),\n (\"CHR_IV\", \"NC_003073.fna\"), (\"CHR_IV\", \"NC_003073.gbk\"),\n (\"CHR_V\", \"NC_003074.fna\"), (\"CHR_V\", \"NC_003074.gbk\")]:\n print(\"Downloading\", chro, name)\n ftp.retrbinary('RETR %s/%s' % (chro, name), open('data/%s' % name, 'wb').write)\nftp.quit()\nprint('Done')",
"Here is a very simple example - for which we’ll use Arabidopsis thaliana.\nYou can skip this bit, but first I downloaded the five sequenced chromosomes from the NCBI’s FTP site (per the code above) and then parsed them with Bio.SeqIO to find out their lengths. You could use the GenBank files for this, but it is faster to use the FASTA files for the whole chromosomes:",
"from Bio import SeqIO\nentries = [(\"Chr I\", \"NC_003070.fna\"),\n (\"Chr II\", \"NC_003071.fna\"),\n (\"Chr III\", \"NC_003072.fna\"),\n (\"Chr IV\", \"NC_003073.fna\"),\n (\"Chr V\", \"NC_003074.fna\")]\nfor (name, filename) in entries:\n record = SeqIO.read(\"data/\" + filename, \"fasta\")\n print(name, len(record))",
"This gave the lengths of the five chromosomes, which we’ll now use in the following short demonstration of the BasicChromosome module:",
"from reportlab.lib.units import cm\nfrom Bio.Graphics import BasicChromosome\n\nentries = [(\"Chr I\", 30432563),\n (\"Chr II\", 19705359),\n (\"Chr III\", 23470805),\n (\"Chr IV\", 18585042),\n (\"Chr V\", 26992728)]\n\nmax_len = 30432563 #Could compute this\ntelomere_length = 1000000 #For illustration\n \nchr_diagram = BasicChromosome.Organism(output_format=\"png\")\nchr_diagram.page_size = (29.7*cm, 21*cm) #A4 landscape\n\nfor name, length in entries:\n cur_chromosome = BasicChromosome.Chromosome(name)\n #Set the scale to the MAXIMUM length plus the two telomeres in bp,\n #want the same scale used on all five chromosomes so they can be\n #compared to each other\n cur_chromosome.scale_num = max_len + 2 * telomere_length\n\n #Add an opening telomere\n start = BasicChromosome.TelomereSegment()\n start.scale = telomere_length\n cur_chromosome.add(start)\n\n #Add a body - using bp as the scale length here.\n body = BasicChromosome.ChromosomeSegment()\n body.scale = length\n cur_chromosome.add(body)\n\n #Add a closing telomere\n end = BasicChromosome.TelomereSegment(inverted=True)\n end.scale = telomere_length\n cur_chromosome.add(end)\n\n #This chromosome is done\n chr_diagram.add(cur_chromosome)\n\nchr_diagram.draw(\"data/simple_chrom.png\", \"Arabidopsis thaliana\")\nImage(\"data/simple_chrom.png\")",
"This example is deliberately short and sweet. The next example shows the location of features of interest.\nContinuing from the previous example, let’s also show the tRNA genes. We’ll get their locations by parsing the GenBank files for the five Arabidopsis thaliana chromosomes. You’ll need to download these files from the NCBI FTP site.",
"entries = [(\"Chr I\", \"NC_003070.gbk\"),\n (\"Chr II\", \"NC_003071.gbk\"),\n (\"Chr III\", \"NC_003072.gbk\"),\n (\"Chr IV\", \"NC_003073.gbk\"),\n (\"Chr V\", \"NC_003074.gbk\")]\n\nmax_len = 30432563 #Could compute this\ntelomere_length = 1000000 #For illustration\n\nchr_diagram = BasicChromosome.Organism(output_format=\"png\")\nchr_diagram.page_size = (29.7*cm, 21*cm) #A4 landscape\n\nfor index, (name, filename) in enumerate(entries):\n record = SeqIO.read(\"data/\" + filename,\"genbank\")\n length = len(record)\n features = [f for f in record.features if f.type==\"tRNA\"]\n #Record an Artemis style integer color in the feature's qualifiers,\n #1 = Black, 2 = Red, 3 = Green, 4 = blue, 5 =cyan, 6 = purple \n for f in features: f.qualifiers[\"color\"] = [index+2]\n\n cur_chromosome = BasicChromosome.Chromosome(name)\n #Set the scale to the MAXIMUM length plus the two telomeres in bp,\n #want the same scale used on all five chromosomes so they can be\n #compared to each other\n cur_chromosome.scale_num = max_len + 2 * telomere_length\n\n #Add an opening telomere\n start = BasicChromosome.TelomereSegment()\n start.scale = telomere_length\n cur_chromosome.add(start)\n\n #Add a body - again using bp as the scale length here.\n body = BasicChromosome.AnnotatedChromosomeSegment(length, features)\n body.scale = length\n cur_chromosome.add(body)\n\n #Add a closing telomere\n end = BasicChromosome.TelomereSegment(inverted=True)\n end.scale = telomere_length\n cur_chromosome.add(end)\n\n #This chromosome is done\n chr_diagram.add(cur_chromosome)\n\nchr_diagram.draw(\"data/tRNA_chrom.png\", \"Arabidopsis thaliana\")\nImage(\"data/tRNA_chrom.png\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
greg-ashby/deep-learning-nanodegree
|
first-neural-network/Your_first_neural_network.ipynb
|
mit
|
[
"Your first neural network\nIn this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.",
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt",
"Load and prepare the data\nA critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!",
"data_path = 'Bike-Sharing-Dataset/hour.csv'\n\nrides = pd.read_csv(data_path)\n\nrides.head()",
"Checking out the data\nThis dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.\nBelow is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model.",
"rides[:24*10].plot(x='dteday', y='cnt')",
"Dummy variables\nHere we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies().",
"dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']\nfor each in dummy_fields:\n dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)\n rides = pd.concat([rides, dummies], axis=1)\n\nfields_to_drop = ['instant', 'dteday', 'season', 'weathersit', \n 'weekday', 'atemp', 'mnth', 'workingday', 'hr']\ndata = rides.drop(fields_to_drop, axis=1)\ndata.head()",
"Scaling target variables\nTo make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.\nThe scaling factors are saved so we can go backwards when we use the network for predictions.",
"quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']\n# Store scalings in a dictionary so we can convert back later\nscaled_features = {}\nfor each in quant_features:\n mean, std = data[each].mean(), data[each].std()\n scaled_features[each] = [mean, std]\n data.loc[:, each] = (data[each] - mean)/std",
"Splitting the data into training, testing, and validation sets\nWe'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.",
"# Save data for approximately the last 21 days \ntest_data = data[-21*24:]\n\n# Now remove the test data from the data set \ndata = data[:-21*24]\n\n# Separate the data into features and targets\ntarget_fields = ['cnt', 'casual', 'registered']\nfeatures, targets = data.drop(target_fields, axis=1), data[target_fields]\ntest_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]",
"We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).",
"# Hold out the last 60 days or so of the remaining data as a validation set\ntrain_features, train_targets = features[:-60*24], targets[:-60*24]\nval_features, val_targets = features[-60*24:], targets[-60*24:]",
"Time to build the network\nBelow you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.\n<img src=\"assets/neural_network.png\" width=300px>\nThe network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.\nWe use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.\n\nHint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.\n\nBelow, you have these tasks:\n1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.\n2. Implement the forward pass in the train method.\n3. Implement the backpropagation algorithm in the train method, including calculating the output error.\n4. Implement the forward pass in the run method.",
"class NeuralNetwork(object):\n def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):\n # Set number of nodes in input, hidden and output layers.\n self.input_nodes = input_nodes\n self.hidden_nodes = hidden_nodes\n self.output_nodes = output_nodes\n\n # Initialize weights\n self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, \n (self.input_nodes, self.hidden_nodes))\n\n self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, \n (self.hidden_nodes, self.output_nodes))\n self.lr = learning_rate\n \n #### TODO: Set self.activation_function to your implemented sigmoid function ####\n #\n # Note: in Python, you can define a function with a lambda expression,\n # as shown below.\n self.activation_function = lambda x : 0 # Replace 0 with your sigmoid calculation.\n \n ### If the lambda code above is not something you're familiar with,\n # You can uncomment out the following three lines and put your \n # implementation there instead.\n #\n #def sigmoid(x):\n # return 0 # Replace 0 with your sigmoid calculation here\n #self.activation_function = sigmoid\n \n \n def train(self, features, targets):\n ''' Train the network on batch of features and targets. \n \n Arguments\n ---------\n \n features: 2D array, each row is one data record, each column is a feature\n targets: 1D array of target values\n \n '''\n n_records = features.shape[0]\n delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape)\n delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape)\n for X, y in zip(features, targets):\n #### Implement the forward pass here ####\n ### Forward pass ###\n # TODO: Hidden layer - Replace these values with your calculations.\n hidden_inputs = None # signals into hidden layer\n hidden_outputs = None # signals from hidden layer\n\n # TODO: Output layer - Replace these values with your calculations.\n final_inputs = None # signals into final output layer\n final_outputs = None # signals from final output layer\n \n #### Implement the backward pass here ####\n ### Backward pass ###\n\n # TODO: Output error - Replace this value with your calculations.\n error = None # Output layer error is the difference between desired target and actual output.\n \n # TODO: Calculate the hidden layer's contribution to the error\n hidden_error = None\n \n # TODO: Backpropagated error terms - Replace these values with your calculations.\n output_error_term = None\n hidden_error_term = None\n\n # Weight step (input to hidden)\n delta_weights_i_h += None\n # Weight step (hidden to output)\n delta_weights_h_o += None\n\n # TODO: Update the weights - Replace these values with your calculations.\n self.weights_hidden_to_output += None # update hidden-to-output weights with gradient descent step\n self.weights_input_to_hidden += None # update input-to-hidden weights with gradient descent step\n \n def run(self, features):\n ''' Run a forward pass through the network with input features \n \n Arguments\n ---------\n features: 1D array of feature values\n '''\n \n #### Implement the forward pass here ####\n # TODO: Hidden layer - replace these values with the appropriate calculations.\n hidden_inputs = None # signals into hidden layer\n hidden_outputs = None # signals from hidden layer\n \n # TODO: Output layer - Replace these values with the appropriate calculations.\n final_inputs = None # signals into final output layer\n final_outputs = None # signals from final output layer \n \n return final_outputs\n\ndef MSE(y, Y):\n return 
np.mean((y-Y)**2)",
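"As a purely illustrative, hedged sketch of the pieces described above (this is not the project solution, and the example_* names below are invented for the demo): the sigmoid activation, its derivative, and one forward pass through a hidden layer followed by the identity output node.",
"import numpy as np\n\n# Hedged illustration only -- not the graded solution; the example_* names are made up.\ndef sigmoid(x):\n    # Squash inputs into (0, 1); used as the hidden-layer activation\n    return 1.0 / (1.0 + np.exp(-x))\n\ndef sigmoid_output_derivative(activated):\n    # Derivative of the sigmoid expressed via its own output: s * (1 - s)\n    return activated * (1.0 - activated)\n\n# Toy forward pass: 3 input features, 2 hidden units, 1 linear output node (f(x) = x)\nrng = np.random.RandomState(0)\nexample_X = rng.normal(size=(1, 3))\nexample_w_in_hid = rng.normal(scale=3**-0.5, size=(3, 2))\nexample_w_hid_out = rng.normal(scale=2**-0.5, size=(2, 1))\n\nhidden_inputs = np.dot(example_X, example_w_in_hid)        # signals into hidden layer\nhidden_outputs = sigmoid(hidden_inputs)                    # signals from hidden layer\nfinal_outputs = np.dot(hidden_outputs, example_w_hid_out)  # identity activation on the output\nprint(final_outputs)",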
"Unit tests\nRun these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project.",
"import unittest\n\ninputs = np.array([[0.5, -0.2, 0.1]])\ntargets = np.array([[0.4]])\ntest_w_i_h = np.array([[0.1, -0.2],\n [0.4, 0.5],\n [-0.3, 0.2]])\ntest_w_h_o = np.array([[0.3],\n [-0.1]])\n\nclass TestMethods(unittest.TestCase):\n \n ##########\n # Unit tests for data loading\n ##########\n \n def test_data_path(self):\n # Test that file path to dataset has been unaltered\n self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')\n \n def test_data_loaded(self):\n # Test that data frame loaded\n self.assertTrue(isinstance(rides, pd.DataFrame))\n \n ##########\n # Unit tests for network functionality\n ##########\n\n def test_activation(self):\n network = NeuralNetwork(3, 2, 1, 0.5)\n # Test that the activation function is a sigmoid\n self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))\n\n def test_train(self):\n # Test that weights are updated correctly on training\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n \n network.train(inputs, targets)\n self.assertTrue(np.allclose(network.weights_hidden_to_output, \n np.array([[ 0.37275328], \n [-0.03172939]])))\n self.assertTrue(np.allclose(network.weights_input_to_hidden,\n np.array([[ 0.10562014, -0.20185996], \n [0.39775194, 0.50074398], \n [-0.29887597, 0.19962801]])))\n\n def test_run(self):\n # Test correctness of run method\n network = NeuralNetwork(3, 2, 1, 0.5)\n network.weights_input_to_hidden = test_w_i_h.copy()\n network.weights_hidden_to_output = test_w_h_o.copy()\n\n self.assertTrue(np.allclose(network.run(inputs), 0.09998924))\n\nsuite = unittest.TestLoader().loadTestsFromModule(TestMethods())\nunittest.TextTestRunner().run(suite)",
"Training the network\nHere you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.\nYou'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.\nChoose the number of iterations\nThis is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model with not generalize well to other data, this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.\nChoose the learning rate\nThis scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.\nChoose the number of hidden nodes\nThe more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.",
"import sys\n\n### Set the hyperparameters here ###\niterations = 100\nlearning_rate = 0.1\nhidden_nodes = 2\noutput_nodes = 1\n\nN_i = train_features.shape[1]\nnetwork = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)\n\nlosses = {'train':[], 'validation':[]}\nfor ii in range(iterations):\n # Go through a random batch of 128 records from the training data set\n batch = np.random.choice(train_features.index, size=128)\n X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']\n \n network.train(X, y)\n \n # Printing out the training progress\n train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)\n val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)\n sys.stdout.write(\"\\rProgress: {:2.1f}\".format(100 * ii/float(iterations)) \\\n + \"% ... Training loss: \" + str(train_loss)[:5] \\\n + \" ... Validation loss: \" + str(val_loss)[:5])\n sys.stdout.flush()\n \n losses['train'].append(train_loss)\n losses['validation'].append(val_loss)\n\nplt.plot(losses['train'], label='Training loss')\nplt.plot(losses['validation'], label='Validation loss')\nplt.legend()\n_ = plt.ylim()",
"Check out your predictions\nHere, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.",
"fig, ax = plt.subplots(figsize=(8,4))\n\nmean, std = scaled_features['cnt']\npredictions = network.run(test_features).T*std + mean\nax.plot(predictions[0], label='Prediction')\nax.plot((test_targets['cnt']*std + mean).values, label='Data')\nax.set_xlim(right=len(predictions))\nax.legend()\n\ndates = pd.to_datetime(rides.ix[test_data.index]['dteday'])\ndates = dates.apply(lambda d: d.strftime('%b %d'))\nax.set_xticks(np.arange(len(dates))[12::24])\n_ = ax.set_xticklabels(dates[12::24], rotation=45)",
"OPTIONAL: Thinking about your results(this question will not be evaluated in the rubric).\nAnswer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?\n\nNote: You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter\n\nYour answer below"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
nick-youngblut/SIPSim
|
ipynb/bac_genome/fullCyc/Day1_fullDataset/rep10_noPCR.ipynb
|
mit
|
[
"TODO: rerun; DBL default changed\nGoal\n\nExtension of Day1_rep10 simulations: subsampling OTU table without performing PCR simulation first\nSeeing how this affects the abundance distribution of the overlapping taxa in the dataset\n\nInit",
"import os\nimport glob\nimport re\nimport nestly\n\n%load_ext rpy2.ipython\n%load_ext pushnote\n\n%%R\nlibrary(ggplot2)\nlibrary(dplyr)\nlibrary(tidyr)\nlibrary(gridExtra)\nlibrary(phyloseq)\n\n## BD for G+C of 0 or 100\nBD.GCp0 = 0 * 0.098 + 1.66\nBD.GCp100 = 1 * 0.098 + 1.66",
"Nestly\n\nassuming fragments already simulated\nassuming Day1_rep10 notebook already ran",
"workDir = '/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/'\nbuildDir = os.path.join(workDir, 'Day1_rep10')\nR_dir = '/home/nick/notebook/SIPSim/lib/R/'\n\nfragFile= '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags.pkl'\ntargetFile = '/home/nick/notebook/SIPSim/dev/fullCyc/CD-HIT/target_taxa.txt'\n\nphyseqDir = '/var/seq_data/fullCyc/MiSeq_16SrRNA/515f-806r/lib1-7/phyloseq/'\nphyseq_bulkCore = 'bulk-core'\nphyseq_SIP_core = 'SIP-core_unk'\n\nnreps = 10\nprefrac_comm_abundance = '1e9'\n\nseq_per_fraction = ['lognormal', 9.432, 0.5, 10000, 30000] # dist, mean, scale, min, max\nbulk_days = [1]\nnprocs = 12\n\n# building tree structure\nnest = nestly.Nest()\n\n## varying params\nnest.add('rep', [x + 1 for x in xrange(nreps)])\n\n## set params\nnest.add('bulk_day', bulk_days, create_dir=False)\nnest.add('abs', [prefrac_comm_abundance], create_dir=False)\nnest.add('percIncorp', [0], create_dir=False)\nnest.add('percTaxa', [0], create_dir=False)\nnest.add('np', [nprocs], create_dir=False)\nnest.add('subsample_dist', [seq_per_fraction[0]], create_dir=False)\nnest.add('subsample_mean', [seq_per_fraction[1]], create_dir=False)\nnest.add('subsample_scale', [seq_per_fraction[2]], create_dir=False)\nnest.add('subsample_min', [seq_per_fraction[3]], create_dir=False)\nnest.add('subsample_max', [seq_per_fraction[4]], create_dir=False)\n\n### input/output files\nnest.add('buildDir', [buildDir], create_dir=False)\nnest.add('R_dir', [R_dir], create_dir=False)\nnest.add('fragFile', [fragFile], create_dir=False)\nnest.add('targetFile', [targetFile], create_dir=False)\nnest.add('physeqDir', [physeqDir], create_dir=False)\nnest.add('physeq_bulkCore', [physeq_bulkCore], create_dir=False)\n\n\n# building directory tree\nnest.build(buildDir)\n\n# bash file to run\nbashFile = os.path.join(buildDir, 'SIPSimRun.sh')\n\n%%writefile $bashFile\n#!/bin/bash\n\nexport PATH={R_dir}:$PATH\n \n \necho '# subsampling from the OTU table (simulating sequencing of the DNA pool)'\nSIPSim OTU_subsample \\\n --dist {subsample_dist} \\\n --dist_params mean:{subsample_mean},sigma:{subsample_scale} \\\n --min_size {subsample_min} \\\n --max_size {subsample_max} \\\n OTU_abs{abs}.txt \\\n > OTU_abs{abs}_sub.txt\n \necho '# making a wide-formatted table'\nSIPSim OTU_wideLong -w \\\n OTU_abs{abs}_sub.txt \\\n > OTU_abs{abs}_sub_w.txt\n \necho '# making metadata (phyloseq: sample_data)'\nSIPSim OTU_sampleData \\\n OTU_abs{abs}_sub.txt \\\n > OTU_abs{abs}_sub_meta.txt\n\n!chmod 777 $bashFile\n!cd $workDir; \\\n nestrun --template-file $bashFile -d Day1_rep10 --log-file log.txt -j 2",
"BD min/max\n\nwhat is the min/max BD that we care about?",
"%%R\n## min G+C cutoff\nmin_GC = 13.5\n## max G+C cutoff\nmax_GC = 80\n## max G+C shift\nmax_13C_shift_in_BD = 0.036\n\n\nmin_BD = min_GC/100.0 * 0.098 + 1.66 \nmax_BD = max_GC/100.0 * 0.098 + 1.66 \n\nmax_BD = max_BD + max_13C_shift_in_BD\n\ncat('Min BD:', min_BD, '\\n')\ncat('Max BD:', max_BD, '\\n')",
"Loading data\nEmperical\nSIP data",
"%%R -i physeqDir -i physeq_SIP_core -i bulk_days\n\n# bulk core samples\nF = file.path(physeqDir, physeq_SIP_core)\nphyseq.SIP.core = readRDS(F) \nphyseq.SIP.core.m = physeq.SIP.core %>% sample_data\n\nphyseq.SIP.core = prune_samples(physeq.SIP.core.m$Substrate == '12C-Con' & \n physeq.SIP.core.m$Day %in% bulk_days, \n physeq.SIP.core) %>%\n filter_taxa(function(x) sum(x) > 0, TRUE)\nphyseq.SIP.core.m = physeq.SIP.core %>% sample_data \n\nphyseq.SIP.core\n\n%%R \n\n## dataframe\ndf.EMP = physeq.SIP.core %>% otu_table %>%\n as.matrix %>% as.data.frame\ndf.EMP$OTU = rownames(df.EMP)\ndf.EMP = df.EMP %>% \n gather(sample, abundance, 1:(ncol(df.EMP)-1)) \n\ndf.EMP = inner_join(df.EMP, physeq.SIP.core.m, c('sample' = 'X.Sample')) \n\ndf.EMP.nt = df.EMP %>%\n group_by(sample) %>%\n mutate(n_taxa = sum(abundance > 0)) %>%\n ungroup() %>%\n distinct(sample) %>%\n filter(Buoyant_density >= min_BD, \n Buoyant_density <= max_BD)\n \ndf.EMP.nt %>% head(n=3)",
"bulk soil samples",
"%%R\nphyseq.dir = '/var/seq_data/fullCyc/MiSeq_16SrRNA/515f-806r/lib1-7/phyloseq/'\nphyseq.bulk = 'bulk-core'\nphyseq.file = file.path(physeq.dir, physeq.bulk)\nphyseq.bulk = readRDS(physeq.file)\nphyseq.bulk.m = physeq.bulk %>% sample_data\nphyseq.bulk = prune_samples(physeq.bulk.m$Exp_type == 'microcosm_bulk' &\n physeq.bulk.m$Day %in% bulk_days, physeq.bulk)\n\nphyseq.bulk.m = physeq.bulk %>% sample_data\nphyseq.bulk\n\n%%R\nphyseq.bulk.n = transform_sample_counts(physeq.bulk, function(x) x/sum(x))\nphyseq.bulk.n\n\n%%R\n# making long format of each bulk table\nbulk.otu = physeq.bulk.n %>% otu_table %>% as.data.frame\nncol = ncol(bulk.otu)\nbulk.otu$OTU = rownames(bulk.otu)\nbulk.otu = bulk.otu %>%\n gather(sample, abundance, 1:ncol) \n\nbulk.otu = inner_join(physeq.bulk.m, bulk.otu, c('X.Sample' = 'sample')) %>%\n dplyr::select(OTU, abundance) %>%\n rename('bulk_abund' = abundance)\nbulk.otu %>% head(n=3)\n\n%%R\n# joining tables\ndf.EMP.j = inner_join(df.EMP, bulk.otu, c('OTU' = 'OTU')) %>%\n filter(Buoyant_density >= min_BD, \n Buoyant_density <= max_BD) \n \ndf.EMP.j %>% head(n=3)",
"Simulated",
"OTU_files = !find $buildDir -name \"OTU_abs1e9_sub.txt\"\n#OTU_files = !find $buildDir -name \"OTU_abs1e9.txt\"\nOTU_files\n\n%%R -i OTU_files\n# loading files\n\ndf.SIM = list()\nfor (x in OTU_files){\n SIM_rep = gsub('/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/Day1_rep10/', '', x)\n #SIM_rep = gsub('/OTU_abs1e9_sub.txt', '', SIM_rep)\n SIM_rep = gsub('/OTU_abs1e9_sub.txt', '', SIM_rep)\n df.SIM[[SIM_rep]] = read.delim(x, sep='\\t') \n }\ndf.SIM = do.call('rbind', df.SIM)\ndf.SIM$SIM_rep = gsub('\\\\.[0-9]+$', '', rownames(df.SIM))\nrownames(df.SIM) = 1:nrow(df.SIM)\ndf.SIM %>% head\n\n%%R\n## edit table\ndf.SIM.nt = df.SIM %>%\n filter(count > 0) %>%\n group_by(SIM_rep, library, BD_mid) %>%\n summarize(n_taxa = n()) %>%\n filter(BD_mid >= min_BD, \n BD_mid <= max_BD)\ndf.SIM.nt %>% head ",
"'bulk soil' community files",
"# loading comm files\ncomm_files = !find $buildDir -name \"bulk-core_comm_target.txt\"\ncomm_files\n\n%%R -i comm_files\n\ndf.comm = list()\nfor (f in comm_files){\n rep = gsub('.+/Day1_rep10/([0-9]+)/.+', '\\\\1', f)\n df.comm[[rep]] = read.delim(f, sep='\\t') %>%\n dplyr::select(library, taxon_name, rel_abund_perc) %>%\n rename('bulk_abund' = rel_abund_perc) %>%\n mutate(bulk_abund = bulk_abund / 100)\n}\n\ndf.comm = do.call('rbind', df.comm)\ndf.comm$SIM_rep = gsub('\\\\.[0-9]+$', '', rownames(df.comm))\nrownames(df.comm) = 1:nrow(df.comm)\ndf.comm %>% head(n=3)\n\n%%R\n## joining tables\ndf.SIM.j = inner_join(df.SIM, df.comm, c('SIM_rep' = 'SIM_rep',\n 'library' = 'library',\n 'taxon' = 'taxon_name')) %>%\n filter(BD_mid >= min_BD, \n BD_mid <= max_BD)\n \ndf.SIM.j %>% head(n=3)\n\n%%R \n# filtering & combining emperical w/ simulated data\n\n## emperical \nmax_BD_range = max(df.EMP.j$Buoyant_density) - min(df.EMP.j$Buoyant_density)\ndf.EMP.j.f = df.EMP.j %>%\n filter(abundance > 0) %>%\n group_by(OTU) %>%\n summarize(mean_rel_abund = mean(bulk_abund),\n min_BD = min(Buoyant_density),\n max_BD = max(Buoyant_density),\n BD_range = max_BD - min_BD,\n BD_range_perc = BD_range / max_BD_range * 100) %>%\n ungroup() %>%\n mutate(dataset = 'emperical',\n SIM_rep = NA)\n\n## simulated\nmax_BD_range = max(df.SIM.j$BD_mid) - min(df.SIM.j$BD_mid)\ndf.SIM.j.f = df.SIM.j %>%\n filter(count > 0) %>%\n group_by(SIM_rep, taxon) %>%\n summarize(mean_rel_abund = mean(bulk_abund),\n min_BD = min(BD_mid),\n max_BD = max(BD_mid),\n BD_range = max_BD - min_BD,\n BD_range_perc = BD_range / max_BD_range * 100) %>%\n ungroup() %>%\n rename('OTU' = taxon) %>%\n mutate(dataset = 'simulated')\n\n## join\ndf.j = rbind(df.EMP.j.f, df.SIM.j.f) %>%\n filter(BD_range_perc > 0,\n mean_rel_abund > 0)\n\ndf.j$SIM_rep = reorder(df.j$SIM_rep, df.j$SIM_rep %>% as.numeric)\n\ndf.j %>% head(n=3)\n\n%%R -h 400\n## plotting\nggplot(df.j, aes(mean_rel_abund, BD_range_perc, color=SIM_rep)) +\n geom_point(alpha=0.3) +\n scale_x_log10() +\n scale_y_continuous() +\n labs(x='Pre-fractionation abundance', y='% of total BD range') +\n facet_grid(dataset ~ .) +\n theme_bw() +\n theme(\n text = element_text(size=16),\n panel.grid = element_blank()#,\n #legend.position = 'none'\n )\n",
"BD span of just overlapping taxa\n\nTaxa overlapping between emperical data and genomes in dataset\nThese taxa should have the same relative abundances in both datasets.\nThe comm file was created from the emperical dataset phyloseq file.",
"%%R -i targetFile\n\ndf.target = read.delim(targetFile, sep='\\t')\ndf.target %>% nrow %>% print\ndf.target %>% head(n=3)\n\n%%R\n# filtering to just target taxa\ndf.j.t = df.j %>% \n filter(OTU %in% df.target$OTU) \ndf.j %>% nrow %>% print\ndf.j.t %>% nrow %>% print\n\n## plotting\nggplot(df.j.t, aes(mean_rel_abund, BD_range_perc, color=SIM_rep)) +\n geom_point(alpha=0.5, shape='O') +\n scale_x_log10() +\n scale_y_continuous() +\n #scale_color_manual(values=c('blue', 'red')) +\n labs(x='Pre-fractionation abundance', y='% of total BD range') +\n facet_grid(dataset ~ .) +\n theme_bw() +\n theme(\n text = element_text(size=16),\n panel.grid = element_blank()#,\n #legend.position = 'none'\n )",
"Correlation between relative abundance and BD_range diff\n\nAre low abundant taxa more variable in their BD span",
"%%R\n# formatting data\ndf.1 = df.j.t %>% \n filter(dataset == 'simulated') %>%\n select(SIM_rep, OTU, mean_rel_abund, BD_range, BD_range_perc)\n\ndf.2 = df.j.t %>%\n filter(dataset == 'emperical') %>%\n select(SIM_rep, OTU, mean_rel_abund, BD_range, BD_range_perc)\n\ndf.12 = inner_join(df.1, df.2, c('OTU' = 'OTU')) %>%\n mutate(BD_diff_perc = BD_range_perc.y - BD_range_perc.x)\n\n\ndf.12$SIM_rep.x = reorder(df.12$SIM_rep.x, df.12$SIM_rep.x %>% as.numeric)\n\n%%R -w 800 -h 500\n\nggplot(df.12, aes(mean_rel_abund.x, BD_diff_perc)) +\n geom_point(alpha=0.5) +\n scale_x_log10() +\n labs(x='Pre-fractionation relative abundance', \n y='Difference in % of gradient spanned\\n(emperical - simulated)',\n title='Overlapping taxa') +\n facet_wrap(~ SIM_rep.x) +\n theme_bw() +\n theme(\n text = element_text(size=16),\n panel.grid = element_blank(),\n legend.position = 'none'\n )\n",
"Notes\n\nbetween Day1_rep10, Day1_richFromTarget_rep10, and Day1_add_Rich_rep10:\nDay1_rep10 has the most accurate representation of BD span (% of gradient spanned by taxa).\nAccuracy drops at ~1e-3 to ~5e-4, but this is caused by detection limits (veil-line effect).\n\n\n\nComparing abundance distributions of overlapping taxa",
"%%R\n\njoin_abund_dists = function(df.EMP.j, df.SIM.j, df.target){\n \n ## emperical \n df.EMP.j.f = df.EMP.j %>%\n filter(abundance > 0) %>%\n #filter(!OTU %in% c('OTU.32', 'OTU.2', 'OTU.4')) %>% # TEST\n dplyr::select(OTU, sample, abundance, Buoyant_density, bulk_abund) %>%\n mutate(dataset = 'emperical', SIM_rep = NA) %>%\n filter(OTU %in% df.target$OTU) \n \n ## simulated\n df.SIM.j.f = df.SIM.j %>%\n filter(count > 0) %>%\n #filter(!taxon %in% c('OTU.32', 'OTU.2', 'OTU.4')) %>% # TEST\n dplyr::select(taxon, fraction, count, BD_mid, bulk_abund, SIM_rep) %>%\n rename('OTU' = taxon,\n 'sample' = fraction,\n 'Buoyant_density' = BD_mid,\n 'abundance' = count) %>%\n mutate(dataset = 'simulated') %>%\n filter(OTU %in% df.target$OTU) \n \n ## getting just intersecting OTUs\n OTUs.int = intersect(df.EMP.j.f$OTU, df.SIM.j.f$OTU)\n \n df.j = rbind(df.EMP.j.f, df.SIM.j.f) %>%\n filter(OTU %in% OTUs.int) %>%\n group_by(sample) %>%\n mutate(rel_abund = abundance / sum(abundance))\n \n cat('Number of overlapping OTUs between emperical & simulated:', \n df.j$OTU %>% unique %>% length, '\\n\\n')\n return(df.j)\n }\n\n\ndf.j = join_abund_dists(df.EMP.j, df.SIM.j, df.target)\ndf.j %>% head(n=3) %>% as.data.frame \n\n%%R\n# closure operation\ndf.j = df.j %>%\n ungroup() %>%\n mutate(SIM_rep = SIM_rep %>% as.numeric) %>%\n group_by(dataset, SIM_rep, sample) %>%\n mutate(rel_abund_c = rel_abund / sum(rel_abund)) %>%\n ungroup()\n\ndf.j %>% head(n=3) %>% as.data.frame\n\n%%R -h 1500 -w 800\n# plotting \nplot_abunds = function(df){\n p = ggplot(df, aes(Buoyant_density, rel_abund_c, fill=OTU)) +\n geom_area(stat='identity', position='dodge', alpha=0.5) +\n labs(x='Buoyant density', \n y='Subsampled community\\n(relative abundance for subset taxa)') +\n theme_bw() +\n theme( \n text = element_text(size=16),\n legend.position = 'none',\n axis.title.y = element_text(vjust=1), \n axis.title.x = element_blank(),\n plot.margin=unit(c(0.1,1,0.1,1), \"cm\")\n )\n return(p)\n }\n\n\n# simulations\ndf.j.f = df.j %>%\n filter(dataset == 'simulated')\np.SIM = plot_abunds(df.j.f)\np.SIM = p.SIM + facet_grid(SIM_rep ~ .)\n\n# emperical\ndf.j.f = df.j %>%\n filter(dataset == 'emperical')\np.EMP = plot_abunds(df.j.f)\n\n# status\ncat('Number of overlapping taxa:', df.j$OTU %>% unique %>% length, '\\n')\n\n# make figure\ngrid.arrange(p.EMP, p.SIM, ncol=1, heights=c(1,5))",
"Calculating center of mass for overlapping taxa\n\nweighted mean BD, where weights are relative abundances",
"%%R\n\ncenter_mass = function(df){\n df = df %>%\n group_by(dataset, SIM_rep, OTU) %>%\n summarize(center_mass = weighted.mean(Buoyant_density, rel_abund_c, na.rm=T),\n median_rel_abund_c = median(rel_abund_c)) %>%\n ungroup()\n return(df)\n}\n\ndf.j.cm = center_mass(df.j) \n\n%%R -w 650\n# getting mean cm for all SIM_reps\ndf.j.cm.s = df.j.cm %>%\n group_by(dataset, OTU) %>%\n summarize(mean_cm = mean(center_mass, na.rm=T),\n stdev_cm = sd(center_mass),\n median_rel_abund_c = first(median_rel_abund_c)) %>%\n ungroup() %>%\n spread(dataset, mean_cm) %>%\n group_by(OTU) %>%\n summarize(stdev_cm = mean(stdev_cm, na.rm=T),\n emperical = mean(emperical, na.rm=T),\n simulated = mean(simulated, na.rm=T),\n median_rel_abund_c = first(median_rel_abund_c)) %>%\n ungroup()\n\n# check\ncat('Number of OTUs:', df.j.cm.s$OTU %>% unique %>% length, '\\n')\n\n# plotting\nggplot(df.j.cm.s, aes(emperical, simulated, color=median_rel_abund_c,\n ymin = simulated - stdev_cm,\n ymax = simulated + stdev_cm)) +\n geom_pointrange() +\n stat_function(fun = function(x) x, linetype='dashed', alpha=0.5, color='red') +\n scale_x_continuous(limits=c(1.69, 1.74)) +\n scale_y_continuous(limits=c(1.705, 1.74)) +\n scale_color_gradient(trans='log') +\n labs(title='Center of mass') +\n theme_bw() +\n theme(\n text = element_text(size=16)\n )",
"Notes\n\nLeaving out the PCR simulation does not help with simulation accuracy for center of mass on overlapping taxa\n\nplotting taxon abundance vs diff between emperical & simulated",
"%%R\n\ndf.j.cm.s.f = df.j.cm.s %>%\n mutate(CM_diff = emperical - simulated)\n\nggplot(df.j.cm.s.f, aes(median_rel_abund_c, CM_diff)) +\n geom_point() +\n scale_x_log10() +\n labs(x='Relative abundance', y='Center of mass (Emperical - Simulated)', title='Center of mass') +\n theme_bw() +\n theme(\n text = element_text(size=16)\n )",
"Notes\n\nNo clear pattern between OTU relative abundance (relative for just the overlapping taxa) and the difference in center between simulated and emperical data\n\nWhat is causing the inter-SIM_rep varition in center of mass?\npossibilities:\n\nsubsampling simulation\ndefinately contributes to a lot of the variation (re-ran the above cells with the OTU table files)\nthe samples then become more centered around a BD of 1.725 (approx. center of gradient)\nThis suggests that the DBL is causing too much smearing. \nNeed to try a run w/out DBL \n\n\nDBL 'smearing' simultion\ndiffusion simulation"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
anshbansal/anshbansal.github.io
|
udacity_data_science_notes/intro_data_analysis/lesson_01/L1_Starter_Code.ipynb
|
mit
|
[
"Lesson 1: Data Analysis Process\n01 Introduction\n\nData Analyst is someone who uses data to answer questions\nUsing data to answer questions like\nWhat makes students more likely to submit their projects\nWhat countries around the world have the highest and lowest employment rates\nHow does subway ridership vary by location, time of day and weather conditions\n\n\nLearn to use libraries that will make data analysis a lot easier\nNumpy\nPandas\nMatplotlib\n\n\n\n02 - Problems Solved by Data Analysts\nHearing Data analysis people generally think person working in tech company & Silicon valley. But it is not necessary. It is used outside tech companies also.\nTech-y systems\n- Netflix uses it for providing personalized movie recommendations\n- facebook uses them in their news feed ranking algorithm\n- okcupid uses it to provide good romantic matches\nResources\n* Paper by Facebook on exposure to ideologically diverse information\n* OKCupid blog post on the best questions to ask on a first date\n* How Walmart used data analysis to increase sales\n* How Bill James applied data analysis to baseball\n* A pharmaceutical company uses data analysis to predict which chemical compounds are likely to make effective drugs\n03 - Data Analysis Process\n\nQuestion\nstarts with\nquestion that you want to answer\na problem that you want to solve\n\n\n\n\nWrangle\nData acquisiton\nfind that which is needed for answering the question\n\n\nData cleaning\ncleaning up any problems that you find\n\n\n\n\nExplore\ngetting familiar with data\nbuild intuition\nfinding patterns\n\n\nDraw Conclusions or make predictions\nexample\nPrediction: Netflix needs to find which movies the users will like\nConclusion: Facebook found that users are less likely to click some articles\n\n\nUsually requires machine learning or statistics\n\n\nCommunicate\nfinding as useful as your ability to share them\nmany ways to do this\nvisualization is almost always useful\n\n\n\nNot always straight forward process. We keep on exploring and need to wrangle more.\n\nWe may also need to go back to the question as we get more familiar with the data\n\nInstallation and common instructions\n\nAnaconda download\nIf you have Anaconda already installed\nconda update conda\nconda update anaconda\n\n\nRunning iPython notebook\njupyter notebook ipython_notebook_tutorial.ipynb\n\n\n\n04 - Intro to CSVs (Comma Separated Values)\n\nEasier to process than other formats like xls\n\n05 - CSVs in Python",
"## As list of rows\n\n# Option 1: Each row is a list\ncsv = [\n ['A1', 'A2'],\n ['B1', 'B2']\n]\n\nprint csv\n\n# Option 2: Each row is a dictionary\n# Better if your csv has a header\ncsv = [\n {'name': 'Aseem', 'age': '25'},\n {'name': 'Aseem', 'age': '25'}\n]\n\nprint csv",
"Load Data from CSVs",
"import unicodecsv\n\n## Longer version of code (replaced with shorter, equivalent version below)\n\n# enrollments = []\n# f = open('enrollments.csv', 'rb')\n# reader = unicodecsv.DictReader(f)\n# for row in reader:\n# enrollments.append(row)\n# f.close()\n\nwith open('enrollments.csv', 'rb') as f:\n reader = unicodecsv.DictReader(f)\n enrollments = list(reader)\n\nenrollments[0]\n\n#####################################\n# 1 #\n#####################################\n\n## Read in the data from daily_engagement.csv and project_submissions.csv \n## and store the results in the below variables.\n## Then look at the first row of each table.\n\ndef read_csv(file_name):\n with open(file_name, 'rb') as f:\n reader = unicodecsv.DictReader(f)\n return list(reader)\n\ndaily_engagement = read_csv('daily_engagement.csv')\nproject_submissions = read_csv('project_submissions.csv')\n\nprint daily_engagement[0]\nprint \"\\n\"\nprint project_submissions[0]",
"This page contains documentation for Python's csv module. Instead of csv, you'll be using unicodecsv in this course. unicodecsv works exactly the same as csv, but it comes with Anaconda and has support for unicode. The csv documentation page is still the best way to learn how to use the unicodecsv library, since the two libraries work exactly the same way.\nThis page explains the difference between iterators and lists in Python, and how to use iterators.\n07 - Fixing Data Types\ncsv module reads every value as a string rather than actual data types. This could be confusing. Hence, it is better to fix this now rather than waiting for later.",
"from datetime import datetime as dt\n\n# Takes a date as a string, and returns a Python datetime object. \n# If there is no date given, returns None\ndef parse_date(date):\n if date == '':\n return None\n else:\n return dt.strptime(date, '%Y-%m-%d')\n \n# Takes a string which is either an empty string or represents an integer,\n# and returns an int or None.\ndef parse_maybe_int(i):\n if i == '':\n return None\n else:\n return int(i)\n\n# Clean up the data types in the enrollments table\nfor enrollment in enrollments:\n enrollment['cancel_date'] = parse_date(enrollment['cancel_date'])\n enrollment['days_to_cancel'] = parse_maybe_int(enrollment['days_to_cancel'])\n enrollment['is_canceled'] = enrollment['is_canceled'] == 'True'\n enrollment['is_udacity'] = enrollment['is_udacity'] == 'True'\n enrollment['join_date'] = parse_date(enrollment['join_date'])\n \nenrollments[0]\n\ndef to_float(record, key):\n record[key] = float(record[key])\n\ndef to_int(record, key):\n record[key] = int(float(record[key]))\n\n# Clean up the data types in the engagement table\nfor engagement_record in daily_engagement:\n to_int(engagement_record, 'lessons_completed')\n to_int(engagement_record, 'num_courses_visited')\n to_int(engagement_record, 'projects_completed')\n to_float(engagement_record, 'total_minutes_visited')\n engagement_record['utc_date'] = parse_date(engagement_record['utc_date'])\n \ndaily_engagement[0]\n\n# Clean up the data types in the submissions table\nfor submission in project_submissions:\n submission['completion_date'] = parse_date(submission['completion_date'])\n submission['creation_date'] = parse_date(submission['creation_date'])\n\nproject_submissions[0]",
"Note when running the above cells that we are actively changing the contents of our data variables. If you try to run these cells multiple times in the same session, an error will occur.\nQuestions\n\nHow long to submit projects?\nHow do students who submit their projects different from the students who don't?\n\n10 Investigating the Data\nWe will run some numbers to get some understanding about the data.\nFor removing an element from a dictionary, this post might be helpful",
"#####################################\n# 3 #\n#####################################\n\n## Rename the \"acct\" column in the daily_engagement table to \"account_key\".\n\n# NOTE Added later after finding the problems in the data\nfor engagement_record in daily_engagement:\n # Rename the \"acct\" column in the daily_engagement table to \"account_key\".\n engagement_record['account_key'] = engagement_record['acct']\n del engagement_record['acct']\n\n#####################################\n# 2 #\n#####################################\n\n## Find the total number of rows and the number of unique students (account keys)\n## in each table.\n \ndef unique_accounts(data):\n result = set()\n for row in data:\n result.add(row['account_key'])\n return result\n\ndef print_total_and_unique(name, data):\n print \"total {0} = {1}, unique {2}\".format(name, len(data), len(unique_accounts(data)))\n\nprint_total_and_unique(\"enrollments\", enrollments)\nprint_total_and_unique(\"daily engagements\", daily_engagement)\nprint_total_and_unique(\"project submissions\", project_submissions)",
"The reason total enrollments and unique enrollments are different is that students can enroll, cancel and then re-enroll\nThere are much more daily engagegement (136240) compared to enrollments. That is expected as each student will have an entry for every day\n\n11 Problems in the Data\n\nThe number of unique engagements are not the same as unique enrollments. It should be as engagement table is supposed to contain a row for each day a student was enrolled in the course\nEngagement table has acct while other two tables have account_key. Renamed this so that we can use a single function instead of copy pasting code\n\n13 - Missing Engagement Records\n\nWhy are students missing from engagement table?\nIf there is something in your data that you don't understand it is important to spend time and understand that \nas we could be missing something important\ncannot trust your results \n\n\n\nInvestigating problems\n\nIdentify surprising points\nPrint out a few data points\n\nWe will print one account without daily engagement data and one account with daily engagement data. Then we will compare and see if we can find anything",
"#####################################\n# 4 #\n#####################################\n\n## Find any one student enrollments where the student \n## is missing from the daily engagement table.\n## Output that enrollment.\n\n\ndef get_one(data):\n for row in data:\n return row\n\ndef get_account_data(data, account_key):\n for row in data:\n if row['account_key'] == account_key:\n return row\n\ndef audit_all_enrollment_should_have_engagement(enrollments, engagements):\n unique_enrollments = unique_accounts(enrollments)\n unique_daily_engagement = unique_accounts(engagements)\n\n missing_engagements = unique_enrollments - unique_daily_engagement\n present_engagements = unique_enrollments - missing_engagements\n\n print \"missing \\n {}\".format(\n get_account_data(enrollments, get_one(missing_engagements))\n )\n print \"\"\n print \"present \\n {}\".format(\n get_account_data(enrollments, get_one(present_engagements))\n )\n \naudit_all_enrollment_should_have_engagement(enrollments, daily_engagement)",
"I notice that is_udacity is True for the missing record while it is False for the present record.\nThe instructor talks with a Udacity Data Scientist and they share that for these are Udacity test accounts and they may not have data in the daily engagement table.\nSo we will go ahead and remove these test accounts from the data set.",
"# Create a set of the account keys for all Udacity test accounts\nudacity_test_accounts = set()\nfor enrollment in enrollments:\n if enrollment['is_udacity']:\n udacity_test_accounts.add(enrollment['account_key'])\nlen(udacity_test_accounts)\n\n# Given some data with an account_key field, \n# removes any records corresponding to Udacity test accounts\ndef remove_udacity_accounts(data):\n non_udacity_data = []\n for data_point in data:\n if data_point['account_key'] not in udacity_test_accounts:\n non_udacity_data.append(data_point)\n return non_udacity_data\n\n# Remove Udacity test accounts from all three tables\nnon_udacity_enrollments = remove_udacity_accounts(enrollments)\nnon_udacity_engagement = remove_udacity_accounts(daily_engagement)\nnon_udacity_submissions = remove_udacity_accounts(project_submissions)\n\nprint len(non_udacity_enrollments)\nprint len(non_udacity_engagement)\nprint len(non_udacity_submissions)",
"At this point we repeat and ensure that this will all of the surprises related to our earlier observation. This is a common process that we need to do during data analysis.\n\nChecking for More Problem Records\nSo we run the earlier code again and ensure that we do not gave any more surprises",
"#####################################\n# 5 #\n#####################################\n\n## Find the number of surprising data points (enrollments missing from\n## the engagement table) that remain, if any.\nprint_total_and_unique(\"enrollments\", non_udacity_enrollments)\nprint_total_and_unique(\"daily engagements\", non_udacity_engagement)",
"Tracking Down the Remaining Problems\nWe see that we still have something left in the data that we are not quite sure about as the unique numbers still do not match. So we repeat and try to find what problem still remains.",
"audit_all_enrollment_should_have_engagement(non_udacity_enrollments, non_udacity_engagement)",
"Looking at the above data we see that days_to_cancel is 0 for the missing account. \nThe join_date is the same as cancel_date. \nProbably a person needs to be enrolled at least a day for there to be an engagement record.\n\nNow we repeat to see if excluding these account we can no more surprises. For this we need to firstly filter these people out.",
"#Make a list of people who cancelled the same day\npeople_who_cancelled_same_day = set()\nfor enrollment in non_udacity_enrollments:\n if enrollment['days_to_cancel'] == 0:\n people_who_cancelled_same_day.add(enrollment['account_key'])\nlen(people_who_cancelled_same_day)\n\ndef remove_people_who_cancelled_same_day(data):\n no_cancellations = []\n for data_point in data:\n if data_point['account_key'] not in people_who_cancelled_same_day:\n no_cancellations.append(data_point)\n return no_cancellations\n\n# Remove People who cancelled the same day\nenrollments_2 = remove_people_who_cancelled_same_day(non_udacity_enrollments)\nengagement_2 = remove_people_who_cancelled_same_day(non_udacity_engagement)\nsubmissions_2 = remove_people_who_cancelled_same_day(non_udacity_submissions)\n\nprint len(enrollments_2)\nprint len(engagement_2)\nprint len(submissions_2)",
"Now we have done the filtering we will see if our check passes or are there more surprises left",
"audit_all_enrollment_should_have_engagement(enrollments_2, engagement_2)",
"Finally we can see that we have no more surprises left. At least surprises related to someone enrolled not having engagement.\nNow we may or may not want to actually exclude these people when analysing further. Depends on what we are questions we are trying to answer. \n\nRefining the Question\nNow that we don't have any other questions we could move on from the wrangling to the exploration phase\nQuestion\nHow do numbers in the daily_engagement differ for students who pass the first project?\nBut there are a few problems in the question as currently specified.\nProblems\n\nThis will include data from after the project submissions\nThis compares data for different length of time as students may work for different length of time. Say someone submits after a week vs. a month\n\nSo we will only look at \n- engagement from first week\n- exclude students who cancel within the first week",
"#####################################\n# 6 #\n#####################################\n\n## Create a dictionary named paid_students containing all students who either\n## haven't canceled yet or who remained enrolled for more than 7 days. The keys\n## should be account keys, and the values should be the date the student enrolled.\n\npaid_students = {}\nfor enrollment in non_udacity_enrollments:\n if not enrollment['is_canceled'] or enrollment['days_to_cancel'] > 7:\n account_key = enrollment['account_key']\n join_date = enrollment['join_date']\n\n # Not directly adding the key value as \n # having most recent enrollment makes more sense\n if account_key not in paid_students or join_date > paid_students[account_key]:\n paid_students[account_key] = join_date\n\nlen(paid_students)",
"Thinking about it the name paid_students isn't really good as someone who has not cancelled may or may not be a paid student. But I'll go with that so that rest of the lesson remains in-sync with the videos.\nNow we will filter and keep only these students and proceed further based on these only.",
"def keep_paid(data):\n result = []\n for row in data:\n account_key = row['account_key']\n if account_key in paid_students:\n result.append(row)\n return result\n\n# Filter data to keep only for paid enrollments\npaid_enrollments = keep_paid(non_udacity_enrollments)\npaid_engagements = keep_paid(non_udacity_engagement)\npaid_submissions = keep_paid(non_udacity_submissions)",
"Getting Data from First Week\nWe will filter out data to keep only engagement upto the first week.\nI added a function to keep data within n days rather than one week only. What if I want to change it later? Giving an additional parameter helps.",
"# Takes a student's join date and the date of a specific engagement record,\n# and returns True if that engagement record happened within one week\n# of the student joining.\ndef within_one_week(join_date, engagement_date):\n time_delta = engagement_date - join_date\n return time_delta.days < 7\n\ndef within_n_days(join_date, engagement_date, n):\n time_delta = engagement_date - join_date\n return n > time_delta.days >= 0",
"Ignore the following code block till you get to the block Number of Visits in First Week\nThis adds a has_visited column to engagement records for use later",
"for engagement in paid_engagements:\n if engagement['num_courses_visited'] > 0:\n engagement['has_visited'] = 1\n else:\n engagement['has_visited'] = 0\n\n#####################################\n# 7 #\n#####################################\n\n## Create a list of rows from the engagement table including only rows where\n## the student is one of the paid students you just found, and the date is within\n## one week of the student's join date.\n\npaid_engagement_in_first_week = []\nfor engagement in paid_engagements:\n\n account_key = engagement['account_key']\n utc_date = engagement['utc_date']\n join_date = paid_students[account_key]\n\n if within_n_days(join_date, utc_date, 7):\n paid_engagement_in_first_week.append(engagement)\n \nlen(paid_engagement_in_first_week)\n\n# Filter data to keep only for paid enrollments\npaid_enrollments = keep_paid(non_udacity_enrollments)\npaid_engagements = keep_paid(non_udacity_engagement)\npaid_submissions = keep_paid(non_udacity_submissions)\n\nprint_total_and_unique(\"enrollments\", paid_enrollments)\nprint_total_and_unique(\"daily engagements\", paid_engagements)\nprint_total_and_unique(\"project submissions\", paid_submissions)",
"At this point we would like to divide the data into 2 parts\n - student who pass the project\n - student who don't pass\nBut as we have this data about student engagement in the first week why don't we explore it a bit? That will help us understand it better.\nExploring Student Engagement\nLet us explore the average time spent by student in the class.\nWe intially group the engagement data by student.",
"from collections import defaultdict\n\ndef group_by(data, key):\n grouped = defaultdict(list)\n for record in data:\n _key = record[key]\n grouped[_key].append(record)\n return grouped\n\n# Create a dictionary of engagement grouped by student.\n# The keys are account keys, and the values are lists of engagement records.\n \nengagement_by_account = group_by(paid_engagement_in_first_week, 'account_key')\n",
"Now we sum time spent by each student",
"# Create a dictionary with the total minutes each student spent \n# in the classroom during the first week.\n# The keys are account keys, and the values are numbers (total minutes)\n\ndef sum_grouped_by_key(data, key):\n total_by_account = {}\n for account_key, engagement_for_student in data.items():\n total = 0\n for engagement_record in engagement_for_student:\n total += engagement_record[key]\n total_by_account[account_key] = total\n return total_by_account\n\ntotal_minutes_by_account = sum_grouped_by_key(\n engagement_by_account, \n 'total_minutes_visited'\n)\n\nlen(total_minutes_by_account)",
"Now we output the average \n\nWhile we are looking at the mean we will also look at some other statistics\nEven though we know the mean, standard deviation, maximum, and minimum of various metrics, there are a lot of other facts about each metric that would be nice to know. Are more values close to the minimum or the maximum? What is the median? And so on.\nInstead of printing out more statistics, it makes sense to visualize the data using a histogram.",
"%pylab inline\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef summarize(data_dict):\n # Summarize the data about minutes spent in the classroom\n data_vals = data_dict.values()\n print 'Mean:', np.mean(data_vals)\n print 'Standard deviation:', np.std(data_vals)\n print 'Minimum:', np.min(data_vals)\n print 'Maximum:', np.max(data_vals)\n plt.hist(data_vals)",
"The line %matplotlib inline is specifically for IPython notebook, and causes your plots to appear in your notebook rather than a new window. If you are not using IPython notebook, you should not include this line, and instead you should add the line plt.show() at the bottom to show the plot in a new window.\nTo change how many bins are shown for each plot, try using the bins argument to the hist function. You can find documentation for the hist function and the arguments it takes here.",
"summarize(total_minutes_by_account)",
"Debugging Data Analysis Code",
"#####################################\n# 8 #\n#####################################\n\n## Go through a similar process as before to see if there is a problem.\n## Locate at least one surprising piece of data, output it, and take a look at it.",
"26 - Lessons Completed in First Week",
"#####################################\n# 9 #\n#####################################\n\n## Adapt the code above to find the mean, standard deviation, minimum, and maximum for\n## the number of lessons completed by each student during the first week. Try creating\n## one or more functions to re-use the code above.\ntotal_lessons_by_account = sum_grouped_by_key(\n engagement_by_account, \n 'lessons_completed'\n)\n\nsummarize(total_lessons_by_account)",
"28 - Number of Visits in the First Week\nWe want to analyze how many days did a student visit the class at all so we will add a has_visted field.",
"######################################\n# 10 #\n######################################\n\n## Find the mean, standard deviation, minimum, and maximum for the number of\n## days each student visits the classroom during the first week.\ndays_visited_by_account = sum_grouped_by_key(\n engagement_by_account, \n 'has_visited'\n)\n\nsummarize(days_visited_by_account)",
"Splitting out Passing Students\nNow we get to the part where we are splitting the data into 2 parts - those who pass and those who don't pass. Then we will try and figure out what was the difference between their engagement.",
"paid_submissions[0]\n\n######################################\n# 11 #\n######################################\n\n## Create two lists of engagement data for paid students in the first week.\n## The first list should contain data for students who eventually pass the\n## subway project, and the second list should contain data for students\n## who do not.\n\nsubway_project_lesson_keys = ['746169184', '3176718735']\n\n\npassing_submission = set()\n\nfor submission in paid_submissions:\n if submission['lesson_key'] in subway_project_lesson_keys and \\\n submission['assigned_rating'] in ['PASSED', 'DISTINCTION']:\n passing_submission.add(submission['account_key'])\n\n \npassing_engagement = []\nnon_passing_engagement = []\nfor engagement in paid_engagement_in_first_week:\n if engagement['account_key'] in passing_submission:\n passing_engagement.append(engagement)\n else:\n non_passing_engagement.append(engagement)\n \nprint \"Passing engagement is {}\".format(len(passing_engagement))\nprint \"Non Passing engagement is {}\".format(len(non_passing_engagement))",
"Comparing the Two Student Groups",
"######################################\n# 12 #\n######################################\n\n## Compute some metrics you're interested in and see how they differ for\n## students who pass the subway project vs. students who don't. A good\n## starting point would be the metrics we looked at earlier (minutes spent\n## in the classroom, lessons completed, and days visited).\npassing_engagement_by_account = group_by(passing_engagement, 'account_key')\nnon_passing_engagement_by_account = group_by(non_passing_engagement, 'account_key')\n\ndef summarize_data_for_key(data, key):\n by_key = sum_grouped_by_key(data, key)\n summarize(by_key)\n\nsummarize_data_for_key(passing_engagement_by_account, 'total_minutes_visited')\n\nsummarize_data_for_key(non_passing_engagement_by_account, 'total_minutes_visited')",
"We can see that the mean is much higher. We would expect passing students to be spending some more time compared to non passing students. The difference is 2.5 hours for non-passing vs. 6.5 hours for passing students\nLet's now do a comparsion for lessons_completed",
"summarize_data_for_key(passing_engagement_by_account, 'lessons_completed')\n\nsummarize_data_for_key(non_passing_engagement_by_account, 'lessons_completed')",
"Again we can see that the average is higher for students who passed.\nNow let's see what kind of difference did visits had for passing vs non-passing students",
"summarize_data_for_key(passing_engagement_by_account, 'has_visited')\n\nsummarize_data_for_key(non_passing_engagement_by_account, 'has_visited')",
"Again the mean is higher.\nOut of all of these the minutes spent seems to be most striking is the minutes spent.\nBut we need to understand that just spending higher time does not mean the student will pass. In other words we have just found a correlation between the two not causation. For ensuring causation we will need to use statistics.\nThere are many things which could be correlated but not actually cause causation. For example eating cheese is correlated with number of deaths by being tangled in bedsheets. Do you think one of these is causing the other?\n\nUS spending on science, space and technology correlates with Suicides by hanging, strangulation and suffocation. Do you think one of these is causing the other?\n\nThere are many more for which you should visit this website\nThere could be other factors causing both of them or it may be just random.\nFor our case there could be many factors which cause passing projects and spending time\n- Level of interest in Data Science\n- Background knowledge in Programming, Statistics\nTo make sure that this is actually causation we need to run A/B Test. To learn more about using online experiments to determine whether one change causes another, take the Udacity course A/B Testing.\nMaking Histograms",
"######################################\n# 13 #\n######################################\n\n## Make histograms of the three metrics we looked at earlier for both\n## students who passed the subway project and students who didn't. You\n## might also want to make histograms of any other metrics you examined.",
"Making Predictions\n\nWe may also want to find which students are most likely to pass their project based on the data that we have with us so far.\n\n\n38 - Communication\n\nwhich of your finding are most interesting\nhow will you present them?\n\ne.g.\n- Number of minutes spent can be communicated simply by saying that on an average students who pass spent X hours more than the students who did not pass\n- For the difference in days visited by the 2 groups showing histograms could be better\nWhen sharing visualizations you should polish them and explain what trends you are observing in them\nImproving Plots and Sharing Findings\n\nYou can add axis labels by using plt.xlabel(\"Label for x axis\"). Similarly for y labels\nYou can add title using plt.title(\"Title of plot\")\nJust importing seaborn can improve your plots\nYou'll also frequently want to add some arguments to your plot to tune how it looks. You can see what arguments are available on the documentation page for the hist function. One common argument to pass is the bins argument, which sets the number of bins used by your histogram. For example, plt.hist(data, bins=20) would make sure your histogram has 20 bins",
"non_passing_visits = sum_grouped_by_key(non_passing_engagement_by_account, 'has_visited')\n\nplt.hist(non_passing_visits.values(), bins=8)\nplt.xlabel('Number of days')\nplt.title('Distribution of classroom visits in the first week ' + \n 'for students who do not pass the subway project')\n\npassing_visits = sum_grouped_by_key(non_passing_engagement_by_account, 'has_visited')\n\nplt.hist(passing_visits.values(), bins=8)\nplt.xlabel('Number of days')\nplt.title('Distribution of classroom visits in the first week ' + \n 'for students who pass the subway project')",
"40 - Data Analysis and Related Terms\nData Analysis vs. Data Science vs. Data Engineering vs. Big Data\nData Science\n- Interchangeably used with Data Anlaysis\n- More focused on building systems\n- May require more experience\nData Engineering\n- More focus on Data Wrangling phase \n - Storage\n - Processing\n- Make pipelines, ensure fault tolerant and scale better\nBig Data\n- Fuzzy term relates to \"a lot\" of data\n- Any of data analysts, data scientists or engineer may work with Big data"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
stephank16/enes_graph_use_case
|
neo4j_prov/notebooks/ENES-prov-1.ipynb
|
gpl-3.0
|
[
"W3C prov based provenance storage in Neo4j\nThis notebook tries to provides a nearly complete mapping between a W3C prov standard based provenance descriptions and a Neo4j graph representation.\nThe approach taken is as follows:\n- take W3C prov json formated document as input\n- import using python-prov tool (result internal w3c prov representation)\n- generate neo4j nodes and relationships and generate graph \nNext steps are:\n- example queries\n- refinement and discussion of neo-prov-utils based on this notebook\n- development of a neo-prov-utils python package to be included in provenance capture software\n- example usage of package in ENES community provenance capture activities\nW3C prov standard and used w3c prov tool\n\nW3C prov documents overview: https://www.w3.org/TR/prov-overview/\nThe python prov software library is used ( https://github.com/trungdong/prov ) supporting the W3C provenance data model and providing PROV-JSON and PROV-XML implementations.\nThe PROV-JSON representation proposal is described in https://www.w3.org/Submission/2013/SUBM-prov-json-20130424/\n\nIn the following PROV-JSON documents are used as a compact way to \n- specify provenance records \n- exchange provenance info between client and server components\nThe generic W3C prov graph model\nThe generic prov graph model is based on:\n- Nodes (Agent, Entity, Activity) connected by\n- Edges (wasAttributedTo, wasDerivedFrom, wasGeneratedBy, used, wasAssociatedWith)\nsee the following grahical representatin (taken from https://www.w3.org/TR/prov-overview/)",
"from IPython.display import display, Image\nImage(filename='key-concepts.png')",
"Example Prov-JSON export and import",
"from prov.model import ProvDocument\nd1 = ProvDocument()\n\n%%writefile wps-prov.json\n\n{ \n \"prefix\": {\n \"enes\": \"http://www.enes.org/enes_entitiy/\", \n \"workflow\": \"http://www.enes.org/enes/workflow/#\", \n \"dc\": \"http://dublin-core.org/\", \n \"user\": \"http://www.enes.org/enes_entity/user/\", \n \"file\": \"http://www.enes.org/enes_entity/file/\", \n \"esgf\": \"http://carbon.dkrz.de/file/cmip5/\", \n \"enes_data\": \"http://enes_org/enes_data#\"\n }, \n \"entity\": { \n \"enes:input-data-set.nc\": {\n \"dc:title\": \"eval_series_1\", \n \"prov:type\": \"Dataset\", \n \"prov:label\": \"experiment-mpi-m\"\n }, \n \"enes:temporal-mean-result1-v1.nc\": {\n \"dc:title\": \"eval_series_1_1\"\n } \n }, \n \"wasDerivedFrom\": {\n \"enes:process-step1\": {\n \"prov:usedEntity\": \"enes:input-data-set.nc\", \n \"prov:generatedEntity\": \"enes:temporal-mean-result1-v1.nc\"\n }\n }, \n \"activity\": {\n \"workflow:temporal-mean-cdo\": {\n }\n }, \n \"used\": {\n \"enes:used-rel1\": {\n \"prov:entity\": \"enes:input-data-set.nc\", \n \"prov:activity\": \"workflow:temporal-mean-cdo\"\n }\n },\n \"wasGeneratedBy\": {\n \"enes:gen-rel1\": {\n \"prov:entity\": \"enes:temporal-mean-result1-v1.nc\", \n \"prov:activity\": \"workflow:temporal-mean-cdo\"\n }\n },\n \"agent\": {\n \"enes:Stephan Kindermann\": {}\n }, \n \"wasAttributedTo\": {\n \"enes:data-generator-rel1\": {\n \"prov:entity\": \"enes:temporal-mean-result1-v1.nc\", \n \"prov:agent\": \"enes:Stephan Kindermann\"\n }\n } \n}\n\nd2 = ProvDocument.deserialize('wps-prov.json')\n\nxml_result = d2.serialize(format='xml')\n\n%%writefile wps-prov2.xml\n<?xml version=1.0 encoding=ASCII?>\\n<prov:document xmlns:dc=\"http://dublin-core.org/\" xmlns:enes=\"http://www.enes.org/enes_entitiy/\" xmlns:enes_data=\"http://enes_org/enes_data#\" xmlns:esgf=\"http://carbon.dkrz.de/file/cmip5/\" xmlns:file=\"http://www.enes.org/enes_entity/file/\" xmlns:prov=\"http://www.w3.org/ns/prov#\" xmlns:user=\"http://www.enes.org/enes_entity/user/\" xmlns:workflow=\"http://www.enes.org/enes/workflow/#\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\\n <prov:wasDerivedFrom prov:id=\"enes:process-step1\">\\n <prov:generatedEntity prov:ref=\"enes:temporal-mean-result1-v1.nc\"/>\\n <prov:usedEntity prov:ref=\"enes:input-data-set.nc\"/>\\n </prov:wasDerivedFrom>\\n <prov:used prov:id=\"enes:used-rel1\">\\n <prov:activity prov:ref=\"workflow:temporal-mean-cdo\"/>\\n <prov:entity prov:ref=\"enes:input-data-set.nc\"/>\\n </prov:used>\\n <prov:wasAttributedTo prov:id=\"enes:data-generator-rel1\">\\n <prov:entity prov:ref=\"enes:temporal-mean-result1-v1.nc\"/>\\n <prov:agent prov:ref=\"enes:Stephan Kindermann\"/>\\n </prov:wasAttributedTo>\\n <prov:agent prov:id=\"enes:Stephan Kindermann\"/>\\n <prov:entity prov:id=\"enes:temporal-mean-result1-v1.nc\">\\n <dc:title>eval_series_1_1</dc:title>\\n </prov:entity>\\n <prov:entity prov:id=\"enes:input-data-set.nc\">\\n <prov:label>experiment-mpi-m</prov:label>\\n <prov:type xsi:type=\"xsd:string\">Dataset</prov:type>\\n <dc:title>eval_series_1</dc:title>\\n </prov:entity>\\n <prov:activity prov:id=\"workflow:temporal-mean-cdo\"/>\\n <prov:wasGeneratedBy prov:id=\"enes:gen-rel1\">\\n <prov:entity prov:ref=\"enes:temporal-mean-result1-v1.nc\"/>\\n <prov:activity prov:ref=\"workflow:temporal-mean-cdo\"/>\\n </prov:wasGeneratedBy>\\n</prov:document>\n\nd_xml_test = ProvDocument.deserialize('wps-prov2.xml',format='xml')\n\nprint d2.serialize(indent=2)\n\ndef 
visualize_prov(prov_doc):\n from prov.dot import prov_to_dot\n from IPython.display import Image\n dot = prov_to_dot(prov_doc)\n dot.write_png('tmp1.png')\n dot.write_pdf('tmp1.pdf')\n \n return Image('tmp1.png')\n\nvisualize_prov(d2)",
"Example Transformation to Neo4j graph\nThe transformation code is based on the prov_to_dot() function in the dot.py package of the prov python package mentioned above ( https://github.com/trungdong/prov ). The code was simplified and modified to generate neo4j nodes and relation instead of dot nodes and relations.",
"## d2 graph is input parameter for this cell ..\n\n\nimport six\nfrom py2neo import Graph, Node, Relationship, authenticate\nnode_map = {}\ncount = [0, 0, 0, 0] # counters for node ids\nrecords = d2.get_records()\nrelations = []\nuse_labels = True\nshow_relation_attributes = True\nother_attributes = True\nshow_nary = True\n\ndef _add_node(record):\n count[0] += 1\n node_id = 'n%d' % count[0]\n if use_labels:\n if record.label == record.identifier:\n node_label = '\"%s\"' % six.text_type(record.label)\n else:\n # Fancier label if both are different. The label will be\n # the main node text, whereas the identifier will be a\n # kind of suptitle.\n \n node_label = six.text_type(record.label)+','+six.text_type(record.identifier)\n else:\n node_label = six.text_type(record.identifier)\n\n uri = record.identifier.uri\n \n node = Node(node_id, label=node_label, URL=uri)\n node_map[uri] = node\n \n ## create Node ... ##dot.add_node(node)\n return node\n\ndef _get_node(qname):\n if qname is None:\n print \"ERROR: _get_node called for empty node\"\n #return _get_bnode()\n uri = qname.uri\n if uri not in node_map:\n _add_generic_node(qname)\n return node_map[uri] \n\n\nfrom prov.model import (\n PROV_ACTIVITY, PROV_AGENT, PROV_ALTERNATE, PROV_ASSOCIATION,\n PROV_ATTRIBUTION, PROV_BUNDLE, PROV_COMMUNICATION, PROV_DERIVATION,\n PROV_DELEGATION, PROV_ENTITY, PROV_GENERATION, PROV_INFLUENCE,\n PROV_INVALIDATION, PROV_END, PROV_MEMBERSHIP, PROV_MENTION,\n PROV_SPECIALIZATION, PROV_START, PROV_USAGE, Identifier,\n PROV_ATTRIBUTE_QNAMES, sorted_attributes, ProvException\n)\n\nfor rec in records:\n if rec.is_element():\n _add_node(rec)\n else:\n # Saving the relations for later processing\n relations.append(rec)\n \n \nneo_rels = [] \nfor rec in relations:\n args = rec.args\n # skipping empty records\n if not args:\n continue\n # picking element nodes\n nodes = [\n value for attr_name, value in rec.formal_attributes\n if attr_name in PROV_ATTRIBUTE_QNAMES\n ]\n other_attributes = [\n (attr_name, value) for attr_name, value in rec.attributes\n if attr_name not in PROV_ATTRIBUTE_QNAMES\n ]\n add_attribute_annotation = (\n show_relation_attributes and other_attributes\n )\n add_nary_elements = len(nodes) > 2 and show_nary\n \n if len(nodes) < 2: # too few elements for a relation?\n continue # cannot draw this \n \n if add_nary_elements or add_attribute_annotation:\n # a blank node for n-ary relations or the attribute annotation\n \n # the first segment\n \n rel = Relationship(_get_node(nodes[0]), rec.get_type()._str,_get_node(nodes[1]))\n print \"relationship: \",rel\n neo_rels.append(rel)\n \n if add_nary_elements: \n for node in nodes[2:]:\n if node is not None:\n relx = Relationshipdot.ad(_get_node(nodes[0]), \"...rel_name\",_get_node(node))\n neo_rels.append(rel) \n else:\n # show a simple binary relations with no annotation\n rel = Relationship(_get_node(nodes[0]), rec.get_type()._str,_get_node(nodes[1]))\n neo_rels.append(rel)\n \n\nprint node_map\nprint neo_rels",
"generate neo4j graph based on generated neo4j Nodes map and Relationship list",
"authenticate(\"localhost:7474\", \"neo4j\", \"prolog16\")\n\n# connect to authenticated graph database\ngraph = Graph(\"http://localhost:7474/db/data/\")\ngraph.delete_all()\n\nfor rel in neo_rels:\n graph.create(rel)\n\n%load_ext cypher\n\nresults = %cypher http://neo4j:prolog16@localhost:7474/db/data MATCH (a)-[r]-(b) RETURN a,r, b\n%matplotlib inline\nresults.get_graph()\n\nresults.draw()",
"\"remember\" cells",
"# example info calls on nodes and relations ..\nder = d2.get_records()[0]\nprint der.get_type()._str + \"tst\"\nprint der.attributes\nprint der.is_relation()\nprint der.label\nprint der.value\nprint der.args\nprint der.is_element()\nprint der.formal_attributes\nprint der.get_asserted_types()\nprint der.get_provn"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ymero/pyDataScienceToolkits_Base
|
Visualization/(3)special_curves_plot.ipynb
|
mit
|
[
"内容索引\n\n利萨如曲线 --- 使用标准三角函数绘制\n绘制方波 --- 利用无穷傅里叶级数表示\n绘制锯齿波和三角波",
"%matplotlib inline\nimport numpy as np\nfrom matplotlib.pyplot import plot\nfrom matplotlib.pyplot import show\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec",
"1. 利萨如曲线\n在NumPy中,所有标准三角函数如sin、cos、tan等均有对应的通用函数。利萨如曲线(Lissajous curve)是一种很有趣的使用三角函数的方式。\n利萨如曲线由如下参数方程定义:\n- x = A sin(at + π/2)\n- y = B sin(bt)",
"# 为简单起见,令A和B为1\nt = np.linspace(-np.pi, np.pi, 201)\na = 9\nb = 8\n\nx = np.sin(a*t + np.pi/2)\ny = np.sin(b*t)\n\nplot(x, y)\nshow()\n\ndef lissajous(a, b):\n t = np.linspace(-np.pi, np.pi, 201)\n x = np.sin(a*t + np.pi/2)\n y = np.sin(b*t)\n return x, y\n\n# matplotlib.gridspec.GridSpecBase\n# 指定figure中subplot的位置\ngs = gridspec.GridSpec(3,3)\nfig = plt.figure()\nax = []\n\nfor a in xrange(3):\n for b in xrange(3):\n ax.append(fig.add_subplot(gs[a,b]))\n a1 = a + 6\n b1 = b + 6\n x, y = lissajous(a1, b1)\n ax[-1].set_title('a=%d,b=%d' % (a1,b1))\n ax[-1].plot(x, y)\n\n# 使得子图适应figure的间距\nfig.tight_layout()\nshow()",
"2. 绘制方波\n方波可以近似表示为多个正弦波的叠加。事实上,任意一个方波信号都可以用无穷傅里叶级数表示。",
"Latex(r\"$\\sum_{k=1}^\\infty\\frac{4\\sin((2k-1)t)}{(2k-1)\\pi}$\")\n\nt = np.linspace(-np.pi, np.pi, 201)\nk = np.arange(1,99)\nk = 2*k - 1\nf = np.zeros_like(t)\n\nfor i in range(len(t)):\n f[i] = np.sum(np.sin(k * t[i])/k)\nf = (4/np.pi) * f\n\nplot(t, f)\nshow()",
"锯齿波和三角波\n锯齿波和三角波也是常见的波形。和方波类似,我们也可以将它们表示成无穷傅里叶级数。对锯齿波取绝对值即可得到三角波。",
"# 锯齿波的无穷级数表达式\nLatex(r\"$\\sum_{k=1}^\\infty\\frac{-2\\sin(2\\pi kt)}{k\\pi}$\")\n\nt = np.linspace(-np.pi, np.pi, 201)\nk = np.arange(1,99)\nf = np.zeros_like(t)\n\nfor i in range(len(t)):\n f[i] = np.sum(np.sin(2*np.pi*k * t[i])/k)\nf = (-2/np.pi) * f\n\nplot(t, f)\nshow()\n\nplot(t, np.abs(f),c='g',lw=2.0)\nshow()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
tensorflow/quantum
|
docs/tutorials/hello_many_worlds.ipynb
|
apache-2.0
|
[
"Copyright 2020 The TensorFlow Authors.",
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"Hello, many worlds\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/quantum/tutorials/hello_many_worlds\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/hello_many_worlds.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/quantum/blob/master/docs/tutorials/hello_many_worlds.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/hello_many_worlds.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>\n\nThis tutorial shows how a classical neural network can learn to correct qubit calibration errors. It introduces <a target=\"_blank\" href=\"https://github.com/quantumlib/Cirq\" class=\"external\">Cirq</a>, a Python framework to create, edit, and invoke Noisy Intermediate Scale Quantum (NISQ) circuits, and demonstrates how Cirq interfaces with TensorFlow Quantum.\nSetup",
"!pip install tensorflow==2.7.0",
"Install TensorFlow Quantum:",
"!pip install tensorflow-quantum\n\n# Update package resources to account for version changes.\nimport importlib, pkg_resources\nimportlib.reload(pkg_resources)",
"Now import TensorFlow and the module dependencies:",
"import tensorflow as tf\nimport tensorflow_quantum as tfq\n\nimport cirq\nimport sympy\nimport numpy as np\n\n# visualization tools\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom cirq.contrib.svg import SVGCircuit",
"1. The Basics\n1.1 Cirq and parameterized quantum circuits\nBefore exploring TensorFlow Quantum (TFQ), let's look at some <a target=\"_blank\" href=\"https://github.com/quantumlib/Cirq\" class=\"external\">Cirq</a> basics. Cirq is a Python library for quantum computing from Google. You use it to define circuits, including static and parameterized gates.\nCirq uses <a target=\"_blank\" href=\"https://www.sympy.org\" class=\"external\">SymPy</a> symbols to represent free parameters.",
"a, b = sympy.symbols('a b')",
"The following code creates a two-qubit circuit using your parameters:",
"# Create two qubits\nq0, q1 = cirq.GridQubit.rect(1, 2)\n\n# Create a circuit on these qubits using the parameters you created above.\ncircuit = cirq.Circuit(\n cirq.rx(a).on(q0),\n cirq.ry(b).on(q1), cirq.CNOT(control=q0, target=q1))\n\nSVGCircuit(circuit)",
"To evaluate circuits, you can use the cirq.Simulator interface. You replace free parameters in a circuit with specific numbers by passing in a cirq.ParamResolver object. The following code calculates the raw state vector output of your parameterized circuit:",
"# Calculate a state vector with a=0.5 and b=-0.5.\nresolver = cirq.ParamResolver({a: 0.5, b: -0.5})\noutput_state_vector = cirq.Simulator().simulate(circuit, resolver).final_state_vector\noutput_state_vector",
"State vectors are not directly accessible outside of simulation (notice the complex numbers in the output above). To be physically realistic, you must specify a measurement, which converts a state vector into a real number that classical computers can understand. Cirq specifies measurements using combinations of the <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Pauli_matrices\" class=\"external\">Pauli operators</a> $\\hat{X}$, $\\hat{Y}$, and $\\hat{Z}$. As illustration, the following code measures $\\hat{Z}_0$ and $\\frac{1}{2}\\hat{Z}_0 + \\hat{X}_1$ on the state vector you just simulated:",
"z0 = cirq.Z(q0)\n\nqubit_map={q0: 0, q1: 1}\n\nz0.expectation_from_state_vector(output_state_vector, qubit_map).real\n\nz0x1 = 0.5 * z0 + cirq.X(q1)\n\nz0x1.expectation_from_state_vector(output_state_vector, qubit_map).real",
"1.2 Quantum circuits as tensors\nTensorFlow Quantum (TFQ) provides tfq.convert_to_tensor, a function that converts Cirq objects into tensors. This allows you to send Cirq objects to our <a target=\"_blank\" href=\"https://www.tensorflow.org/quantum/api_docs/python/tfq/layers\">quantum layers</a> and <a target=\"_blank\" href=\"https://www.tensorflow.org/quantum/api_docs/python/tfq/get_expectation_op\">quantum ops</a>. The function can be called on lists or arrays of Cirq Circuits and Cirq Paulis:",
"# Rank 1 tensor containing 1 circuit.\ncircuit_tensor = tfq.convert_to_tensor([circuit])\n\nprint(circuit_tensor.shape)\nprint(circuit_tensor.dtype)",
"This encodes the Cirq objects as tf.string tensors that tfq operations decode as needed.",
"# Rank 1 tensor containing 2 Pauli operators.\npauli_tensor = tfq.convert_to_tensor([z0, z0x1])\npauli_tensor.shape",
"1.3 Batching circuit simulation\nTFQ provides methods for computing expectation values, samples, and state vectors. For now, let's focus on expectation values.\nThe highest-level interface for calculating expectation values is the tfq.layers.Expectation layer, which is a tf.keras.Layer. In its simplest form, this layer is equivalent to simulating a parameterized circuit over many cirq.ParamResolvers; however, TFQ allows batching following TensorFlow semantics, and circuits are simulated using efficient C++ code.\nCreate a batch of values to substitute for our a and b parameters:",
"batch_vals = np.array(np.random.uniform(0, 2 * np.pi, (5, 2)), dtype=np.float32)",
"Batching circuit execution over parameter values in Cirq requires a loop:",
"cirq_results = []\ncirq_simulator = cirq.Simulator()\n\nfor vals in batch_vals:\n resolver = cirq.ParamResolver({a: vals[0], b: vals[1]})\n final_state_vector = cirq_simulator.simulate(circuit, resolver).final_state_vector\n cirq_results.append(\n [z0.expectation_from_state_vector(final_state_vector, {\n q0: 0,\n q1: 1\n }).real])\n\nprint('cirq batch results: \\n {}'.format(np.array(cirq_results)))",
"The same operation is simplified in TFQ:",
"tfq.layers.Expectation()(circuit,\n symbol_names=[a, b],\n symbol_values=batch_vals,\n operators=z0)",
"2. Hybrid quantum-classical optimization\nNow that you've seen the basics, let's use TensorFlow Quantum to construct a hybrid quantum-classical neural net. You will train a classical neural net to control a single qubit. The control will be optimized to correctly prepare the qubit in the 0 or 1 state, overcoming a simulated systematic calibration error. This figure shows the architecture:\n<img src=\"./images/nn_control1.png\" width=\"1000\">\nEven without a neural network this is a straightforward problem to solve, but the theme is similar to the real quantum control problems you might solve using TFQ. It demonstrates an end-to-end example of a quantum-classical computation using the tfq.layers.ControlledPQC (Parametrized Quantum Circuit) layer inside of a tf.keras.Model.\nFor the implementation of this tutorial, this architecture is split into 3 parts:\n\nThe input circuit or datapoint circuit: The first three $R$ gates.\nThe controlled circuit: The other three $R$ gates.\nThe controller: The classical neural-network setting the parameters of the controlled circuit.\n\n2.1 The controlled circuit definition\nDefine a learnable single bit rotation, as indicated in the figure above. This will correspond to our controlled circuit.",
"# Parameters that the classical NN will feed values into.\ncontrol_params = sympy.symbols('theta_1 theta_2 theta_3')\n\n# Create the parameterized circuit.\nqubit = cirq.GridQubit(0, 0)\nmodel_circuit = cirq.Circuit(\n cirq.rz(control_params[0])(qubit),\n cirq.ry(control_params[1])(qubit),\n cirq.rx(control_params[2])(qubit))\n\nSVGCircuit(model_circuit)",
"2.2 The controller\nNow define controller network:",
"# The classical neural network layers.\ncontroller = tf.keras.Sequential([\n tf.keras.layers.Dense(10, activation='elu'),\n tf.keras.layers.Dense(3)\n])",
"Given a batch of commands, the controller outputs a batch of control signals for the controlled circuit. \nThe controller is randomly initialized so these outputs are not useful, yet.",
"controller(tf.constant([[0.0],[1.0]])).numpy()",
"2.3 Connect the controller to the circuit\nUse tfq to connect the controller to the controlled circuit, as a single keras.Model. \nSee the Keras Functional API guide for more about this style of model definition.\nFirst define the inputs to the model:",
"# This input is the simulated miscalibration that the model will learn to correct.\ncircuits_input = tf.keras.Input(shape=(),\n # The circuit-tensor has dtype `tf.string` \n dtype=tf.string,\n name='circuits_input')\n\n# Commands will be either `0` or `1`, specifying the state to set the qubit to.\ncommands_input = tf.keras.Input(shape=(1,),\n dtype=tf.dtypes.float32,\n name='commands_input')\n",
"Next apply operations to those inputs, to define the computation.",
"dense_2 = controller(commands_input)\n\n# TFQ layer for classically controlled circuits.\nexpectation_layer = tfq.layers.ControlledPQC(model_circuit,\n # Observe Z\n operators = cirq.Z(qubit))\nexpectation = expectation_layer([circuits_input, dense_2])",
"Now package this computation as a tf.keras.Model:",
"# The full Keras model is built from our layers.\nmodel = tf.keras.Model(inputs=[circuits_input, commands_input],\n outputs=expectation)",
"The network architecture is indicated by the plot of the model below.\nCompare this model plot to the architecture diagram to verify correctness.\nNote: May require a system install of the graphviz package.",
"tf.keras.utils.plot_model(model, show_shapes=True, dpi=70)",
"This model takes two inputs: The commands for the controller, and the input-circuit whose output the controller is attempting to correct. \n2.4 The dataset\nThe model attempts to output the correct correct measurement value of $\\hat{Z}$ for each command. The commands and correct values are defined below.",
"# The command input values to the classical NN.\ncommands = np.array([[0], [1]], dtype=np.float32)\n\n# The desired Z expectation value at output of quantum circuit.\nexpected_outputs = np.array([[1], [-1]], dtype=np.float32)",
"This is not the entire training dataset for this task. \nEach datapoint in the dataset also needs an input circuit.\n2.4 Input circuit definition\nThe input-circuit below defines the random miscalibration the model will learn to correct.",
"random_rotations = np.random.uniform(0, 2 * np.pi, 3)\nnoisy_preparation = cirq.Circuit(\n cirq.rx(random_rotations[0])(qubit),\n cirq.ry(random_rotations[1])(qubit),\n cirq.rz(random_rotations[2])(qubit)\n)\ndatapoint_circuits = tfq.convert_to_tensor([\n noisy_preparation\n] * 2) # Make two copied of this circuit",
"There are two copies of the circuit, one for each datapoint.",
"datapoint_circuits.shape",
"2.5 Training\nWith the inputs defined you can test-run the tfq model.",
"model([datapoint_circuits, commands]).numpy()",
"Now run a standard training process to adjust these values towards the expected_outputs.",
"optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)\nloss = tf.keras.losses.MeanSquaredError()\nmodel.compile(optimizer=optimizer, loss=loss)\nhistory = model.fit(x=[datapoint_circuits, commands],\n y=expected_outputs,\n epochs=30,\n verbose=0)\n\nplt.plot(history.history['loss'])\nplt.title(\"Learning to Control a Qubit\")\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"Error in Control\")\nplt.show()",
"From this plot you can see that the neural network has learned to overcome the systematic miscalibration.\n2.6 Verify outputs\nNow use the trained model, to correct the qubit calibration errors. With Cirq:",
"def check_error(command_values, desired_values):\n \"\"\"Based on the value in `command_value` see how well you could prepare\n the full circuit to have `desired_value` when taking expectation w.r.t. Z.\"\"\"\n params_to_prepare_output = controller(command_values).numpy()\n full_circuit = noisy_preparation + model_circuit\n\n # Test how well you can prepare a state to get expectation the expectation\n # value in `desired_values`\n for index in [0, 1]:\n state = cirq_simulator.simulate(\n full_circuit,\n {s:v for (s,v) in zip(control_params, params_to_prepare_output[index])}\n ).final_state_vector\n expt = cirq.Z(qubit).expectation_from_state_vector(state, {qubit: 0}).real\n print(f'For a desired output (expectation) of {desired_values[index]} with'\n f' noisy preparation, the controller\\nnetwork found the following '\n f'values for theta: {params_to_prepare_output[index]}\\nWhich gives an'\n f' actual expectation of: {expt}\\n')\n\n\ncheck_error(commands, expected_outputs)",
"The value of the loss function during training provides a rough idea of how well the model is learning. The lower the loss, the closer the expectation values in the above cell is to desired_values. If you aren't as concerned with the parameter values, you can always check the outputs from above using tfq:",
"model([datapoint_circuits, commands])",
"3 Learning to prepare eigenstates of different operators\nThe choice of the $\\pm \\hat{Z}$ eigenstates corresponding to 1 and 0 was arbitrary. You could have just as easily wanted 1 to correspond to the $+ \\hat{Z}$ eigenstate and 0 to correspond to the $-\\hat{X}$ eigenstate. One way to accomplish this is by specifying a different measurement operator for each command, as indicated in the figure below:\n<img src=\"./images/nn_control2.png\" width=\"1000\">\nThis requires use of <code>tfq.layers.Expectation</code>. Now your input has grown to include three objects: circuit, command, and operator. The output is still the expectation value.\n3.1 New model definition\nLets take a look at the model to accomplish this task:",
"# Define inputs.\ncommands_input = tf.keras.layers.Input(shape=(1),\n dtype=tf.dtypes.float32,\n name='commands_input')\ncircuits_input = tf.keras.Input(shape=(),\n # The circuit-tensor has dtype `tf.string` \n dtype=tf.dtypes.string,\n name='circuits_input')\noperators_input = tf.keras.Input(shape=(1,),\n dtype=tf.dtypes.string,\n name='operators_input')",
"Here is the controller network:",
"# Define classical NN.\ncontroller = tf.keras.Sequential([\n tf.keras.layers.Dense(10, activation='elu'),\n tf.keras.layers.Dense(3)\n])",
"Combine the circuit and the controller into a single keras.Model using tfq:",
"dense_2 = controller(commands_input)\n\n# Since you aren't using a PQC or ControlledPQC you must append\n# your model circuit onto the datapoint circuit tensor manually.\nfull_circuit = tfq.layers.AddCircuit()(circuits_input, append=model_circuit)\nexpectation_output = tfq.layers.Expectation()(full_circuit,\n symbol_names=control_params,\n symbol_values=dense_2,\n operators=operators_input)\n\n# Contruct your Keras model.\ntwo_axis_control_model = tf.keras.Model(\n inputs=[circuits_input, commands_input, operators_input],\n outputs=[expectation_output])",
"3.2 The dataset\nNow you will also include the operators you wish to measure for each datapoint you supply for model_circuit:",
"# The operators to measure, for each command.\noperator_data = tfq.convert_to_tensor([[cirq.X(qubit)], [cirq.Z(qubit)]])\n\n# The command input values to the classical NN.\ncommands = np.array([[0], [1]], dtype=np.float32)\n\n# The desired expectation value at output of quantum circuit.\nexpected_outputs = np.array([[1], [-1]], dtype=np.float32)",
"3.3 Training\nNow that you have your new inputs and outputs you can train once again using keras.",
"optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)\nloss = tf.keras.losses.MeanSquaredError()\n\ntwo_axis_control_model.compile(optimizer=optimizer, loss=loss)\n\nhistory = two_axis_control_model.fit(\n x=[datapoint_circuits, commands, operator_data],\n y=expected_outputs,\n epochs=30,\n verbose=1)\n\nplt.plot(history.history['loss'])\nplt.title(\"Learning to Control a Qubit\")\nplt.xlabel(\"Iterations\")\nplt.ylabel(\"Error in Control\")\nplt.show()",
"The loss function has dropped to zero.\nThe controller is available as a stand-alone model. Call the controller, and check its response to each command signal. It would take some work to correctly compare these outputs to the contents of random_rotations.",
"controller.predict(np.array([0,1]))",
"Success: See if you can adapt the check_error function from your first model to work with this new model architecture."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ceos-seo/data_cube_notebooks
|
notebooks/Data_Challenge/DEM.ipynb
|
apache-2.0
|
[
"2022 EY Challenge - Digital Elevation\nThis notebook can be used to create a Digital Elevation Model (DEM) dataset. This elevation information, along with calculated slope, can be used as a \"predictor variable\" to relate to species samples. It is believed that terrain elevation may be a constraint for species habitat. The baseline data is 30-meters spatial resolution [Copernicus DEM GLO-30] and can be found in the MS Planetary Computer catalog: (https://planetarycomputer.microsoft.com/dataset/cop-dem-glo-30)",
"# Supress Warnings \nimport warnings\nwarnings.filterwarnings('ignore')\n\n# Import common GIS tools\nimport numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport rioxarray as rio\nimport rasterio.features\nimport folium\nimport math\n\n# Import Planetary Computer tools\nimport pystac_client\nimport planetary_computer",
"Define the analysis region and view on a map\nFirst, we define our area of interest using latitude and longitude coordinates. Our test region is near Richmond, NSW, Australia. The first line defines the lower-left corner of the bounding box and the second line defines the upper-right corner of the bounding box. GeoJSON format uses a specific order: (longitude, latitude), so be careful when entering the coordinates.",
"# Define the bounding box using corners\nmin_lon, min_lat = (150.62, -33.69) # Lower-left corner (longitude, latitude)\nmax_lon, max_lat = (150.83, -33.48) # Upper-right corner (longitude, latitude)\n\nbbox = (min_lon, min_lat, max_lon, max_lat)\nlatitude = (min_lat, max_lat)\nlongitude = (min_lon, max_lon)\n\ndef _degree_to_zoom_level(l1, l2, margin = 0.0):\n \n degree = abs(l1 - l2) * (1 + margin)\n zoom_level_int = 0\n if degree != 0:\n zoom_level_float = math.log(360/degree)/math.log(2)\n zoom_level_int = int(zoom_level_float)\n else:\n zoom_level_int = 18\n return zoom_level_int\n\ndef display_map(latitude = None, longitude = None):\n\n margin = -0.5\n zoom_bias = 0\n lat_zoom_level = _degree_to_zoom_level(margin = margin, *latitude ) + zoom_bias\n lon_zoom_level = _degree_to_zoom_level(margin = margin, *longitude) + zoom_bias\n zoom_level = min(lat_zoom_level, lon_zoom_level) \n center = [np.mean(latitude), np.mean(longitude)]\n \n map_hybrid = folium.Map(location=center,zoom_start=zoom_level, \n tiles=\" http://mt1.google.com/vt/lyrs=y&z={z}&x={x}&y={y}\",attr=\"Google\")\n \n line_segments = [(latitude[0],longitude[0]),(latitude[0],longitude[1]),\n (latitude[1],longitude[1]),(latitude[1],longitude[0]),\n (latitude[0],longitude[0])]\n \n map_hybrid.add_child(folium.features.PolyLine(locations=line_segments,color='red',opacity=0.8))\n map_hybrid.add_child(folium.features.LatLngPopup()) \n\n return map_hybrid\n\n# Plot bounding box on a map\nf = folium.Figure(width=600, height=600)\nm = display_map(latitude,longitude)\nf.add_child(m)",
"Discover and load the data for analysis\nUsing the pystac_client we can search the Planetary Computer's STAC endpoint for items matching our query parameters. We will look for data tiles (1-degree square) that intersect our bounding box.",
"stac = pystac_client.Client.open(\"https://planetarycomputer.microsoft.com/api/stac/v1\")\nsearch = stac.search(bbox=bbox,collections=[\"cop-dem-glo-30\"])\n\nitems = list(search.get_items())\nprint('Number of 1-degree data tiles connected to our region:',len(items))",
"Next, we'll load the elevation data into an xarray DataArray, calculate the slope between pixels, and then \"clip\" the data to only the pixels within our region (bounding box). The dataset includes elevation (meters) at lat-lon positions (EPSG:4326) at a spatial separation of 30-meters per pixel.",
"signed_asset = planetary_computer.sign(items[0].assets[\"data\"])\n\ndata_elevation = (xr.open_rasterio(signed_asset.href).squeeze().drop(\"band\"))",
"We will create a function to calculate slope (in percent) between pixels. The \"dem\" parameter is the elevation dataset to use for the slope calculation. The \"resolution\" parameter is the pixel spatial resolution of the elevation dataset.",
"from scipy.ndimage import convolve\n\ndef slope_pct(dem, resolution):\n # Kernel for rate of elevation change in x-axis.\n dx_kernel = np.array([[1, 0, -1],\n [2, 0, -2],\n [1, 0, -1]])\n # Kernel for rate of elevation change in y-axis.\n dy_kernel = np.array([[1, 2, 1],\n [0, 0, 0],\n [-1, -2, -1]])\n # Rate of change calculations for each axis.\n dx = convolve(dem, dx_kernel) / (8 * resolution)\n dy = convolve(dem, dy_kernel) / (8 * resolution)\n # Return rise/run * 100 for slope percent.\n return np.sqrt(np.square(dx) + np.square(dy)) * 100\n\ndata_slope = xr.DataArray(slope_pct(data_elevation,30.0),coords=dict(data_elevation.coords))\n\n# Create a combined dataset that includes elevation and slope\nCombined_Data = xr.Dataset({'elevation':data_elevation,'slope':data_slope},coords=data_elevation.coords)\n\nClipped_Data = Combined_Data.sel(x=slice(min_lon,max_lon),y=slice(max_lat,min_lat))",
"Display elevation and slope products",
"Clipped_Data.elevation.plot.imshow(size=8,cmap=plt.cm.terrain,vmin=0.0,vmax=np.max(Clipped_Data.elevation))\nplt.gca().set_aspect('equal')\nplt.title('Terrain Elevation (meters)')\nplt.xlabel('Longitude')\nplt.ylabel('Latitude')\nplt.show()\n\nClipped_Data.slope.plot.imshow(size=8, cmap=plt.cm.nipy_spectral, vmin=0, vmax=50)\nplt.gca().set_aspect('equal')\nplt.title('Terrain Slope (percent)')\nplt.xlabel('Longitude')\nplt.ylabel('Latitude')\nplt.show()",
"Save the output data in a GeoTIFF file",
"filename = \"DEM_sample8.tiff\"\n\n# Set the dimensions of file in pixels\nheight = Clipped_Data.elevation.shape[0]\nwidth = Clipped_Data.elevation.shape[1]\n\n# Define the Coordinate Reference System (CRS) to be common Lat-Lon coordinates\n# Define the tranformation using our bounding box so the Lat-Lon information is written to the GeoTIFF\ngt = rasterio.transform.from_bounds(min_lon,min_lat,max_lon,max_lat,width,height)\nClipped_Data.rio.write_crs(\"epsg:4326\", inplace=True)\nClipped_Data.rio.write_transform(transform=gt, inplace=True);\n\n# Create the GeoTIFF output file using the defined parameters\nwith rasterio.open(filename,'w',driver='GTiff',width=width,height=height,crs='epsg:4326',\n transform=gt,count=2,compress='lzw',dtype='float32') as dst:\n dst.write(Clipped_Data.elevation,1)\n dst.write(Clipped_Data.slope,2)\n print (dst.crs)\n print (dst.transform)\n dst.close()\n\n# Show the location and size of the new output file\n!ls *.tiff -lah",
"How will the participants use this data?\nThe GeoTIFF file will contain the Lat-Lon coordinates of each pixel and will also contain the elevation and slope for each pixel as separate data layers. Since the FrogID data is also Lat-Lon position, it is possible to find the closest pixel using code similar to what is demonstrated below. Once this pixel is found, then the corresponding elevation and slope data values can be used for modeling species distribution.",
"# This is an example for a specific Lon-Lat location randomly selected within our sample region.\nvalues = Clipped_Data.elevation.sel(x=150.71, y=-33.51, method=\"nearest\").values \nprint(\"This is the elevation in meters for the closest pixel: \", np.round(values,1))\nvalues = Clipped_Data.slope.sel(x=150.71, y=-33.51, method=\"nearest\").values \nprint(\"This is the slope in percent for the closest pixel: \", np.round(values,1))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
smorton2/think-stats
|
code/chap12soln.ipynb
|
gpl-3.0
|
[
"Examples and Exercises from Think Stats, 2nd Edition\nhttp://thinkstats2.com\nCopyright 2016 Allen B. Downey\nMIT License: https://opensource.org/licenses/MIT",
"from __future__ import print_function, division\n\n%matplotlib inline\n\nimport warnings\nwarnings.filterwarnings('ignore', category=FutureWarning)\n\nimport numpy as np\nimport pandas as pd\n\nimport random\n\nimport thinkstats2\nimport thinkplot",
"Time series analysis\nLoad the data from \"Price of Weed\".",
"transactions = pd.read_csv('mj-clean.csv', parse_dates=[5])\ntransactions.head()",
"The following function takes a DataFrame of transactions and compute daily averages.",
"def GroupByDay(transactions, func=np.mean):\n \"\"\"Groups transactions by day and compute the daily mean ppg.\n\n transactions: DataFrame of transactions\n\n returns: DataFrame of daily prices\n \"\"\"\n grouped = transactions[['date', 'ppg']].groupby('date')\n daily = grouped.aggregate(func)\n\n daily['date'] = daily.index\n start = daily.date[0]\n one_year = np.timedelta64(1, 'Y')\n daily['years'] = (daily.date - start) / one_year\n\n return daily",
"The following function returns a map from quality name to a DataFrame of daily averages.",
"def GroupByQualityAndDay(transactions):\n \"\"\"Divides transactions by quality and computes mean daily price.\n\n transaction: DataFrame of transactions\n \n returns: map from quality to time series of ppg\n \"\"\"\n groups = transactions.groupby('quality')\n dailies = {}\n for name, group in groups:\n dailies[name] = GroupByDay(group) \n\n return dailies",
"dailies is the map from quality name to DataFrame.",
"dailies = GroupByQualityAndDay(transactions)",
"The following plots the daily average price for each quality.",
"import matplotlib.pyplot as plt\n\nthinkplot.PrePlot(rows=3)\nfor i, (name, daily) in enumerate(dailies.items()):\n thinkplot.SubPlot(i+1)\n title = 'Price per gram ($)' if i == 0 else ''\n thinkplot.Config(ylim=[0, 20], title=title)\n thinkplot.Scatter(daily.ppg, s=10, label=name)\n if i == 2: \n plt.xticks(rotation=30)\n thinkplot.Config()\n else:\n thinkplot.Config(xticks=[])",
"We can use statsmodels to run a linear model of price as a function of time.",
"import statsmodels.formula.api as smf\n\ndef RunLinearModel(daily):\n model = smf.ols('ppg ~ years', data=daily)\n results = model.fit()\n return model, results",
"Here's what the results look like.",
"from IPython.display import display\n\nfor name, daily in dailies.items():\n model, results = RunLinearModel(daily)\n print(name)\n display(results.summary())",
"Now let's plot the fitted model with the data.",
"def PlotFittedValues(model, results, label=''):\n \"\"\"Plots original data and fitted values.\n\n model: StatsModel model object\n results: StatsModel results object\n \"\"\"\n years = model.exog[:,1]\n values = model.endog\n thinkplot.Scatter(years, values, s=15, label=label)\n thinkplot.Plot(years, results.fittedvalues, label='model', color='#ff7f00')",
"The following function plots the original data and the fitted curve.",
"def PlotLinearModel(daily, name):\n \"\"\"Plots a linear fit to a sequence of prices, and the residuals.\n \n daily: DataFrame of daily prices\n name: string\n \"\"\"\n model, results = RunLinearModel(daily)\n PlotFittedValues(model, results, label=name)\n thinkplot.Config(title='Fitted values',\n xlabel='Years',\n xlim=[-0.1, 3.8],\n ylabel='Price per gram ($)')",
"Here are results for the high quality category:",
"name = 'high'\ndaily = dailies[name]\n\nPlotLinearModel(daily, name)",
"Moving averages\nAs a simple example, I'll show the rolling average of the numbers from 1 to 10.",
"series = np.arange(10)",
"With a \"window\" of size 3, we get the average of the previous 3 elements, or nan when there are fewer than 3.",
"pd.rolling_mean(series, 3)",
"The following function plots the rolling mean.",
"def PlotRollingMean(daily, name):\n \"\"\"Plots rolling mean.\n\n daily: DataFrame of daily prices\n \"\"\"\n dates = pd.date_range(daily.index.min(), daily.index.max())\n reindexed = daily.reindex(dates)\n\n thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.2, label=name)\n roll_mean = pd.rolling_mean(reindexed.ppg, 30)\n thinkplot.Plot(roll_mean, label='rolling mean', color='#ff7f00')\n plt.xticks(rotation=30)\n thinkplot.Config(ylabel='price per gram ($)')",
"Here's what it looks like for the high quality category.",
"PlotRollingMean(daily, name)",
"The exponentially-weighted moving average gives more weight to more recent points.",
"def PlotEWMA(daily, name):\n \"\"\"Plots rolling mean.\n\n daily: DataFrame of daily prices\n \"\"\"\n dates = pd.date_range(daily.index.min(), daily.index.max())\n reindexed = daily.reindex(dates)\n\n thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.2, label=name)\n roll_mean = pd.ewma(reindexed.ppg, 30)\n thinkplot.Plot(roll_mean, label='EWMA', color='#ff7f00')\n plt.xticks(rotation=30)\n thinkplot.Config(ylabel='price per gram ($)')\n\nPlotEWMA(daily, name)",
"We can use resampling to generate missing values with the right amount of noise.",
"def FillMissing(daily, span=30):\n \"\"\"Fills missing values with an exponentially weighted moving average.\n\n Resulting DataFrame has new columns 'ewma' and 'resid'.\n\n daily: DataFrame of daily prices\n span: window size (sort of) passed to ewma\n\n returns: new DataFrame of daily prices\n \"\"\"\n dates = pd.date_range(daily.index.min(), daily.index.max())\n reindexed = daily.reindex(dates)\n\n ewma = pd.ewma(reindexed.ppg, span=span)\n\n resid = (reindexed.ppg - ewma).dropna()\n fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))\n reindexed.ppg.fillna(fake_data, inplace=True)\n\n reindexed['ewma'] = ewma\n reindexed['resid'] = reindexed.ppg - ewma\n return reindexed\n\ndef PlotFilled(daily, name):\n \"\"\"Plots the EWMA and filled data.\n\n daily: DataFrame of daily prices\n \"\"\"\n filled = FillMissing(daily, span=30)\n thinkplot.Scatter(filled.ppg, s=15, alpha=0.2, label=name)\n thinkplot.Plot(filled.ewma, label='EWMA', color='#ff7f00')\n plt.xticks(rotation=30)\n thinkplot.Config(ylabel='Price per gram ($)')",
"Here's what the EWMA model looks like with missing values filled.",
"PlotFilled(daily, name)",
"Serial correlation\nThe following function computes serial correlation with the given lag.",
"def SerialCorr(series, lag=1):\n xs = series[lag:]\n ys = series.shift(lag)[lag:]\n corr = thinkstats2.Corr(xs, ys)\n return corr",
"Before computing correlations, we'll fill missing values.",
"filled_dailies = {}\nfor name, daily in dailies.items():\n filled_dailies[name] = FillMissing(daily, span=30)",
"Here are the serial correlations for raw price data.",
"for name, filled in filled_dailies.items(): \n corr = thinkstats2.SerialCorr(filled.ppg, lag=1)\n print(name, corr)",
"It's not surprising that there are correlations between consecutive days, because there are obvious trends in the data.\nIt is more interested to see whether there are still correlations after we subtract away the trends.",
"for name, filled in filled_dailies.items(): \n corr = thinkstats2.SerialCorr(filled.resid, lag=1)\n print(name, corr)",
"Even if the correlations between consecutive days are weak, there might be correlations across intervals of one week, one month, or one year.",
"rows = []\nfor lag in [1, 7, 30, 365]:\n print(lag, end='\\t')\n for name, filled in filled_dailies.items(): \n corr = SerialCorr(filled.resid, lag)\n print('%.2g' % corr, end='\\t')\n print()",
"The strongest correlation is a weekly cycle in the medium quality category.\nAutocorrelation\nThe autocorrelation function is the serial correlation computed for all lags.\nWe can use it to replicate the results from the previous section.",
"import statsmodels.tsa.stattools as smtsa\n\nfilled = filled_dailies['high']\nacf = smtsa.acf(filled.resid, nlags=365, unbiased=True)\nprint('%0.2g, %.2g, %0.2g, %0.2g, %0.2g' % \n (acf[0], acf[1], acf[7], acf[30], acf[365]))",
"To get a sense of how much autocorrelation we should expect by chance, we can resample the data (which eliminates any actual autocorrelation) and compute the ACF.",
"def SimulateAutocorrelation(daily, iters=1001, nlags=40):\n \"\"\"Resample residuals, compute autocorrelation, and plot percentiles.\n\n daily: DataFrame\n iters: number of simulations to run\n nlags: maximum lags to compute autocorrelation\n \"\"\"\n # run simulations\n t = []\n for _ in range(iters):\n filled = FillMissing(daily, span=30)\n resid = thinkstats2.Resample(filled.resid)\n acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]\n t.append(np.abs(acf))\n\n high = thinkstats2.PercentileRows(t, [97.5])[0]\n low = -high\n lags = range(1, nlags+1)\n thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')",
"The following function plots the actual autocorrelation for lags up to 40 days.\nThe flag add_weekly indicates whether we should add a simulated weekly cycle.",
"def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):\n \"\"\"Plots autocorrelation functions.\n\n dailies: map from category name to DataFrame of daily prices\n nlags: number of lags to compute\n add_weekly: boolean, whether to add a simulated weekly pattern\n \"\"\"\n thinkplot.PrePlot(3)\n daily = dailies['high']\n SimulateAutocorrelation(daily)\n\n for name, daily in dailies.items():\n\n if add_weekly:\n daily = AddWeeklySeasonality(daily)\n\n filled = FillMissing(daily, span=30)\n\n acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)\n lags = np.arange(len(acf))\n thinkplot.Plot(lags[1:], acf[1:], label=name)",
"To show what a strong weekly cycle would look like, we have the option of adding a price increase of 1-2 dollars on Friday and Saturdays.",
"def AddWeeklySeasonality(daily):\n \"\"\"Adds a weekly pattern.\n\n daily: DataFrame of daily prices\n\n returns: new DataFrame of daily prices\n \"\"\"\n fri_or_sat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)\n fake = daily.copy()\n fake.ppg.loc[fri_or_sat] += np.random.uniform(0, 2, fri_or_sat.sum())\n return fake",
"Here's what the real ACFs look like. The gray regions indicate the levels we expect by chance.",
"axis = [0, 41, -0.2, 0.2]\n\nPlotAutoCorrelation(dailies, add_weekly=False)\nthinkplot.Config(axis=axis, \n loc='lower right',\n ylabel='correlation',\n xlabel='lag (day)')",
"Here's what it would look like if there were a weekly cycle.",
"PlotAutoCorrelation(dailies, add_weekly=True)\nthinkplot.Config(axis=axis,\n loc='lower right',\n xlabel='lag (days)')",
"Prediction\nThe simplest way to generate predictions is to use statsmodels to fit a model to the data, then use the predict method from the results.",
"def GenerateSimplePrediction(results, years):\n \"\"\"Generates a simple prediction.\n\n results: results object\n years: sequence of times (in years) to make predictions for\n\n returns: sequence of predicted values\n \"\"\"\n n = len(years)\n inter = np.ones(n)\n d = dict(Intercept=inter, years=years, years2=years**2)\n predict_df = pd.DataFrame(d)\n predict = results.predict(predict_df)\n return predict\n\ndef PlotSimplePrediction(results, years):\n predict = GenerateSimplePrediction(results, years)\n\n thinkplot.Scatter(daily.years, daily.ppg, alpha=0.2, label=name)\n thinkplot.plot(years, predict, color='#ff7f00')\n xlim = years[0]-0.1, years[-1]+0.1\n thinkplot.Config(title='Predictions',\n xlabel='Years',\n xlim=xlim,\n ylabel='Price per gram ($)',\n loc='upper right')",
"Here's what the prediction looks like for the high quality category, using the linear model.",
"name = 'high'\ndaily = dailies[name]\n\n_, results = RunLinearModel(daily)\nyears = np.linspace(0, 5, 101)\nPlotSimplePrediction(results, years)",
"When we generate predictions, we want to quatify the uncertainty in the prediction. We can do that by resampling. The following function fits a model to the data, computes residuals, then resamples from the residuals to general fake datasets. It fits the same model to each fake dataset and returns a list of results.",
"def SimulateResults(daily, iters=101, func=RunLinearModel):\n \"\"\"Run simulations based on resampling residuals.\n\n daily: DataFrame of daily prices\n iters: number of simulations\n func: function that fits a model to the data\n\n returns: list of result objects\n \"\"\"\n _, results = func(daily)\n fake = daily.copy()\n \n result_seq = []\n for _ in range(iters):\n fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)\n _, fake_results = func(fake)\n result_seq.append(fake_results)\n\n return result_seq",
"To generate predictions, we take the list of results fitted to resampled data. For each model, we use the predict method to generate predictions, and return a sequence of predictions.\nIf add_resid is true, we add resampled residuals to the predicted values, which generates predictions that include predictive uncertainty (due to random noise) as well as modeling uncertainty (due to random sampling).",
"def GeneratePredictions(result_seq, years, add_resid=False):\n \"\"\"Generates an array of predicted values from a list of model results.\n\n When add_resid is False, predictions represent sampling error only.\n\n When add_resid is True, they also include residual error (which is\n more relevant to prediction).\n \n result_seq: list of model results\n years: sequence of times (in years) to make predictions for\n add_resid: boolean, whether to add in resampled residuals\n\n returns: sequence of predictions\n \"\"\"\n n = len(years)\n d = dict(Intercept=np.ones(n), years=years, years2=years**2)\n predict_df = pd.DataFrame(d)\n \n predict_seq = []\n for fake_results in result_seq:\n predict = fake_results.predict(predict_df)\n if add_resid:\n predict += thinkstats2.Resample(fake_results.resid, n)\n predict_seq.append(predict)\n\n return predict_seq",
"To visualize predictions, I show a darker region that quantifies modeling uncertainty and a lighter region that quantifies predictive uncertainty.",
"def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):\n \"\"\"Plots predictions.\n\n daily: DataFrame of daily prices\n years: sequence of times (in years) to make predictions for\n iters: number of simulations\n percent: what percentile range to show\n func: function that fits a model to the data\n \"\"\"\n result_seq = SimulateResults(daily, iters=iters, func=func)\n p = (100 - percent) / 2\n percents = p, 100-p\n\n predict_seq = GeneratePredictions(result_seq, years, add_resid=True)\n low, high = thinkstats2.PercentileRows(predict_seq, percents)\n thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')\n\n predict_seq = GeneratePredictions(result_seq, years, add_resid=False)\n low, high = thinkstats2.PercentileRows(predict_seq, percents)\n thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')",
"Here are the results for the high quality category.",
"years = np.linspace(0, 5, 101)\nthinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)\nPlotPredictions(daily, years)\nxlim = years[0]-0.1, years[-1]+0.1\nthinkplot.Config(title='Predictions',\n xlabel='Years',\n xlim=xlim,\n ylabel='Price per gram ($)')",
"But there is one more source of uncertainty: how much past data should we use to build the model?\nThe following function generates a sequence of models based on different amounts of past data.",
"def SimulateIntervals(daily, iters=101, func=RunLinearModel):\n \"\"\"Run simulations based on different subsets of the data.\n\n daily: DataFrame of daily prices\n iters: number of simulations\n func: function that fits a model to the data\n\n returns: list of result objects\n \"\"\"\n result_seq = []\n starts = np.linspace(0, len(daily), iters).astype(int)\n\n for start in starts[:-2]:\n subset = daily[start:]\n _, results = func(subset)\n fake = subset.copy()\n\n for _ in range(iters):\n fake.ppg = (results.fittedvalues + \n thinkstats2.Resample(results.resid))\n _, fake_results = func(fake)\n result_seq.append(fake_results)\n\n return result_seq",
"And this function plots the results.",
"def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):\n \"\"\"Plots predictions based on different intervals.\n\n daily: DataFrame of daily prices\n years: sequence of times (in years) to make predictions for\n iters: number of simulations\n percent: what percentile range to show\n func: function that fits a model to the data\n \"\"\"\n result_seq = SimulateIntervals(daily, iters=iters, func=func)\n p = (100 - percent) / 2\n percents = p, 100-p\n\n predict_seq = GeneratePredictions(result_seq, years, add_resid=True)\n low, high = thinkstats2.PercentileRows(predict_seq, percents)\n thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')",
"Here's what the high quality category looks like if we take into account uncertainty about how much past data to use.",
"name = 'high'\ndaily = dailies[name]\n\nthinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)\nPlotIntervals(daily, years)\nPlotPredictions(daily, years)\nxlim = years[0]-0.1, years[-1]+0.1\nthinkplot.Config(title='Predictions',\n xlabel='Years',\n xlim=xlim,\n ylabel='Price per gram ($)')",
"Exercises\nExercise: The linear model I used in this chapter has the obvious drawback that it is linear, and there is no reason to expect prices to change linearly over time. We can add flexibility to the model by adding a quadratic term, as we did in Section 11.3.\nUse a quadratic model to fit the time series of daily prices, and use the model to generate predictions. You will have to write a version of RunLinearModel that runs that quadratic model, but after that you should be able to reuse code from the chapter to generate predictions.",
"# Solution\n\ndef RunQuadraticModel(daily):\n \"\"\"Runs a linear model of prices versus years.\n\n daily: DataFrame of daily prices\n\n returns: model, results\n \"\"\"\n daily['years2'] = daily.years**2\n model = smf.ols('ppg ~ years + years2', data=daily)\n results = model.fit()\n return model, results\n\n# Solution\n\nname = 'high'\ndaily = dailies[name]\n\nmodel, results = RunQuadraticModel(daily)\nresults.summary() \n\n# Solution\n\nPlotFittedValues(model, results, label=name)\nthinkplot.Config(title='Fitted values',\n xlabel='Years',\n xlim=[-0.1, 3.8],\n ylabel='price per gram ($)')\n\n# Solution\n\nyears = np.linspace(0, 5, 101)\nthinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)\nPlotPredictions(daily, years, func=RunQuadraticModel)\nthinkplot.Config(title='predictions',\n xlabel='Years',\n xlim=[years[0]-0.1, years[-1]+0.1],\n ylabel='Price per gram ($)')",
"Exercise: Write a definition for a class named SerialCorrelationTest that extends HypothesisTest from Section 9.2. It should take a series and a lag as data, compute the serial correlation of the series with the given lag, and then compute the p-value of the observed correlation.\nUse this class to test whether the serial correlation in raw price data is statistically significant. Also test the residuals of the linear model and (if you did the previous exercise), the quadratic model.",
"# Solution\n\nclass SerialCorrelationTest(thinkstats2.HypothesisTest):\n \"\"\"Tests serial correlations by permutation.\"\"\"\n\n def TestStatistic(self, data):\n \"\"\"Computes the test statistic.\n\n data: tuple of xs and ys\n \"\"\"\n series, lag = data\n test_stat = abs(SerialCorr(series, lag))\n return test_stat\n\n def RunModel(self):\n \"\"\"Run the model of the null hypothesis.\n\n returns: simulated data\n \"\"\"\n series, lag = self.data\n permutation = series.reindex(np.random.permutation(series.index))\n return permutation, lag\n\n# Solution\n\n# test the correlation between consecutive prices\n\nname = 'high'\ndaily = dailies[name]\n\nseries = daily.ppg\ntest = SerialCorrelationTest((series, 1))\npvalue = test.PValue()\nprint(test.actual, pvalue)\n\n# Solution\n\n# test for serial correlation in residuals of the linear model\n\n_, results = RunLinearModel(daily)\nseries = results.resid\ntest = SerialCorrelationTest((series, 1))\npvalue = test.PValue()\nprint(test.actual, pvalue) \n\n# Solution\n\n# test for serial correlation in residuals of the quadratic model\n\n_, results = RunQuadraticModel(daily)\nseries = results.resid\ntest = SerialCorrelationTest((series, 1))\npvalue = test.PValue()\nprint(test.actual, pvalue)",
"Worked example: There are several ways to extend the EWMA model to generate predictions. One of the simplest is something like this:\n\n\nCompute the EWMA of the time series and use the last point as an intercept, inter.\n\n\nCompute the EWMA of differences between successive elements in the time series and use the last point as a slope, slope.\n\n\nTo predict values at future times, compute inter + slope * dt, where dt is the difference between the time of the prediction and the time of the last observation.",
"name = 'high'\ndaily = dailies[name]\n\nfilled = FillMissing(daily)\ndiffs = filled.ppg.diff()\n\nthinkplot.plot(diffs)\nplt.xticks(rotation=30)\nthinkplot.Config(ylabel='Daily change in price per gram ($)')\n\nfilled['slope'] = pd.ewma(diffs, span=365)\nthinkplot.plot(filled.slope[-365:])\nplt.xticks(rotation=30)\nthinkplot.Config(ylabel='EWMA of diff ($)')\n\n# extract the last inter and the mean of the last 30 slopes\nstart = filled.index[-1]\ninter = filled.ewma[-1]\nslope = filled.slope[-30:].mean()\n\nstart, inter, slope\n\n# reindex the DataFrame, adding a year to the end\ndates = pd.date_range(filled.index.min(), \n filled.index.max() + np.timedelta64(365, 'D'))\npredicted = filled.reindex(dates)\n\n# generate predicted values and add them to the end\npredicted['date'] = predicted.index\none_day = np.timedelta64(1, 'D')\npredicted['days'] = (predicted.date - start) / one_day\npredict = inter + slope * predicted.days\npredicted.ewma.fillna(predict, inplace=True)\n\n# plot the actual values and predictions\nthinkplot.Scatter(daily.ppg, alpha=0.1, label=name)\nthinkplot.Plot(predicted.ewma, color='#ff7f00')",
"As an exercise, run this analysis again for the other quality categories."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
geoneill12/phys202-2015-work
|
assignments/assignment05/InteractEx02.ipynb
|
mit
|
[
"Interact Exercise 2\nImports",
"%matplotlib inline\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nfrom IPython.html.widgets import interact, interactive, fixed\nfrom IPython.display import display",
"Plotting with parameters\nWrite a plot_sin1(a, b) function that plots $sin(ax+b)$ over the interval $[0,4\\pi]$.\n\nCustomize your visualization to make it effective and beautiful.\nCustomize the box, grid, spines and ticks to match the requirements of this data.\nUse enough points along the x-axis to get a smooth plot.\nFor the x-axis tick locations use integer multiples of $\\pi$.\nFor the x-axis tick labels use multiples of pi using LaTeX: $3\\pi$.",
"def plot_sine1(a, b):\n x = np.arange(0, 4.01 * np.pi, 0.01 * np.pi)\n plt.plot(x, np.sin(a * x + b))\n plt.xlim(0, 4 * np.pi)\n plt.xticks([0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi], ['0', '$\\pi$', '$2\\pi$', '$3\\pi$', '$4\\pi$'])\n\nplot_sine1(5, 3.4)",
"Then use interact to create a user interface for exploring your function:\n\na should be a floating point slider over the interval $[0.0,5.0]$ with steps of $0.1$.\nb should be a floating point slider over the interval $[-5.0,5.0]$ with steps of $0.1$.",
"interact(plot_sine1, a = (0.0, 5.0, 0.1), b = (-5.0, 5.0, 0.1))\n\nassert True # leave this for grading the plot_sine1 exercise",
"In matplotlib, the line style and color can be set with a third argument to plot. Examples of this argument:\n\ndashed red: r--\nblue circles: bo\ndotted black: k.\n\nWrite a plot_sine2(a, b, style) function that has a third style argument that allows you to set the line style of the plot. The style should default to a blue line.",
"def plot_sine2(a, b, style):\n x = np.arange(0, 4.01 * np.pi, 0.01 * np.pi)\n plt.plot(x, np.sin(a * x + b), style)\n plt.xlim(0, 4 * np.pi)\n plt.xticks([0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi], ['0', '$\\pi$', '$2\\pi$', '$3\\pi$', '$4\\pi$'])\n\nplot_sine2(4.0, -1.0, 'r--')",
"Use interact to create a UI for plot_sine2.\n\nUse a slider for a and b as above.\nUse a drop down menu for selecting the line style between a dotted blue line line, black circles and red triangles.",
"interact(plot_sine2, a = (0.0, 5.0, 0.1), b = (-5.0, 5.0, 0.1), style = ('b.', 'ko', 'r^'))\n\nassert True # leave this for grading the plot_sine2 exercise"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
chapman-phys227-2016s/hw-3-ChinmaiRaman
|
HW3Notebook.ipynb
|
mit
|
[
"import numpy as np\nimport sympy as sp\nimport math\n\nimport loan as p1\nimport root_finder_examples as p2\nimport arclength as p3\nimport sin_Taylor_series_diffeq as p4\n\nimport matplotlib.pyplot as plt\n\n# Needed only in Jupyter to render properly in-notebook\n%matplotlib inline",
"Chinmai Raman\nHomework 3\nA.4 Solving a system of difference equations\nComputes the development of a loan over time.\nThe below function calculates the amount paid per month (the first array) and the amount left to be paid (the second array) at each month of the year at a principal of $10,000 to be paid over 1 year at annual interest rate of 6%",
"p1.loan(6, 10000, 12)",
"A.11 Testing different methods of root finding\n$f(x) = Sin(x)$",
"p2.graph(p2.f1, 100, -2 * np.pi, 2 * np.pi)\n\np2.Newton(p2.f1, p2.f1prime, -4)\n\np2.bisect(p2.f1, -4, -2)\n\np2.secant(p2.f1, -4.5, -3.5)",
"$f(x) = x - sin(x)$",
"p2.graph(p2.f2, 100, -np.pi, np.pi)\n\np2.Newton(p2.f2, p2.f2prime, 1)\n\np2.bisect(p2.f2, -1, 1)\n\np2.secant(p2.f2, -2, -1)",
"$f(x) = x^5 - sin x$",
"p2.graph(p2.f3, 100, -np.pi / 2, np.pi / 2)\n\np2.Newton(p2.f3, p2.f3prime, -1)\n\np2.bisect(p2.f3, -1, 1)\n\np2.secant(p2.f3, -1, -0.5)",
"$f(x) = x^4sinx$",
"p2.graph(p2.f4, 100, -2 * np.pi, 2 * np.pi)\n\np2.Newton(p2.f4, p2.f4prime, -4)\n\np2.bisect(p2.f4, -4, -2)\n\np2.secant(p2.f4, -5, -4)",
"$f(x) = x^4 - 16$",
"p2.graph(p2.f5, 100, -2 * np.pi, 2 * np.pi)\n\np2.Newton(p2.f5, p2.f5prime, -3)\n\np2.bisect(p2.f5, -3, -1)\n\np2.secant(p2.f5, -4, -3)",
"$f(x) = x^{10} - 1$",
"p2.graph(p2.f6, 100, -2 * np.pi, 2 * np.pi)\n\np2.Newton(p2.f6, p2.f6prime, 2)\n\np2.bisect(p2.f6, 0, 2)\n\np2.secant(p2.f6, 3, 2)",
"$tanh(x) - x^{10}$",
"p2.graph(p2.f7, 100, -2 * np.pi, 2 * np.pi)\n\np2.Newton(p2.f7, p2.f7prime, 1)\n\np2.bisect(p2.f7, 0.5, 2)\n\np2.secant(p2.f7, 3, 2)",
"A.13 Computing the arc length of a curve",
"h1 = -4 * (x)**2\nx = sp.Symbol('x')\nh2 = sp.exp(h1)\nh3 = 1 / np.sqrt(2 * np.pi) * h2\nlength = p3.arclength(h3, -2, 2, 10)\nprint length",
"The arclength of the function f(x) from -2 to 2 is 4.18",
"fig = plt.figure(1)\nx = np.linspace(-2, 2, 100)\ny = 1 / np.sqrt(2 * np.pi) * np.exp(-4 * x**2)\nx1 = length[0]\ny1 = length[1]\nplt.plot(x, y, 'r-', x1, y1, 'b-')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.title('1/sqrt(2pi) * e^(-4t^2)')\nplt.show(fig)",
"A.14 Finding difference equations for computing sin(x)\nThe accuracy of a Taylor polynomial improves as x decreases (moves closer to zero).",
"x = [-3 * np.pi / 4.0, -np.pi / 4.0, np.pi / 4.0, 3 * np.pi / 4]\nN = [5, 5, 5, 5]\nn = 0\nSn = []\nwhile n < 4:\n Sn.append(p4.sin_Taylor(x[n], N[n])[0])\n n += 1\nprint Sn",
"The accuracy of a Taylor polynomial also improves as n increases.",
"x = [np.pi / 4, np.pi / 4, np.pi / 4, np.pi / 4]\nN = [1, 3, 5, 10]\nn = 0\nSn = []\nwhile n < 4:\n Sn.append(p4.sin_Taylor(x[n], N[n])[0])\n n += 1\nprint Sn"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
csaladenes/csaladenes.github.io
|
present/bi2/2020/ubb/az_en_jupyter2_mappam/sklearn_tutorial/03.2-Regression-Forests.ipynb
|
mit
|
[
"<small><i>This notebook was put together by Jake Vanderplas. Source and license info is on GitHub.</i></small>\nSupervised Learning In-Depth: Random Forests\nPreviously we saw a powerful discriminative classifier, Support Vector Machines.\nHere we'll take a look at motivating another powerful algorithm. This one is a non-parametric algorithm called Random Forests.",
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\nplt.style.use('seaborn')",
"Motivating Random Forests: Decision Trees\nRandom forests are an example of an ensemble learner built on decision trees.\nFor this reason we'll start by discussing decision trees themselves.\nDecision trees are extremely intuitive ways to classify or label objects: you simply ask a series of questions designed to zero-in on the classification:",
"import fig_code\nfig_code.plot_example_decision_tree()",
"The binary splitting makes this extremely efficient.\nAs always, though, the trick is to ask the right questions.\nThis is where the algorithmic process comes in: in training a decision tree classifier, the algorithm looks at the features and decides which questions (or \"splits\") contain the most information.\nCreating a Decision Tree\nHere's an example of a decision tree classifier in scikit-learn. We'll start by defining some two-dimensional labeled data:",
"from sklearn.datasets import make_blobs\n\nX, y = make_blobs(n_samples=300, centers=4,\n random_state=0, cluster_std=1.0)\nplt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='rainbow');",
"We have some convenience functions in the repository that help",
"from fig_code import visualize_tree, plot_tree_interactive",
"Now using IPython's interact (available in IPython 2.0+, and requires a live kernel) we can view the decision tree splits:",
"plot_tree_interactive(X, y);",
"Notice that at each increase in depth, every node is split in two except those nodes which contain only a single class.\nThe result is a very fast non-parametric classification, and can be extremely useful in practice.\nQuestion: Do you see any problems with this?\nDecision Trees and over-fitting\nOne issue with decision trees is that it is very easy to create trees which over-fit the data. That is, they are flexible enough that they can learn the structure of the noise in the data rather than the signal! For example, take a look at two trees built on two subsets of this dataset:",
"from sklearn.tree import DecisionTreeClassifier\nclf = DecisionTreeClassifier()\n\nplt.figure()\nvisualize_tree(clf, X[:200], y[:200], boundaries=False)\nplt.figure()\nvisualize_tree(clf, X[-200:], y[-200:], boundaries=False)",
"The details of the classifications are completely different! That is an indication of over-fitting: when you predict the value for a new point, the result is more reflective of the noise in the model rather than the signal.\nEnsembles of Estimators: Random Forests\nOne possible way to address over-fitting is to use an Ensemble Method: this is a meta-estimator which essentially averages the results of many individual estimators which over-fit the data. Somewhat surprisingly, the resulting estimates are much more robust and accurate than the individual estimates which make them up!\nOne of the most common ensemble methods is the Random Forest, in which the ensemble is made up of many decision trees which are in some way perturbed.\nThere are volumes of theory and precedent about how to randomize these trees, but as an example, let's imagine an ensemble of estimators fit on subsets of the data. We can get an idea of what these might look like as follows:",
"def fit_randomized_tree(random_state=0):\n X, y = make_blobs(n_samples=300, centers=4,\n random_state=0, cluster_std=2.0)\n clf = DecisionTreeClassifier(max_depth=15)\n \n rng = np.random.RandomState(random_state)\n i = np.arange(len(y))\n rng.shuffle(i)\n visualize_tree(clf, X[i[:250]], y[i[:250]], boundaries=False,\n xlim=(X[:, 0].min(), X[:, 0].max()),\n ylim=(X[:, 1].min(), X[:, 1].max()))\n \nfrom ipywidgets import interact\ninteract(fit_randomized_tree, random_state=(0, 100));",
"See how the details of the model change as a function of the sample, while the larger characteristics remain the same!\nThe random forest classifier will do something similar to this, but use a combined version of all these trees to arrive at a final answer:",
"from sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier(n_estimators=100, random_state=0)\nvisualize_tree(clf, X, y, boundaries=False);",
"By averaging over 100 randomly perturbed models, we end up with an overall model which is a much better fit to our data!\n(Note: above we randomized the model through sub-sampling... Random Forests use more sophisticated means of randomization, which you can read about in, e.g. the scikit-learn documentation)\nQuick Example: Moving to Regression\nAbove we were considering random forests within the context of classification.\nRandom forests can also be made to work in the case of regression (that is, continuous rather than categorical variables). The estimator to use for this is sklearn.ensemble.RandomForestRegressor.\nLet's quickly demonstrate how this can be used:",
"from sklearn.ensemble import RandomForestRegressor\n\nx = 10 * np.random.rand(100)\n\ndef model(x, sigma=0.3):\n fast_oscillation = np.sin(5 * x)\n slow_oscillation = np.sin(0.5 * x)\n noise = sigma * np.random.randn(len(x))\n\n return slow_oscillation + fast_oscillation + noise\n\ny = model(x)\nplt.errorbar(x, y, 0.3, fmt='o');\n\nxfit = np.linspace(0, 10, 1000)\nyfit = RandomForestRegressor(100).fit(x[:, None], y).predict(xfit[:, None])\nytrue = model(xfit, 0)\n\nplt.errorbar(x, y, 0.3, fmt='o')\nplt.plot(xfit, yfit, '-r');\nplt.plot(xfit, ytrue, '-k', alpha=0.5);",
"As you can see, the non-parametric random forest model is flexible enough to fit the multi-period data, without us even specifying a multi-period model!\nExample: Random Forest for Classifying Digits\nWe previously saw the hand-written digits data. Let's use that here to test the efficacy of the SVM and Random Forest classifiers.",
"from sklearn.datasets import load_digits\ndigits = load_digits()\ndigits.keys()\n\nX = digits.data\ny = digits.target\nprint(X.shape)\nprint(y.shape)",
"To remind us what we're looking at, we'll visualize the first few data points:",
"# set up the figure\nfig = plt.figure(figsize=(6, 6)) # figure size in inches\nfig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n\n# plot the digits: each image is 8x8 pixels\nfor i in range(64):\n ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])\n ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')\n \n # label the image with the target value\n ax.text(0, 7, str(digits.target[i]))",
"We can quickly classify the digits using a decision tree as follows:",
"from sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nXtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)\nclf = DecisionTreeClassifier(max_depth=11)\nclf.fit(Xtrain, ytrain)\nypred = clf.predict(Xtest)",
"We can check the accuracy of this classifier:",
"metrics.accuracy_score(ypred, ytest)",
"and for good measure, plot the confusion matrix:",
"plt.imshow(metrics.confusion_matrix(ypred, ytest),\n interpolation='nearest', cmap=plt.cm.binary)\nplt.grid(False)\nplt.colorbar()\nplt.xlabel(\"predicted label\")\nplt.ylabel(\"true label\");",
"Exercise\n\nRepeat this classification task with sklearn.ensemble.RandomForestClassifier. How does the max_depth, max_features, and n_estimators affect the results?\nTry this classification with sklearn.svm.SVC, adjusting kernel, C, and gamma. Which classifier performs optimally?\nTry a few sets of parameters for each model and check the F1 score (sklearn.metrics.f1_score) on your results. What's the best F1 score you can reach?"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
tranlyvu/autonomous-vehicle-projects
|
Behavior Cloning/notebook/Behavior Cloning.ipynb
|
apache-2.0
|
[
"Self-driving car Nanodegree - Term 1\nProject 2: Behavior Cloning\n\nIn this project, We built and trained a convolutional neural network for end-to-end driving in a simulator, using TensorFlow and Keras. We used optimization techniques such as regularization and dropout to generalize the network for driving on multiple tracks. The model will output a steering angle to an autonomous vehicle.\nThe goals / steps of this project are the following:\n* Use the simulator to collect data of good driving behavior \n* Design, train and validate a model that predicts a steering angle from image data\n* Use the model to drive the vehicle autonomously around the first track in the simulator. The vehicle should remain on the road for an entire loop around the track.\n* Summarize the results with a written report\nAuthor : Tran Ly Vu\n\nGithub repo\nNotebook\nPython code\n\nFiles Submitted\nMy project includes the following files:\n- src/model.py containing the script to create and train the model\n- drive.py for driving the car in autonomous mode\n- model.h5 containing a trained convolution neural network\n- notebook containing notebook\n- run1.mp4 containing sample video of driving the car in autonomous mode using trained model\nUsing the Udacity provided simulator and my drive.py file, I was able to test my model by driving autonomously around the track by executing:\npython drive.py model.h5\nVideo of driving the car was generated by executing:\npython drive.py model.h5 run1\npython video.py run1 --fps 48\nImporting packages",
"import csv\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Convolution2D, Cropping2D, Lambda, Dropout\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nimport sklearn\nfrom keras.layers.pooling import MaxPooling2D\n# Visualizations will be shown in the notebook.\n%matplotlib inline",
"Dataset\nI used training dataset provided by Udacity. I use all 3 positions of camera with correction of 0.25 , i.e addition of 0.25 to steering angle for left-positioned camera and substraction of 0.25 for right-positioned camera.\nI could have self-produced ore data but due to time constraint, I only used Udacity dataset\nMoreover, after unable to complete a whole lap, I follow advice from forum and decided to randomly choose camera to select from\nThe dataset is split into 20% of test set. Also, the training set is shuffled before training\nLoading Dataset",
"'''Read data'''\nimage_path = '../../../data'\n# row in log path is IMG/<name>\ndriving_log_path = '../../../data/driving_log.csv'\n\nrows = []\nwith open(driving_log_path) as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n rows.append(row)",
"Model Architecture and Training Strategy\nFirst Model\nIn my first attempt, I used 9-layers network from end to end learning for self-driving cars by NVIDIA \n\nPre-processing pipeline\nData augmentation: Fliping the image horizontal (from function append_data)\nCropping the image\nNormalization and Mean centering\n\n\nNVIDIA original model\n\n|Layer |type |output filter/neurons|\n|--------|--------|--------|\n|1 |conv |24 |\n|2 |conv |36 |\n|3 |conv |48 |\n|4 |conv |64 |\n|5 |conv |64 |\n|6 |flattern|1164 |\n|7 |relu |100 |\n|8 |relu |50 |\n|9 |relu |10 |\n|10 |relu |1 |\nSecond Attempt\nHowever, I detected overfitting in my first attempt, and hence i tried to improved the mode in second model by using regulation, i.e dropout\n\nPre-processing pipeline\n - Data augmentation: Fliping the image horizontal (from function append_data)\n - Cropping the image\n - Normalization and Mean centering\nModified NVIDIA model\n\n|Layer |type |output filter/neurons|\n|--------|--------|--------|\n|1 |conv |24 |\n| |dropout | |\n|2 |conv |36 |\n| |dropout | |\n|3 |conv |48 |\n| |dropout | |\n|4 |conv |64 |\n|5 |conv |64 |\n|6 |flattern|1164 |\n|7 |relu |100 |\n|8 |relu |50 |\n|9 |relu |10 |\n|10 |relu |1 |",
"def append_data(col, images, measurement, steering_measurements):\n current_path = image_path + '/' + col.strip()\n \n image = cv2.imread(current_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n images.append(np.asarray(image))\n steering_measurements.append(measurement)\n \n # random flipping\n flip_prob = np.random.random()\n if flip_prob > 0.5:\n image_flipped = np.fliplr(image)\n images.append(np.asarray(image_flipped))\n measurement_flipped = measurement * (-1)\n steering_measurements.append(measurement)\n \n\ndef images_and_measurements(sample):\n images = []\n steering_measurements = []\n for line in sample[0:]:\n measurement = float(line[3])\n ## random data\n camera = np.random.choice(['center', 'left', 'right'])\n if camera == 'center':\n col_center = line[0]\n append_data(col_center, images, measurement, steering_measurements)\n elif camera == 'left':\n col_left = line[1]\n append_data(col_left, images, measurement + 0.25, steering_measurements)\n else:\n col_right = line[2]\n append_data(col_right, images, measurement - 0.25, steering_measurements)\n return images, steering_measurements\n\ndef generator(samples, batch_size = 32):\n num_samples = len(samples)\n while 1:\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset + batch_size] \n images = []\n measurements = []\n for image, measurement in batch_samples:\n images.append(image) \n measurements.append(measurement)\n # trim image to only see section with road\n x_train = np.array(images)\n y_train = np.array(measurements)\n yield sklearn.utils.shuffle(x_train, y_train)\n\n\n## Print total number of data , including augmentation\nX_total, y_total = images_and_measurements(rows[1:])\nprint(\"Number of image is: \", len(X_total))\nprint(\"Number of measurement is: \", len(y_total))",
"Model Architecture Definition",
"model = Sequential()\n#The cameras in the simulator capture 160 pixel by 320 pixel images., after cropping, it is 66x200\nmodel.add(Cropping2D(cropping = ((74,20), (60,60)),input_shape=(160, 320, 3)))\n\nmodel.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(66, 200, 3)))\nmodel.add(Convolution2D(24, 5, 5, subsample=(2,2), activation='relu'))\nmodel.add(Dropout(.5))\nmodel.add(Convolution2D(36, 5, 5, subsample=(2,2), activation='relu'))\nmodel.add(Dropout(.5))\nmodel.add(Convolution2D(48, 5, 5, subsample=(2,2), activation='relu'))\nmodel.add(Dropout(.5))\nmodel.add(Convolution2D(64, 3, 3, activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, activation='relu')) \nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\nmodel.add(Dense(1))\n'''Training: using MSE for regression'''\nmodel.compile(loss='mse', optimizer='adam')",
"Model Training\nFor every time of training and parameter tunning, the model was tested by running it through the simulator and ensuring that the vehicle could stay on the track.\nAt epoch of 10, the training and validation loss both went down fast and I think they would converge if I'd have increased number of epoch (graph is plotted in the notebook. However, for this kind of regression problem, both trainning loss and accuracy do not seem to be useful, it is more important to test it on the simulator provided by Udacity. Therefore, I simply tune number of epoch until the vehicle run well on the track.\nFinal Model parameters:\n- Optimizer: Adam optimizer, so the learning rate was not tuned manually \n- Epoch: 5\n- Batch size: 32",
"print('Training model') \nsamples = list(zip(X_total, y_total)) \ntrain_samples, validation_samples = train_test_split(samples, test_size = 0.2)\ntrain_generator = generator(train_samples, batch_size = 32)\nvalidation_generator = generator(validation_samples, batch_size = 32)\n\nhistory_object = model.fit_generator(train_generator,\n samples_per_epoch = len(train_samples),\n validation_data = validation_generator,\n nb_val_samples = len(validation_samples),\n nb_epoch = 5, \n verbose = 1)\nprint('Endding training, starting to save model')\nmodel.save('../model.h5')\n\nprint(history_object.history.keys())\n###plot the training and validation loss for each epoch\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()",
"The project video is here."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/end_to_end_ml/solutions/keras_dnn_babyweight.ipynb
|
apache-2.0
|
[
"Creating Keras DNN model\nLearning Objectives\n\nCreate input layers for raw features\nCreate feature columns for inputs\nCreate DNN dense hidden layers and output layer\nBuild DNN model tying all of the pieces together\nTrain and evaluate\n\nIntroduction\nIn this notebook, we'll be using Keras to create a DNN model to predict the weight of a baby before it is born.\nWe'll start by defining the CSV column names, label column, and column defaults for our data inputs. Then, we'll construct a tf.data Dataset of features and the label from the CSV files and create inputs layers for the raw features. Next, we'll set up feature columns for the model inputs and build a deep neural network in Keras. We'll create a custom evaluation metric and build our DNN model. Finally, we'll train and evaluate our model.\nEach learning objective will correspond to a #TODO in the student lab notebook -- try to complete that notebook first before reviewing this solution notebook.\nSet up environment variables and load necessary libraries",
"!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst\n\n!pip install --user google-cloud-bigquery==1.25.0",
"Note: Restart your kernel to use updated packages.\nKindly ignore the deprecation warnings and incompatibility errors related to google-cloud-storage.\nImport necessary libraries.",
"from google.cloud import bigquery\nimport pandas as pd\nimport datetime\nimport os\nimport shutil\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nprint(tf.__version__)",
"Set environment variables so that we can use them throughout the notebook.",
"%%bash\nexport PROJECT=$(gcloud config list project --format \"value(core.project)\")\necho \"Your current GCP Project Name is: \"$PROJECT\n\nPROJECT = \"cloud-training-demos\" # Replace with your PROJECT",
"Create ML datasets by sampling using BigQuery\nWe'll begin by sampling the BigQuery data to create smaller datasets. Let's create a BigQuery client that we'll use throughout the lab.",
"bq = bigquery.Client(project = PROJECT)",
"We need to figure out the right way to divide our hash values to get our desired splits. To do that we need to define some values to hash within the module. Feel free to play around with these values to get the perfect combination.",
"modulo_divisor = 100\ntrain_percent = 80.0\neval_percent = 10.0\n\ntrain_buckets = int(modulo_divisor * train_percent / 100.0)\neval_buckets = int(modulo_divisor * eval_percent / 100.0)",
"We can make a series of queries to check if our bucketing values result in the correct sizes of each of our dataset splits and then adjust accordingly. Therefore, to make our code more compact and reusable, let's define a function to return the head of a dataframe produced from our queries up to a certain number of rows.",
"def display_dataframe_head_from_query(query, count=10):\n \"\"\"Displays count rows from dataframe head from query.\n \n Args:\n query: str, query to be run on BigQuery, results stored in dataframe.\n count: int, number of results from head of dataframe to display.\n Returns:\n Dataframe head with count number of results.\n \"\"\"\n df = bq.query(\n query + \" LIMIT {limit}\".format(\n limit=count)).to_dataframe()\n\n return df.head(count)",
"For our first query, we're going to use the original query above to get our label, features, and columns to combine into our hash which we will use to perform our repeatable splitting. There are only a limited number of years, months, days, and states in the dataset. Let's see what the hash values are. We will need to include all of these extra columns to hash on to get a fairly uniform spread of the data. Feel free to try less or more in the hash and see how it changes your results.",
"# Get label, features, and columns to hash and split into buckets\nhash_cols_fixed_query = \"\"\"\nSELECT\n weight_pounds,\n is_male,\n mother_age,\n plurality,\n gestation_weeks,\n year,\n month,\n CASE\n WHEN day IS NULL THEN\n CASE\n WHEN wday IS NULL THEN 0\n ELSE wday\n END\n ELSE day\n END AS date,\n IFNULL(state, \"Unknown\") AS state,\n IFNULL(mother_birth_state, \"Unknown\") AS mother_birth_state\nFROM\n publicdata.samples.natality\nWHERE\n year > 2000\n AND weight_pounds > 0\n AND mother_age > 0\n AND plurality > 0\n AND gestation_weeks > 0\n\"\"\"\n\ndisplay_dataframe_head_from_query(hash_cols_fixed_query)",
"Using COALESCE would provide the same result as the nested CASE WHEN. This is preferable when all we want is the first non-null instance. To be precise the CASE WHEN would become COALESCE(wday, day, 0) AS date. You can read more about it here.\nNext query will combine our hash columns and will leave us just with our label, features, and our hash values.",
"data_query = \"\"\"\nSELECT\n weight_pounds,\n is_male,\n mother_age,\n plurality,\n gestation_weeks,\n FARM_FINGERPRINT(\n CONCAT(\n CAST(year AS STRING),\n CAST(month AS STRING),\n CAST(date AS STRING),\n CAST(state AS STRING),\n CAST(mother_birth_state AS STRING)\n )\n ) AS hash_values\nFROM\n ({CTE_hash_cols_fixed})\n\"\"\".format(CTE_hash_cols_fixed=hash_cols_fixed_query)\n\ndisplay_dataframe_head_from_query(data_query)",
"The next query is going to find the counts of each of the unique 657484 hash_values. This will be our first step at making actual hash buckets for our split via the GROUP BY.",
"# Get the counts of each of the unique hash of our splitting column\nfirst_bucketing_query = \"\"\"\nSELECT\n hash_values,\n COUNT(*) AS num_records\nFROM\n ({CTE_data})\nGROUP BY\n hash_values\n\"\"\".format(CTE_data=data_query)\n\ndisplay_dataframe_head_from_query(first_bucketing_query)",
"The query below performs a second layer of bucketing where now for each of these bucket indices we count the number of records.",
"# Get the number of records in each of the hash buckets\nsecond_bucketing_query = \"\"\"\nSELECT\n ABS(MOD(hash_values, {modulo_divisor})) AS bucket_index,\n SUM(num_records) AS num_records\nFROM\n ({CTE_first_bucketing})\nGROUP BY\n ABS(MOD(hash_values, {modulo_divisor}))\n\"\"\".format(\n CTE_first_bucketing=first_bucketing_query, modulo_divisor=modulo_divisor)\n\ndisplay_dataframe_head_from_query(second_bucketing_query)",
"The number of records is hard for us to easily understand the split, so we will normalize the count into percentage of the data in each of the hash buckets in the next query.",
"# Calculate the overall percentages\npercentages_query = \"\"\"\nSELECT\n bucket_index,\n num_records,\n CAST(num_records AS FLOAT64) / (\n SELECT\n SUM(num_records)\n FROM\n ({CTE_second_bucketing})) AS percent_records\nFROM\n ({CTE_second_bucketing})\n\"\"\".format(CTE_second_bucketing=second_bucketing_query)\n\ndisplay_dataframe_head_from_query(percentages_query)",
"We'll now select the range of buckets to be used in training.",
"# Choose hash buckets for training and pull in their statistics\ntrain_query = \"\"\"\nSELECT\n *,\n \"train\" AS dataset_name\nFROM\n ({CTE_percentages})\nWHERE\n bucket_index >= 0\n AND bucket_index < {train_buckets}\n\"\"\".format(\n CTE_percentages=percentages_query,\n train_buckets=train_buckets)\n\ndisplay_dataframe_head_from_query(train_query)",
"We'll do the same by selecting the range of buckets to be used evaluation.",
"# Choose hash buckets for validation and pull in their statistics\neval_query = \"\"\"\nSELECT\n *,\n \"eval\" AS dataset_name\nFROM\n ({CTE_percentages})\nWHERE\n bucket_index >= {train_buckets}\n AND bucket_index < {cum_eval_buckets}\n\"\"\".format(\n CTE_percentages=percentages_query,\n train_buckets=train_buckets,\n cum_eval_buckets=train_buckets + eval_buckets)\n\ndisplay_dataframe_head_from_query(eval_query)",
"Lastly, we'll select the hash buckets to be used for the test split.",
"# Choose hash buckets for testing and pull in their statistics\ntest_query = \"\"\"\nSELECT\n *,\n \"test\" AS dataset_name\nFROM\n ({CTE_percentages})\nWHERE\n bucket_index >= {cum_eval_buckets}\n AND bucket_index < {modulo_divisor}\n\"\"\".format(\n CTE_percentages=percentages_query,\n cum_eval_buckets=train_buckets + eval_buckets,\n modulo_divisor=modulo_divisor)\n\ndisplay_dataframe_head_from_query(test_query)",
"In the below query, we'll UNION ALL all of the datasets together so that all three sets of hash buckets will be within one table. We added dataset_id so that we can sort on it in the query after.",
"# Union the training, validation, and testing dataset statistics\nunion_query = \"\"\"\nSELECT\n 0 AS dataset_id,\n *\nFROM\n ({CTE_train})\nUNION ALL\nSELECT\n 1 AS dataset_id,\n *\nFROM\n ({CTE_eval})\nUNION ALL\nSELECT\n 2 AS dataset_id,\n *\nFROM\n ({CTE_test})\n\"\"\".format(CTE_train=train_query, CTE_eval=eval_query, CTE_test=test_query)\n\ndisplay_dataframe_head_from_query(union_query)",
"Lastly, we'll show the final split between train, eval, and test sets. We can see both the number of records and percent of the total data. It is really close to that we were hoping to get.",
"# Show final splitting and associated statistics\nsplit_query = \"\"\"\nSELECT\n dataset_id,\n dataset_name,\n SUM(num_records) AS num_records,\n SUM(percent_records) AS percent_records\nFROM\n ({CTE_union})\nGROUP BY\n dataset_id,\n dataset_name\nORDER BY\n dataset_id\n\"\"\".format(CTE_union=union_query)\n\ndisplay_dataframe_head_from_query(split_query)",
"Now that we know that our splitting values produce a good global splitting on our data, here's a way to get a well-distributed portion of the data in such a way that the train, eval, test sets do not overlap and takes a subsample of our global splits.",
"# every_n allows us to subsample from each of the hash values\n# This helps us get approximately the record counts we want\nevery_n = 1000\n\nsplitting_string = \"ABS(MOD(hash_values, {0} * {1}))\".format(every_n, modulo_divisor)\n\ndef create_data_split_sample_df(query_string, splitting_string, lo, up):\n \"\"\"Creates a dataframe with a sample of a data split.\n\n Args:\n query_string: str, query to run to generate splits.\n splitting_string: str, modulo string to split by.\n lo: float, lower bound for bucket filtering for split.\n up: float, upper bound for bucket filtering for split.\n Returns:\n Dataframe containing data split sample.\n \"\"\"\n query = \"SELECT * FROM ({0}) WHERE {1} >= {2} and {1} < {3}\".format(\n query_string, splitting_string, int(lo), int(up))\n\n df = bq.query(query).to_dataframe()\n\n return df\n\ntrain_df = create_data_split_sample_df(\n data_query, splitting_string,\n lo=0, up=train_percent)\n\neval_df = create_data_split_sample_df(\n data_query, splitting_string,\n lo=train_percent, up=train_percent + eval_percent)\n\ntest_df = create_data_split_sample_df(\n data_query, splitting_string,\n lo=train_percent + eval_percent, up=modulo_divisor)\n\nprint(\"There are {} examples in the train dataset.\".format(len(train_df)))\nprint(\"There are {} examples in the validation dataset.\".format(len(eval_df)))\nprint(\"There are {} examples in the test dataset.\".format(len(test_df)))",
"Preprocess data using Pandas\nWe'll perform a few preprocessing steps to the data in our dataset. Let's add extra rows to simulate the lack of ultrasound. That is we'll duplicate some rows and make the is_male field be Unknown. Also, if there is more than child we'll change the plurality to Multiple(2+). While we're at it, we'll also change the plurality column to be a string. We'll perform these operations below. \nLet's start by examining the training dataset as is.",
"train_df.head()",
"Also, notice that there are some very important numeric fields that are missing in some rows (the count in Pandas doesn't count missing data)",
"train_df.describe()",
"It is always crucial to clean raw data before using in machine learning, so we have a preprocessing step. We'll define a preprocess function below. Note that the mother's age is an input to our model so users will have to provide the mother's age; otherwise, our service won't work. The features we use for our model were chosen because they are such good predictors and because they are easy enough to collect.",
"def preprocess(df):\n \"\"\" Preprocess pandas dataframe for augmented babyweight data.\n \n Args:\n df: Dataframe containing raw babyweight data.\n Returns:\n Pandas dataframe containing preprocessed raw babyweight data as well\n as simulated no ultrasound data masking some of the original data.\n \"\"\"\n # Clean up raw data\n # Filter out what we don\"t want to use for training\n df = df[df.weight_pounds > 0]\n df = df[df.mother_age > 0]\n df = df[df.gestation_weeks > 0]\n df = df[df.plurality > 0]\n\n # Modify plurality field to be a string\n twins_etc = dict(zip([1,2,3,4,5],\n [\"Single(1)\",\n \"Twins(2)\",\n \"Triplets(3)\",\n \"Quadruplets(4)\",\n \"Quintuplets(5)\"]))\n df[\"plurality\"].replace(twins_etc, inplace=True)\n\n # Clone data and mask certain columns to simulate lack of ultrasound\n no_ultrasound = df.copy(deep=True)\n\n # Modify is_male\n no_ultrasound[\"is_male\"] = \"Unknown\"\n \n # Modify plurality\n condition = no_ultrasound[\"plurality\"] != \"Single(1)\"\n no_ultrasound.loc[condition, \"plurality\"] = \"Multiple(2+)\"\n\n # Concatenate both datasets together and shuffle\n return pd.concat(\n [df, no_ultrasound]).sample(frac=1).reset_index(drop=True)",
"Let's process the train, eval, test set and see a small sample of the training data after our preprocessing:",
"train_df = preprocess(train_df)\neval_df = preprocess(eval_df)\ntest_df = preprocess(test_df)\n\ntrain_df.head()\n\ntrain_df.tail()",
"Let's look again at a summary of the dataset. Note that we only see numeric columns, so plurality does not show up.",
"train_df.describe()",
"Write to .csv files\nIn the final versions, we want to read from files, not Pandas dataframes. So, we write the Pandas dataframes out as csv files. Using csv files gives us the advantage of shuffling during read. This is important for distributed training because some workers might be slower than others, and shuffling the data helps prevent the same data from being assigned to the slow workers.",
"# Define columns\ncolumns = [\"weight_pounds\",\n \"is_male\",\n \"mother_age\",\n \"plurality\",\n \"gestation_weeks\"]\n\n# Write out CSV files\ntrain_df.to_csv(\n path_or_buf=\"train.csv\", columns=columns, header=False, index=False)\neval_df.to_csv(\n path_or_buf=\"eval.csv\", columns=columns, header=False, index=False)\ntest_df.to_csv(\n path_or_buf=\"test.csv\", columns=columns, header=False, index=False)\n\n%%bash\nwc -l *.csv\n\n%%bash\nhead *.csv\n\n%%bash\ntail *.csv\n\n%%bash\nls *.csv\n\n%%bash\nhead -5 *.csv",
"Create Keras model\nSet CSV Columns, label column, and column defaults.\nNow that we have verified that our CSV files exist, we need to set a few things that we will be using in our input function.\n* CSV_COLUMNS is going to be our header name of our column. Make sure that they are in the same order as in the CSV files\n* LABEL_COLUMN is the header name of the column that is our label. We will need to know this to pop it from our features dictionary.\n* DEFAULTS is a list with the same length as CSV_COLUMNS, i.e. there is a default for each column in our CSVs. Each element is a list itself with the default value for that CSV column.",
"# Determine CSV, label, and key columns\n# Create list of string column headers, make sure order matches.\nCSV_COLUMNS = [\"weight_pounds\",\n \"is_male\",\n \"mother_age\",\n \"plurality\",\n \"gestation_weeks\"]\n\n# Add string name for label column\nLABEL_COLUMN = \"weight_pounds\"\n\n# Set default values for each CSV column as a list of lists.\n# Treat is_male and plurality as strings.\nDEFAULTS = [[0.0], [\"null\"], [0.0], [\"null\"], [0.0]]",
"Make dataset of features and label from CSV files.\nNext, we will write an input_fn to read the data. Since we are reading from CSV files we can save ourselves from trying to recreate the wheel and can use tf.data.experimental.make_csv_dataset. This will create a CSV dataset object. However we will need to divide the columns up into features and a label. We can do this by applying the map method to our dataset and popping our label column off of our dictionary of feature tensors.",
"def features_and_labels(row_data):\n \"\"\"Splits features and labels from feature dictionary.\n\n Args:\n row_data: Dictionary of CSV column names and tensor values.\n Returns:\n Dictionary of feature tensors and label tensor.\n \"\"\"\n label = row_data.pop(LABEL_COLUMN)\n\n return row_data, label # features, label\n\n\ndef load_dataset(pattern, batch_size=1, mode='eval'):\n \"\"\"Loads dataset using the tf.data API from CSV files.\n\n Args:\n pattern: str, file pattern to glob into list of files.\n batch_size: int, the number of examples per batch.\n mode: 'train' | 'eval' to determine if training or evaluating.\n Returns:\n `Dataset` object.\n \"\"\"\n # Make a CSV dataset\n dataset = tf.data.experimental.make_csv_dataset(\n file_pattern=pattern,\n batch_size=batch_size,\n column_names=CSV_COLUMNS,\n column_defaults=DEFAULTS,\n ignore_errors=True)\n\n # Map dataset to features and label\n dataset = dataset.map(map_func=features_and_labels) # features, label\n\n # Shuffle and repeat for training\n if mode == 'train':\n dataset = dataset.shuffle(buffer_size=1000).repeat()\n\n # Take advantage of multi-threading; 1=AUTOTUNE\n dataset = dataset.prefetch(buffer_size=1)\n\n return dataset",
"Create input layers for raw features.\nWe'll need to get the data to read in by our input function to our model function, but just how do we go about connecting the dots? We can use Keras input layers (tf.Keras.layers.Input) by defining:\n* shape: A shape tuple (integers), not including the batch size. For instance, shape=(32,) indicates that the expected input will be batches of 32-dimensional vectors. Elements of this tuple can be None; 'None' elements represent dimensions where the shape is not known.\n* name: An optional name string for the layer. Should be unique in a model (do not reuse the same name twice). It will be autogenerated if it isn't provided.\n* dtype: The data type expected by the input, as a string (float32, float64, int32...)",
"# TODO 1\ndef create_input_layers():\n \"\"\"Creates dictionary of input layers for each feature.\n\n Returns:\n Dictionary of `tf.Keras.layers.Input` layers for each feature.\n \"\"\"\n inputs = {\n colname: tf.keras.layers.Input(\n name=colname, shape=(), dtype=\"float32\")\n for colname in [\"mother_age\", \"gestation_weeks\"]}\n\n inputs.update({\n colname: tf.keras.layers.Input(\n name=colname, shape=(), dtype=\"string\")\n for colname in [\"is_male\", \"plurality\"]})\n\n return inputs",
"Create feature columns for inputs.\nNext, define the feature columns. mother_age and gestation_weeks should be numeric. The others, is_male and plurality, should be categorical. Remember, only dense feature columns can be inputs to a DNN.",
"# TODO 2\ndef categorical_fc(name, values):\n \"\"\"Helper function to wrap categorical feature by indicator column.\n\n Args:\n name: str, name of feature.\n values: list, list of strings of categorical values.\n Returns:\n Indicator column of categorical feature.\n \"\"\"\n cat_column = tf.feature_column.categorical_column_with_vocabulary_list(\n key=name, vocabulary_list=values)\n\n return tf.feature_column.indicator_column(categorical_column=cat_column)\n\n\ndef create_feature_columns():\n \"\"\"Creates dictionary of feature columns from inputs.\n\n Returns:\n Dictionary of feature columns.\n \"\"\"\n feature_columns = {\n colname : tf.feature_column.numeric_column(key=colname)\n for colname in [\"mother_age\", \"gestation_weeks\"]\n }\n\n feature_columns[\"is_male\"] = categorical_fc(\n \"is_male\", [\"True\", \"False\", \"Unknown\"])\n feature_columns[\"plurality\"] = categorical_fc(\n \"plurality\", [\"Single(1)\", \"Twins(2)\", \"Triplets(3)\",\n \"Quadruplets(4)\", \"Quintuplets(5)\", \"Multiple(2+)\"])\n\n return feature_columns",
"Create DNN dense hidden layers and output layer.\nSo we've figured out how to get our inputs ready for machine learning but now we need to connect them to our desired output. Our model architecture is what links the two together. Let's create some hidden dense layers beginning with our inputs and end with a dense output layer. This is regression so make sure the output layer activation is correct and that the shape is right.",
"# TODO 3\ndef get_model_outputs(inputs):\n \"\"\"Creates model architecture and returns outputs.\n\n Args:\n inputs: Dense tensor used as inputs to model.\n Returns:\n Dense tensor output from the model.\n \"\"\"\n # Create two hidden layers of [64, 32] just in like the BQML DNN\n h1 = tf.keras.layers.Dense(64, activation=\"relu\", name=\"h1\")(inputs)\n h2 = tf.keras.layers.Dense(32, activation=\"relu\", name=\"h2\")(h1)\n\n # Final output is a linear activation because this is regression\n output = tf.keras.layers.Dense(\n units=1, activation=\"linear\", name=\"weight\")(h2)\n\n return output",
"Create custom evaluation metric.\nWe want to make sure that we have some useful way to measure model performance for us. Since this is regression, we would like to know the RMSE of the model on our evaluation dataset, however, this does not exist as a standard evaluation metric, so we'll have to create our own by using the true and predicted labels.",
"def rmse(y_true, y_pred):\n \"\"\"Calculates RMSE evaluation metric.\n\n Args:\n y_true: tensor, true labels.\n y_pred: tensor, predicted labels.\n Returns:\n Tensor with value of RMSE between true and predicted labels.\n \"\"\"\n return tf.sqrt(tf.reduce_mean((y_pred - y_true) ** 2))",
"Build DNN model tying all of the pieces together.\nExcellent! We've assembled all of the pieces, now we just need to tie them all together into a Keras Model. This is a simple feedforward model with no branching, side inputs, etc. so we could have used Keras' Sequential Model API but just for fun we're going to use Keras' Functional Model API. Here we will build the model using tf.keras.models.Model giving our inputs and outputs and then compile our model with an optimizer, a loss function, and evaluation metrics.",
"# TODO 4\ndef build_dnn_model():\n \"\"\"Builds simple DNN using Keras Functional API.\n\n Returns:\n `tf.keras.models.Model` object.\n \"\"\"\n # Create input layer\n inputs = create_input_layers()\n\n # Create feature columns\n feature_columns = create_feature_columns()\n\n # The constructor for DenseFeatures takes a list of numeric columns\n # The Functional API in Keras requires: LayerConstructor()(inputs)\n dnn_inputs = tf.keras.layers.DenseFeatures(\n feature_columns=feature_columns.values())(inputs)\n\n # Get output of model given inputs\n output = get_model_outputs(dnn_inputs)\n\n # Build model and compile it all together\n model = tf.keras.models.Model(inputs=inputs, outputs=output)\n model.compile(optimizer=\"adam\", loss=\"mse\", metrics=[rmse, \"mse\"])\n\n return model\n\nprint(\"Here is our DNN architecture so far:\\n\")\nmodel = build_dnn_model()\nprint(model.summary())",
"We can visualize the DNN using the Keras plot_model utility.",
"tf.keras.utils.plot_model(\n model=model, to_file=\"dnn_model.png\", show_shapes=False, rankdir=\"LR\")",
"Run and evaluate model\nTrain and evaluate.\nWe've built our Keras model using our inputs from our CSV files and the architecture we designed. Let's now run our model by training our model parameters and periodically running an evaluation to track how well we are doing on outside data as training goes on. We'll need to load both our train and eval datasets and send those to our model through the fit method. Make sure you have the right pattern, batch size, and mode when loading the data.",
"# TODO 5\nTRAIN_BATCH_SIZE = 32\nNUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset repeats, it'll wrap around\nNUM_EVALS = 5 # how many times to evaluate\n# Enough to get a reasonable sample, but not so much that it slows down\nNUM_EVAL_EXAMPLES = 10000\n\ntrainds = load_dataset(\n pattern=\"train*\",\n batch_size=TRAIN_BATCH_SIZE,\n mode='train')\n\nevalds = load_dataset(\n pattern=\"eval*\",\n batch_size=1000,\n mode='eval').take(count=NUM_EVAL_EXAMPLES // 1000)\n\nsteps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)\n\nlogdir = os.path.join(\n \"logs\", datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\ntensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=logdir, histogram_freq=1)\n\nhistory = model.fit(\n trainds,\n validation_data=evalds,\n epochs=NUM_EVALS,\n steps_per_epoch=steps_per_epoch,\n callbacks=[tensorboard_callback])",
"Visualize loss curve",
"# Plot\nimport matplotlib.pyplot as plt\nnrows = 1\nncols = 2\nfig = plt.figure(figsize=(10, 5))\n\nfor idx, key in enumerate([\"loss\", \"rmse\"]):\n ax = fig.add_subplot(nrows, ncols, idx+1)\n plt.plot(history.history[key])\n plt.plot(history.history[\"val_{}\".format(key)])\n plt.title(\"model {}\".format(key))\n plt.ylabel(key)\n plt.xlabel(\"epoch\")\n plt.legend([\"train\", \"validation\"], loc=\"upper left\");",
"Save the model",
"OUTPUT_DIR = \"babyweight_trained\"\nshutil.rmtree(OUTPUT_DIR, ignore_errors=True)\nEXPORT_PATH = os.path.join(\n OUTPUT_DIR, datetime.datetime.now().strftime(\"%Y%m%d%H%M%S\"))\ntf.saved_model.save(\n obj=model, export_dir=EXPORT_PATH) # with default serving function\nprint(\"Exported trained model to {}\".format(EXPORT_PATH))\n\n!ls $EXPORT_PATH",
"Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mne-tools/mne-tools.github.io
|
0.14/_downloads/plot_evoked_delayed_ssp.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Create evoked objects in delayed SSP mode\nThis script shows how to apply SSP projectors delayed, that is,\nat the evoked stage. This is particularly useful to support decisions\nrelated to the trade-off between denoising and preserving signal.\nWe first will extract Epochs and create evoked objects\nwith the required settings for delayed SSP application.\nThen we will explore the impact of the particular SSP projectors\non the evoked data.",
"# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>\n# Denis Engemann <denis.engemann@gmail.com>\n#\n# License: BSD (3-clause)\n\nimport matplotlib.pyplot as plt\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\n\nprint(__doc__)\n\ndata_path = sample.data_path()",
"Set parameters",
"raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevent_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nevent_id, tmin, tmax = 1, -0.2, 0.5\n\n# Setup for reading the raw data\nraw = io.Raw(raw_fname, preload=True)\nraw.filter(1, 40, method='iir')\nevents = mne.read_events(event_fname)\n\n# pick magnetometer channels\npicks = mne.pick_types(raw.info, meg='mag', stim=False, eog=True,\n include=[], exclude='bads')\n\n# If we suspend SSP projection at the epochs stage we might reject\n# more epochs than necessary. To deal with this we set proj to `delayed`\n# while passing reject parameters. Each epoch will then be projected before\n# performing peak-to-peak amplitude rejection. If it survives the rejection\n# procedure the unprojected raw epoch will be employed instead.\n# As a consequence, the point in time at which the projection is applied will\n# not have impact on the final results.\n# We will make use of this function to prepare for interactively selecting\n# projections at the evoked stage.\n\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=None, reject=dict(mag=4e-12),\n proj='delayed')\n\nevoked = epochs.average() # average epochs and get an Evoked dataset.",
"Interactively select / deselect the SSP projection vectors",
"# Here we expose the details of how to apply SSPs reversibly\ntitle = 'Incremental SSP application'\n\n# let's first move the proj list to another location\nprojs, evoked.info['projs'] = evoked.info['projs'], []\nfig, axes = plt.subplots(2, 2) # create 4 subplots for our four vectors\n\n# As the bulk of projectors was extracted from the same source, we can simply\n# iterate over our collection of projs and add them step by step to see how\n# the signals change as a function of the SSPs applied. As this operation\n# can't be undone we will operate on copies of the original evoked object to\n# keep things reversible.\n\nfor proj, ax in zip(projs, axes.flatten()):\n evoked.add_proj(proj) # add projection vectors loop by loop.\n evoked.copy().apply_proj().plot(axes=ax) # apply on a copy of evoked\n ax.set_title('+ %s' % proj['desc']) # extract description.\nplt.suptitle(title)\nmne.viz.tight_layout()\n\n# We also could have easily visualized the impact of single projection vectors\n# by deleting the vector directly after visualizing the changes.\n# E.g. had we appended the following line to our loop:\n# `evoked.del_proj(-1)`\n\n# Often, it is desirable to interactively explore data. To make this more\n# convenient we can make use of the 'interactive' option. This will open a\n# check box that allows us to reversibly select projection vectors. Any\n# modification of the selection will immediately cause the figure to update.\n\nevoked.plot(proj='interactive')\n\n# Hint: the same works with evoked.plot_topomap"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
atulsingh0/MachineLearning
|
Sklearn_MLPython/cross_validation-0.18.ipynb
|
gpl-3.0
|
[
"Cross Validation",
"# import\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import cross_val_score, KFold, train_test_split, cross_val_predict, LeaveOneOut, LeavePOut\nfrom sklearn.model_selection import ShuffleSplit, StratifiedKFold, StratifiedShuffleSplit, GroupKFold, LeaveOneGroupOut\nfrom sklearn.model_selection import LeavePGroupsOut, GroupShuffleSplit, TimeSeriesSplit\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.svm import SVC\n\nfrom scipy.stats import sem\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\niris = load_iris()\n\nX, y = iris.data, iris.target\n\n# splotting the data into train and test\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=27)\n\nprint(X_train.shape, X_test.shape, X_train.shape[0])",
"cross_val_score uses the KFold or StratifiedKFold strategies by default",
"# define cross_val func\n\ndef xVal_score(clf, X, y, K):\n \n # creating K using KFold\n cv = KFold(n_splits=2)\n \n # Can use suffle as well\n # cv = ShuffleSplit(n_splits=3, test_size=0.3, random_state=0)\n \n # doing cross validation\n scores = cross_val_score(clf, X, y, cv=cv)\n print(scores)\n print(\"Accuracy Mean : %0.3f\" %np.mean(scores))\n print(\"Std : \", np.std(scores))\n print(\"Standard Err : +/- {0:0.6f} \".format(sem(scores)))\n\nsvc1 = SVC()\nxVal_score(svc1, X_train, y_train, 10)\n\n# define cross_val predict\n# The function cross_val_predict has a similar interface to cross_val_score, but returns, \n# for each element in the input, the prediction that was obtained for that element when it \n# was in the test set. Only cross-validation strategies that assign all elements to a test \n# set exactly once can be used (otherwise, an exception is raised).\n\ndef xVal_predict(clf, X, y, K):\n \n # creating K using KFold\n cv = KFold(n_splits=K)\n \n # Can use suffle as well\n # cv = ShuffleSplit(n_splits=3, test_size=0.3, random_state=0)\n \n # doing cross validation prediction\n predicted = cross_val_predict(clf, X, y, cv=cv)\n print(predicted)\n print(\"Accuracy Score : %0.3f\" % accuracy_score(y, predicted))\n\nxVal_predict(svc1, X_train, y_train, 10)",
"Cross Validation Iterator \n K-Fold - KFold divides all the samples in k groups of samples, called folds (if k = n, this is equivalent to the Leave One Out strategy), of equal sizes (if possible). The prediction function is learned using k - 1 folds, and the fold left out is used for test.",
"X = [1,2,3,4,5]\nkf = KFold(n_splits=2)\nprint(kf)\nfor i in kf.split(X):\n print(i)",
"Leave One Out (LOO) - LeaveOneOut (or LOO) is a simple cross-validation. Each learning set is created by taking all the samples except one, the test set being the sample left out. Thus, for n samples, we have n different training sets and n different tests set. This cross-validation procedure does not waste much data as only one sample is removed from the training set:",
"X = [1,2,3,4,5]\nloo = LeaveOneOut()\nprint(loo)\nfor i in loo.split(X):\n print(i)",
"Leave P Out (LPO) - LeavePOut is very similar to LeaveOneOut as it creates all the possible training/test sets by removing p samples from the complete set. For n samples, this produces {n \\choose p} train-test pairs. Unlike LeaveOneOut and KFold, the test sets will overlap for p > 1",
"X = [1,2,3,4,5]\nloo = LeavePOut(p=3)\nprint(loo)\nfor i in loo.split(X):\n print(i)",
"Random permutations cross-validation a.k.a. Shuffle & Split - The ShuffleSplit iterator will generate a user defined number of independent train / test dataset splits. Samples are first shuffled and then split into a pair of train and test sets.\nIt is possible to control the randomness for reproducibility of the results by explicitly seeding the random_state pseudo random number generator.",
"X = [1,2,3,4,5]\nloo = ShuffleSplit(n_splits=3, test_size=0.25,random_state=0)\nprint(loo)\nfor i in loo.split(X):\n print(i)",
"Some classification problems can exhibit a large imbalance in the distribution of the target classes: for instance there could be several times more negative samples than positive samples. In such cases it is recommended to use stratified sampling as implemented in StratifiedKFold and StratifiedShuffleSplit to ensure that relative class frequencies is approximately preserved in each train and validation fold. \n Stratified k-fold \nStratifiedKFold is a variation of k-fold which returns stratified folds: each set contains approximately the same percentage of samples of each target class as the complete set.",
"X = np.ones(10)\ny = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]\n\nskf = StratifiedKFold(n_splits=3)\nfor i in skf.split(X, y):\n print(i)",
"Stratified Shuffle Split \nStratifiedShuffleSplit is a variation of ShuffleSplit, which returns stratified splits, i.e which creates splits by preserving the same percentage for each target class as in the complete set.",
"X = np.ones(10)\ny = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1]\n\nskf = StratifiedShuffleSplit(n_splits=3, test_size=0.25, random_state=33)\nfor i in skf.split(X, y):\n print(i)",
"Cross-validation iterators for grouped data\nThe i.i.d. assumption is broken if the underlying generative process yield groups of dependent samples.\nSuch a grouping of data is domain specific. An example would be when there is medical data collected from multiple patients, with multiple samples taken from each patient. And such data is likely to be dependent on the individual group. In our example, the patient id for each sample will be its group identifier.\nIn this case we would like to know if a model trained on a particular set of groups generalizes well to the unseen groups. To measure this, we need to ensure that all the samples in the validation fold come from groups that are not represented at all in the paired training fold.\nThe following cross-validation splitters can be used to do that. The grouping identifier for the samples is specified via the groups parameter. \n Group k-fold \nclass:GroupKFold is a variation of k-fold which ensures that the same group is not represented in both testing and training sets. For example if the data is obtained from different subjects with several samples per-subject and if the model is flexible enough to learn from highly person specific features it could fail to generalize to new subjects. class:GroupKFold makes it possible to detect this kind of overfitting situations.",
"X = [0.1, 0.2, 2.2, 2.4, 2.3, 4.55, 5.8, 8.8, 9, 10]\ny = [\"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\", \"d\", \"d\", \"d\"]\ngroups = [1, 1, 1, 2, 2, 2, 3, 3, 3, 3]\n\ngkf = GroupKFold(n_splits=3)\nfor train, test in gkf.split(X, y, groups=groups):\n print(\"%s %s\" % (train, test))",
"LeaveOneGroupOut \nLeaveOneGroupOut is a cross-validation scheme which holds out the samples according to a third-party provided array of integer groups. This group information can be used to encode arbitrary domain specific pre-defined cross-validation folds.\nEach training set is thus constituted by all the samples except the ones related to a specific group.",
"X = [0.1, 0.2, 2.2, 2.4, 2.3, 4.55, 5.8, 8.8, 9, 10]\ny = [\"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\", \"d\", \"d\", \"d\"]\ngroups = [1, 1, 1, 2, 2, 2, 3, 3, 3, 3]\n\ngkf = LeaveOneGroupOut()\nfor train, test in gkf.split(X, y, groups=groups):\n print(\"%s %s\" % (train, test))",
"Leave P Groups Out \nLeavePGroupsOut is similar as LeaveOneGroupOut, but removes samples related to P groups for each training/test set.",
"X = [0.1, 0.2, 2.2, 2.4, 2.3, 4.55, 5.8, 8.8, 9, 10]\ny = [\"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\", \"d\", \"d\", \"d\"]\ngroups = [1, 1, 1, 2, 2, 2, 3, 3, 3, 3]\n\ngkf = LeavePGroupsOut(n_groups=2)\nfor train, test in gkf.split(X, y, groups=groups):\n print(\"%s %s\" % (train, test))",
"Group Shuffle Split \nThe GroupShuffleSplit iterator behaves as a combination of ShuffleSplit and LeavePGroupsOut, and generates a sequence of randomized partitions in which a subset of groups are held out for each split.",
"X = [0.1, 0.2, 2.2, 2.4, 2.3, 4.55, 5.8, 8.8, 9, 10]\ny = [\"a\", \"b\", \"b\", \"b\", \"c\", \"c\", \"c\", \"d\", \"d\", \"d\"]\ngroups = [1, 1, 1, 2, 2, 2, 3, 3, 3, 3]\n\ngkf = GroupShuffleSplit(n_splits=4, test_size=0.5, random_state=33)\nfor train, test in gkf.split(X, y, groups=groups):\n print(\"%s %s\" % (train, test))",
"Time Series Split \nTimeSeriesSplit is a variation of k-fold which returns first k folds as train set and the (k+1) th fold as test set. Note that unlike standard cross-validation methods, successive training sets are supersets of those that come before them. Also, it adds all surplus data to the first training partition, which is always used to train the model.\nThis class can be used to cross-validate time series data samples that are observed at fixed time intervals.",
"X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])\ny = np.array([1, 2, 3, 4, 5, 6])\ntscv = TimeSeriesSplit(n_splits=3)\nprint(tscv) \nfor train, test in tscv.split(X):\n print(\"%s %s\" % (train, test))",
"Model evaluation: quantifying the quality of predictions\nEstimator score method: Estimators have a score method providing a default evaluation criterion for the problem they are designed to solve.\nScoring parameter: Model-evaluation tools using cross-validation (such as model_selection.cross_val_score and model_selection.GridSearchCV) rely on an internal scoring strategy.\nMetric functions: The metrics module implements functions assessing prediction error for specific purposes."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
usdivad/fibonaccistretch
|
nbs/format_rhythms_for_rtstretch.ipynb
|
mit
|
[
"Formatting rhythms for real-time stretch\nThe goal of this notebook is to come up with an algorithm to re-format original and target rhythms such that real-time time-stretching (or even real-time stuttering) is possible.\nSetup",
"import librosa\nimport numpy as np\n\nimport pardir; pardir.pardir() # Allow imports from parent directory\nimport bjorklund\nimport fibonaccistretch\n\nlibrosa.effects.time_stretch??\nlibrosa.core.phase_vocoder??\nfibonaccistretch.euclidean_stretch??\n\n# Generate rhythms based on parameters\ndef generate_original_and_target_rhythms(num_pulses, original_length, target_length):\n original_rhythm = bjorklund.bjorklund(pulses=num_pulses, steps=original_length)\n target_rhythm = bjorklund.bjorklund(pulses=len(original_rhythm), steps=target_length) # We use len(original_rhythm) instead of num_pulses because we're doing Euclidean stretch here\n return (original_rhythm, target_rhythm)\n\noriginal_rhythm, target_rhythm = generate_original_and_target_rhythms(3, 8, 13)\n\n\"Original rhythm: {} Target rhythm: {}\".format(original_rhythm, target_rhythm)\n\nlcm = (8*13) / fibonaccistretch.euclid(8, 13)\nlcm\n\n8*13",
"\"Equalize\": Make rhythms the same length\nUse LCM to \"equalize\" rhythms so that they're of equal length.\ne.g.\na = [1,0,0,1]\nb = [1,1,0]\nbecome\nequalized_a = [1,-,-,0,-,-,0,-,-,1,-,-]\nequalized_b = [1,-,-,-,1,-,-,-,0,-,-,-]",
"# \"Equalize\" (i.e. scale rhythms so they're of equal length)\ndef equalize_rhythm_subdivisions(original_rhythm, target_rhythm, delimiter=\"-\"):\n original_length = len(original_rhythm)\n target_length = len(target_rhythm)\n lcm = (original_length*target_length) / fibonaccistretch.euclid(original_length, target_length)\n original_scale_factor = (lcm / original_length) - 1\n target_scale_factor = (lcm / target_length) - 1\n \n print(\"lcm={}, original_scale_factor={}, target_scale_factor={}\").format(lcm, original_scale_factor, target_scale_factor)\n \n delimiter = str(delimiter)\n original_rhythm = list((delimiter*original_scale_factor).join([str(x) for x in original_rhythm]))\n target_rhythm = list((delimiter*target_scale_factor).join([str(x) for x in target_rhythm]))\n \n original_rhythm.extend(list(delimiter*original_scale_factor))\n target_rhythm.extend(list(delimiter*target_scale_factor))\n\n \n return (original_rhythm, target_rhythm)\n\n# print(scale_rhythm_subdivisions(original_rhythm, target_rhythm))\noriginal_rhythm, target_rhythm = generate_original_and_target_rhythms(3, 8, 13)\nprint(\"Original rhythm: {} Target rhythm: {}\".format(original_rhythm, target_rhythm))\nequalized_original_rhythm, equalized_target_rhythm = equalize_rhythm_subdivisions(original_rhythm, target_rhythm)\n(len(equalized_original_rhythm), len(equalized_target_rhythm))",
"Get pulse indices so we can see how the equalized original and target relate. In particular, our goal is to create a relationship such that the original pulse indices always come first (so that they're bufferable in real-time)",
"def get_pulse_indices_for_rhythm(rhythm, pulse_symbols=[1]):\n pulse_symbols = [str(s) for s in pulse_symbols]\n rhythm = [str(x) for x in rhythm]\n pulse_indices = [i for i,symbol in enumerate(rhythm) if symbol in pulse_symbols]\n return pulse_indices\n\nequalized_original_pulse_indices = get_pulse_indices_for_rhythm(equalized_original_rhythm)\nequalized_target_pulse_indices = get_pulse_indices_for_rhythm(equalized_target_rhythm)\n(equalized_original_pulse_indices, equalized_target_pulse_indices)",
"For original we'll actually use ALL the steps instead of just pulses though. So:",
"equalized_original_pulse_indices = get_pulse_indices_for_rhythm(equalized_original_rhythm, [1,0])\nequalized_target_pulse_indices = get_pulse_indices_for_rhythm(equalized_target_rhythm, [1])\nprint(equalized_original_pulse_indices, equalized_target_pulse_indices)",
"Now we can check to see if all the original pulse indices come first (this is our goal):",
"for i in range(len(equalized_original_pulse_indices)):\n opi = equalized_original_pulse_indices[i]\n tpi = equalized_target_pulse_indices[i]\n if (opi > tpi):\n print(\"Oh no; original pulse at {} comes after target pulse at {} (diff={})\".format(opi, tpi, opi-tpi))",
"Oh no... how do we fix this??\n\nOne solution is to just nudge them over, especially since they only differ by 1/104 to 2/104ths of a measure in this case. \nAnother solution would be to use the same data from the original pulse if there's not a new pulse available. Hmmmm\nOr use as much of the original buffer as we can...?\n\nFirst pass at format_rhythm(), without fixing rhythm\nBut first let's define a function for end-to-end formatting:",
"# Format original and target rhythms for real-time manipulation\ndef format_rhythms(original_rhythm, target_rhythm):\n # Equalize rhythm lengths and get pulse indices\n eor, etr = equalize_rhythm_subdivisions(original_rhythm, target_rhythm)\n eopi = get_pulse_indices_for_rhythm(eor, pulse_symbols=[1,0])\n etpi = get_pulse_indices_for_rhythm(etr, pulse_symbols=[1])\n\n # Find all the ones with problematic pulses (note that we're using *pulses* of target but *steps* of original)\n for i in range(min(len(eopi), len(etpi))):\n opi = eopi[i]\n tpi = etpi[i]\n if (opi > tpi):\n print(\"Oh no; original pulse at {} comes after target pulse at {} (diff={})\".format(opi, tpi, opi-tpi))\n \n # TODO: Fix problematic pulses\n #\n \n print(\"Formatted original: {}\".format(rtos(eor)))\n print(\"Formatted target: {}\".format(rtos(etr)))\n \n return (eor, etr)\n\n# Rhythm to string\ndef rtos(rhythm):\n return \"\".join(rhythm)",
"Alright let's try this out:",
"# len(original) > len(target)\nformatted = format_rhythms([1,0,0,1,0,0,1,0], [1,0,1])\n\n# len(original) < len(target)\nformatted = format_rhythms([1,0,0,1,0,0,1,0], [1,0,0,1,1,0,0,1,1,1,1])\n\n# Trying [1,0,1,0] and [1,1] as originals, with the same target\nformatted = format_rhythms([1,0,1,0], [1,0,0,1,0,0,1,0,0,0])\nprint(\"--------\")\nformatted = format_rhythms([1,1], [1,0,0,1,0,0,1,0,0,0])",
"To make things a bit clearer maybe we'll try the abcd format for rtos()",
"# Rhythm to string\n# Method: \"str\", \"alphabet\"\ndef rtos(rhythm, format_method=\"str\", pulse_symbols=[\"1\"]):\n pulse_symbols = [str(s) for s in pulse_symbols]\n \n if format_method == \"str\":\n return \"\".join(rhythm)\n elif format_method == \"alphabet\":\n alphabet = list(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n output = []\n ai = 0\n for i,x in enumerate(rhythm):\n if str(x) in pulse_symbols:\n output.append(alphabet[ai%len(alphabet)])\n ai += 1\n else:\n output.append(x)\n return \"\".join(output)\n else:\n return rhythm\n \n# Format original and target rhythms for real-time manipulation\ndef format_rhythms(original_rhythm, target_rhythm, format_method=\"str\", pulse_symbols=[\"1\"]):\n # Equalize rhythm lengths and get pulse indices\n eor, etr = equalize_rhythm_subdivisions(original_rhythm, target_rhythm)\n eopi = get_pulse_indices_for_rhythm(eor, pulse_symbols=[1,0])\n etpi = get_pulse_indices_for_rhythm(etr, pulse_symbols=[1])\n\n # Find all the ones with problematic pulses (note that we're using *pulses* of target but *steps* of original)\n for i in range(min(len(eopi), len(etpi))):\n opi = eopi[i]\n tpi = etpi[i]\n if (opi > tpi):\n print(\"Oh no; original pulse at {} comes after target pulse at {} (diff={})\".format(opi, tpi, opi-tpi))\n \n # TODO: Fix problematic pulses\n #\n \n print(\"\")\n print(\"Original: {}\".format(rtos(eor, format_method=format_method, pulse_symbols=[1,0])))\n print(\"Target: {}\".format(rtos(etr, format_method=format_method, pulse_symbols=[1])))\n \n return (eor, etr)\n\n# Trying [1,0,1,0] and [1,1] as originals, with the same target\nformatted = format_rhythms([1,0,1,0], [1,0,0,1,0,0,1,0,0,0], format_method=\"alphabet\")\nprint(\"\\n--------\\n\")\nformatted = format_rhythms([1,1], [1,0,0,1,0,0,1,0,0,0], format_method=\"alphabet\")\n\nformatted = format_rhythms(original_rhythm, target_rhythm, format_method=\"alphabet\")",
"Exploring adjustment options\nLet's use this example to explore adjustment options:",
"print(\"Original rhythm: {}\\nTarget rhythm: {}\\n\".format(original_rhythm, target_rhythm))\nformatted = format_rhythms(original_rhythm, target_rhythm, format_method=\"alphabet\")",
"In all the following cases only the target changes, not the original:\n1. For every problematic pulse (e.g. C), just re-use the previous pulse\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------C-------0-------D-------0-------E-------F-------0-------G-------0-------H-------\nbecomes\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------B-------0-------C-------0-------D-------E-------0-------F-------0-------G-------\nPros:\n\nIt's simple\n\nCons:\n\nWe end up losing pulse H.\n\n2. For every problematic pulse, re-use prev pulse, but on the next step (i.e. 0) use newest pulse\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------C-------0-------D-------0-------E-------F-------0-------G-------0-------H-------\nbecomes\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------B-------C-------D-------0-------E-------E-------F-------G-------0-------H-------\nPros:\n\nWe use all the pulses (if we have enough 0s)\n\nCons:\n\nWe kind of end up obfuscating the pulses actual desired target rhythm. In this case\n 10110 10110 101\n becomes\n 10111 10111 101\n\n3. For every problematic pulse, just nudge the corresponding target pulse\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------C-------0-------D-------0-------E-------F-------0-------G-------0-------H-------\nbecomes\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B---------C-----0-------D-------0-------E--------F------0-------G-------0-------H-------\nPros:\n\nWe avoid having to repeat any pulses (repeated pulses could sound weird)\nWe use all the pulses\n\nCons:\n\nThe rhythm becomes metrically incorrect and could sound unnatural/bad.\n\n4. Subdivide original rhythm further until we don't have problematic pulses anymore",
"formatted = format_rhythms(original_rhythm, target_rhythm, format_method=\"alphabet\")\nprint(\"\\n--------\\n\")\nformatted = format_rhythms([1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0], target_rhythm, format_method=\"alphabet\")",
"Needs more work.\n5. Method 4, but use 0s of target rhythm as pulses too\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------I------------J------------K------------L------------M------------N------------O------------P------------\nTrgt: A---------------0---------------B---------------C---------------0---------------D---------------0---------------E---------------F---------------0---------------G---------------0---------------H---------------\nbecomes\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------I------------J------------K------------L------------M------------N------------O------------P------------\nTrgt: A---------------B---------------C---------------D---------------E---------------F---------------G---------------H---------------I---------------J---------------K---------------L---------------M---------------\nPros:\n\nWe preserve both rhythms AND avoid repeating segments, which is good\n\nCons:\n\nWe end up scrapping a lot of the latter pulses (pulses N, O, and P)\n\n6. Use 0s of target rhythm as pulses too, if necessary\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------C-------0-------D-------0-------E-------F-------0-------G-------0-------H-------\n...\n7. Just always use the most recent pulse we have, when needed\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------C-------0-------D-------0-------E-------F-------0-------G-------0-------H-------\nbecomes\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------B-------0-------D-------0-------E-------E-------0-------G-------0-------H-------\nPros:\n\nPreserve rhythm shape, and it's simple\n\nCons:\n\nWe lose intermediate pulses, in this case C and F\n\n8. Combine methods 4 and 7\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------I------------J------------K------------L------------M------------N------------O------------P------------\nTrgt: A---------------0---------------B---------------C---------------0---------------D---------------0---------------E---------------F---------------0---------------G---------------0---------------H---------------\nbecomes\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------I------------J------------K------------L------------M------------N------------O------------P------------\nTrgt: A---------------B---------------C---------------D---------------E---------------G---------------H---------------I---------------J---------------K---------------M---------------N---------------O---------------\nPros:\n\nAgain, we preserve rhythm shape\n\nCons:\n\nWe lose intermediate pulses F, L, and P\n\n9. 
Method 7 (use most recent available pulse), but stretch instead of repeat pulses\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------C-------0-------D-------0-------E-------F-------0-------G-------0-------H-------\nbecomes\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------0-------0-------D-------0-------E-------0-------0-------G-------0-------H-------\nPros:\n\nAvoid having to repeat pulses\n\nCons:\n\nWe lose intermediate pulses, in this case C and F\nWe lose rhythm shape, as\n 10110 10110 101\n becomes\n 10100 10100 101\n\n10. Method 2, but stretch instead of repeat. So stretch, and use new pulse on next available step (i.e. 0)\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------C-------0-------D-------0-------E-------F-------0-------G-------0-------H-------\nbecomes\nOrig: A------------B------------C------------D------------E------------F------------G------------H------------\nTrgt: A-------0-------B-------0-------C-------D-------0-------E-------0-------F-------G-------0-------H-------\nPros:\n\nAvoid having to repeat pulses\nWe use all the pulses!\n\nCons:\n\nWe lose rhythm shape, as\n 10110 10110 101\n becomes\n 10101 10101 101\n\nMethod 11: Just don't let the user create target rhythms with problematic pulses\nMmhmm din\nSo What?\nSo it's a tradeoff between:\n\na) Using all the pulses\nb) Preserving target rhythm shape\nc) Avoiding repeating of pulses (which could sound unnatural)\n\nMethod 10 achieves (a) and (c).\nMethod 5 achieves (b) and (c).\nMethod 2 achieves (a).\nMethod 7 achieves (b), while 9 achieves (c).\nTrying out method 10\nLet's try Method 10 for now. We'll redefine our rtos method:",
"# Rhythm to string\n# Method: \"str\", \"alphabet\"\ndef rtos(rhythm, format_method=\"str\", pulse_symbols=[\"1\"]):\n pulse_symbols = [str(s) for s in pulse_symbols]\n rhythm = [str(x) for x in rhythm]\n \n if format_method == \"str\":\n return \"\".join(rhythm)\n elif format_method == \"alphabet\":\n alphabet = list(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n output = []\n ai = 0\n for i,x in enumerate(rhythm):\n if str(x) in pulse_symbols:\n output.append(alphabet[ai%len(alphabet)])\n ai += 1\n else:\n output.append(x)\n return \"\".join(output)\n else:\n return rhythm\n \n# Format original and target rhythms for real-time manipulation\ndef format_rhythms(original_rhythm, target_rhythm, format_method=\"str\", original_pulse_symbols=[1,0], target_pulse_symbols=[\"1\"], delimiter=\"-\"):\n \n # Convert all arrays to string arrays\n original_rhythm = [str(x) for x in original_rhythm]\n target_rhythm = [str(x) for x in target_rhythm]\n original_pulse_symbols = [str(x) for x in original_pulse_symbols]\n target_pulse_symbols = [str(x) for x in target_pulse_symbols]\n \n # Adjust target rhythm if inappropriate number of pulses\n if len(original_rhythm) > len([x for x in target_rhythm if x in target_pulse_symbols]):\n while len(target_rhythm) < len(original_rhythm):\n target_rhythm.extend(target_rhythm)\n target_rhythm = bjorklund.bjorklund(pulses=len(original_rhythm), steps=len(target_rhythm))\n target_rhythm = [str(x) for x in target_rhythm]\n \n # Equalize rhythm lengths and get pulse indices\n eq_original_rhythm, eq_target_rhythm = equalize_rhythm_subdivisions(original_rhythm, target_rhythm, delimiter=delimiter)\n eq_original_step_indices = get_pulse_indices_for_rhythm(eq_original_rhythm, pulse_symbols=[1,0])\n eq_target_pulse_indices = get_pulse_indices_for_rhythm(eq_target_rhythm, pulse_symbols=[1])\n \n # Assertion that may not hold up\n # assert(len(eq_original_step_indices) == len(target_pulse_indices))\n\n # Find all the ones with problematic pulses (note that we're using *pulses* of target but *steps* of original)\n # for i in range(min(len(eq_original_step_indices), len(eq_target_pulse_indices))):\n # osi = eq_original_step_indices[i]\n # tpi = eq_target_pulse_indices[i]\n # if (osi > tpi):\n # print(\"Oh no; original pulse at {} comes after target pulse at {} (diff={})\".format(osi, tpi, osi-tpi))\n \n # Fix problematic pulses using method 10\n # (Starting to do this in a more C++ style so it's easier to port)\n \n fixed_eq_target_rhythm = list(delimiter * len(eq_target_rhythm))\n eq_target_step_indices = get_pulse_indices_for_rhythm(eq_target_rhythm, pulse_symbols=[1,0])\n \n print(\"eq_original_step_indices: {}\".format(eq_original_step_indices))\n print(\"eq_target_pulse_indices: {}\".format(eq_target_pulse_indices))\n print(\"eq_target_step_indices: {}\".format(eq_target_step_indices))\n \n osi_idx = -1\n tpi_idx = 0\n \n for i in range(len(eq_target_rhythm)):\n # print(i)\n\n # Update index for original step indices\n if i in eq_original_step_indices:\n # osi_idx = min(osi_idx+1, len(eq_original_step_indices)-1)\n osi_idx += 1\n \n \n # Adjust\n if i in eq_target_step_indices:\n osi = eq_original_step_indices[osi_idx]\n tpi = eq_target_pulse_indices[tpi_idx]\n print(\"{}: osi@{}={}, tpi@{}={}\".format(i, osi_idx, osi, tpi_idx, tpi))\n \n # Make sure current position isn't earlier than original's,\n # and that target pulse position isn't ahead of original pulse position\n if i >= osi and i >= tpi and osi_idx >= tpi_idx:\n fixed_eq_target_rhythm[i] = 1\n tpi_idx += 1\n print(\"set to 
1\")\n continue\n else:\n fixed_eq_target_rhythm[i] = 0\n print(\"set to 0\")\n continue\n\n # Otherwise, it's a delimiter, so we just put a delimiter there\n fixed_eq_target_rhythm[i] = delimiter\n \n print(\"\")\n print(\"OrigPlse: {}\").format(rtos(eq_original_rhythm, format_method=\"str\", pulse_symbols=[1]))\n print(\"OrigStep: {}\".format(rtos(eq_original_rhythm, format_method=format_method, pulse_symbols=[1,0])))\n print(\"TrgtPlse: {}\".format(rtos(eq_target_rhythm, format_method=format_method, pulse_symbols=[1])))\n print(\"FixdPlse: {}\".format(rtos(fixed_eq_target_rhythm, format_method=format_method, pulse_symbols=[1])))\n \n return (eq_original_rhythm, fixed_eq_target_rhythm)\n\nprint(\"Original rhythm: {}\\nTarget rhythm: {}\\n\".format(original_rhythm, target_rhythm))\nformatted = format_rhythms(original_rhythm, target_rhythm, format_method=\"alphabet\")",
"That looks more like it! Let's try a few more:",
"formatted = format_rhythms([1,0,1,1], [1,0,1,0,1], format_method=\"alphabet\")\n\nformatted = format_rhythms([1,0,1,1], [1,0,1,0,1,1], format_method=\"alphabet\")\n\nformatted = format_rhythms([1,0,1,1,0,1], [1,0,1,1,0,1,0], format_method=\"alphabet\")\n\nformatted = format_rhythms([1,0,1,1,0,1], [0], format_method=\"alphabet\")",
"In the previous two examples, the fixed rhythm doesn't resemble the target rhythm at all...\nI guess this is because we're using the Bjorklund fix too, to fix pulses.\nBut if we're going through all this trouble to \"fix\" rhythms, maybe we should just place constraints on what the user can do in the first place?\nI guess we should have both.\ne.g. if they put a pulse before the original position, it'll show up red or something.\nHmm, but in that case, maybe we do want to preserve the original rhythm. And just have it repeat or something?\nIn other words, use method 7.\nTrying out method 7",
"# TODO: Try with method 7 vs. method 10",
"Misc noodles",
"np.indices?\n\n\"0\"*4\n\"-\".join([str(x) for x in [2,3,4]])\n\nprint(\"a\"); print(\"b\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
bpgc-cte/python2017
|
Week 3/Lecture_5_Introdution_to_Functions.ipynb
|
mit
|
[
"Introduction to Programming : Lecture 5\nAgenda for the class\n\nIntroduction to functions\nPractice Questions\n\nFunctions in Python\nSyntax\ndef function_name(input_1,input_2,...):\n '''\n Process input to get output\n '''\n return [output1,output2,..]",
"#Example_1: return keyword\ndef straight_line(slope,intercept,x):\n \"Computes straight line y value\"\n y = slope*x + intercept\n return y\n\nprint(\"y =\",straight_line(1,0,5)) #Actual Parameters\nprint(\"y =\",straight_line(0,3,10))\n\n#By default, arguments have a positional behaviour\n#Each of the parameters here is called a formal parameter\n\n\n#Example_2\ndef straight_line(slope,intercept,x):\n y = slope*x + intercept\n print(y)\n \n \nstraight_line(1,0,5)\nstraight_line(0,3,10)\n\n#By default, arguments have a positional behaviour\n#Functions can have no inputs or return.",
"Question: Is it necessary to know the order of parametres to send values to a function?",
"\nstraight_line(x=2,intercept=7,slope=3) ",
"Passing values to functions",
"list_zeroes=[0 for x in range(0,5)]\nprint(list_zeroes) \n\ndef case1(list1):\n list1[1]=1\n print(list1)\n \ncase1(list_zeroes)\nprint(list_zeroes) \n\n#Passing variables to a function\nlist_zeroes=[0 for x in range(0,5)]\nprint(list_zeroes) \n\ndef case2(list1):\n list1=[2,3,4,5,6]\n print(list1)\n \ncase2(list_zeroes)\nprint(list_zeroes) \n",
"Conclusion:\n\nIf the input is a mutable datatype, and we make changes to it, then the changes are refelected back on the original variable. (Case-1)\nIf the input is a mutable datatype, and we assign a new value to it, then the changes are not refelected back on the original variable. (Case-2)\n\nDefault Parameters",
"def calculator(num1,num2,operator='+'):\n if (operator == '+'):\n result = num1 + num2\n elif(operator == '-'):\n result = num1 - num2\n \n return result \n\nn1=int(input(\"Enter value 1: \"))\nn2=int(input(\"Enter value 2: \"))\nv_1 = calculator(n1,n2)\nprint(v_1)\nv_2 = calculator(n1,n2,'-')\nprint(v_2) \n\n# Here, the function main is termed as the caller function, and the function\n# calculator is termed as the called function\n# The operator parameter here is called a keyword-argument",
"Initialization of variables within function definition",
"\ndef f(a, L=[]):\n L.append(a)\n return L\n\nprint(f(1))\nprint(f(2))\nprint(f(3))\n\n# Caution ! The list L[] was initialised only once.\n#The paramter initialization to the default value happens at function definition and not at function call.",
"* operator\n1. Unpacks a list or tuple into positional arguments\n\n** operator\n2. Unpacks a dictionary into keyword arguments\n\nTypes of parametres\n\nFormal parameters (Done above, repeat)\nKeyword Arguments (Done above, repeat)\n*variable_name : interprets the arguments as a tuple\n**variable_name : interprets the arguments as a dictionary",
"def sum(*values):\n s = 0\n for v in values:\n s = s + v\n return s\n\ns = sum(1, 2, 3, 4, 5)\nprint(s)\n\ndef get_a(**values):\n return values['a']\n\ns = get_a(a=1, b=2) # returns 1\nprint(s)\n\ndef sum(*values, **options):\n s = 0\n for i in values:\n s = s + i\n if \"neg\" in options:\n if options[\"neg\"]:\n s = -s\n return s\n\ns = sum(1, 2, 3, 4, 5) # returns 15\nprint(s)\ns = sum(1, 2, 3, 4, 5, neg=True) # returns -15\nprint(s)\ns = sum(1, 2, 3, 4, 5, neg=False) # returns 15\nprint(s)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
asignor/DANDP3
|
DANDP3.ipynb
|
mit
|
[
"DANDP3\nWrangling Data from OSM with MongoDB\nData Analyst Nanodegree\nAnna Signor\nProcess Overview\nI followed the folowing steps:\n\ndownload XML from Open Street Map, using mapzen\nwith Python scripts check for problems in the data\nadjust code accordingly\nparse and shape data into one JSON file\nimport data into MongoDB\nthrough datbase queries, check for remaining problems\nrepeat last 4 steps until data is acceptable\n\nArea\nThe area chosen is São Paulo, Brazil, where I was born and raised. The XML dataset was downloaded from MapZen. My intention was to download and explore the São Paulo Metropolitan Area, this map relation. As is detailed in a further section, I found that this is a rather ambiguous term, there being at least two different concepts that translate into that from Portuguese. The conclusion is the area analysed is called Complexo Metropolitano Expandido or Expanded Metropolitan Complex of São Paulo, a considerably larger area than what I initially thought I had downloaded.\nExamples of Problems Uncovered Before Querying DB\n1. Street Names\nUsing an audit function two distinct classes of problems were uncovered with the street names:\n street types uppper/lower case or abbreviations inconsistencies, mispellings: \nE. g. the words \"Rua\", \"R.\", \"RUA\", \"Rue\" and \"rua\" all occurred\n street types mising : \nE. g. \"Alfonso Bovero\" where \"Avenida Alfonso Bovero\" should be\nBoth issues were addressed by the function improve_names_BR below. Information elucidaded by audit was fed back into the code.",
"mapping = {'avenida':'Avenida', #mapping to fix case mispellings, case, abbreviations\n u'Al.': 'Alameda',\n 'Rue': 'Rua',\n u'Av.': 'Avenida',\n u'Av': 'Avenida',\n 'RUa': 'Rua',\n 'R': 'Rua',\n 'Acost.': 'Acostamento',\n 'RUA': 'Rua',\n 'rua' : 'Rua',\n 'R.' : 'Rua',\n 'AC': 'Acesso',\n 'estrada' : 'Estrada',\n 'travessa' : 'Travessa' \n } \ngood_types = set(['Acostamento', #Set of acceptable street types\n u'Pra\\xe7a', \n 'Alameda', \n 'Viela', \n 'Estrada', \n 'Rua', \n 'Acesso', \n 'Parque', \n 'Largo', \n 'Via', \n 'Marginal', \n 'Rodovia', \n 'Corredor', \n 'Viaduto', \n 'Travessa', \n 'Pateo', \n 'Avenida', \n 'Passagem',\n u'Complexo Vi\\xe1rio'])\nmapping2 = {u'1\\xaa Travessa da Estrada do Morro Grande' : '', #mapping for case when street type is missing\n 'Alfonso Bovero' : 'Avenida', #this was manually created by looking up all the names\n u'N\\xedvia Maria Dombi' : 'Travessa' #the ones not showing here are \"Rua\" types\n }\n\ngood_tuple = tuple(good_types)\n\ndef improve_name_BR(name):\n \"\"\"takes a street name from sao paulo and returns improved name\"\"\"\n words = name.split() \n if name.startswith(good_tuple):\n return name\n ### if name is okay, return name (do nothing)\n elif words[0] in mapping:\n words[0] = mapping[words[0]] \n return ' '.join(words)\n ### if type is mispelled or miscased, update 1st word of name and return joined string\n elif name in mapping2:\n return mapping2[name] + ' ' + name\n ### if name is one of the odd cases but not needing word 'Rua', use mapping2 to fix\n else:\n return 'Rua' + ' ' + name\n ### the cases left are the ones where the word 'Rua' was left out\n ### this choice was made because \"Rua\" is the most commonly occurring type",
"2. Data Structure\nThe data structure was interesting. It may be adequate for OSM, but it is certainly not how I would like for it to figure in the MongoDB collection. Some information is represented directly as attributes of a main XML data primitive element, while others, as attributes of child elements tagged \"tag\". A choice was made by OSM to have attributes called \"k\" and \"v\", the values of which represent keys and values, rather than using an XML of the type<key> value <key/>. Furthermore, there was use of a colon hierarquical structure in some of the \"k\"s. Here is an example node:\n<node changeset=\"38648623\" id=\"4128352041\" lat=\"-23.5591903\" lon=\"-46.6587486\" timestamp=\"2016-04-17T17:59:02Z\" uid=\"2030995\" user=\"Bonix-Mapper\" version=\"1\">\n <tag k=\"name\" v=\"Banca Paulista V\" />\n <tag k=\"shop\" v=\"books\" />\n <tag k=\"phone\" v=\"+55 11 3288-8241\" />\n <tag k=\"addr:street\" v=\"Avenida Paulista\" />\n </node>\nI would like to represent the data in JSON in a different shape, so this was something that had to be considered while parsing. To put it simply: although a very simple and straight-forward mapping to translate XML into JSON is always possible, this is not what I used because the data was not in a desireable shape. This is what the function shape_element mostly does. \nThe shape chosen to represent the data is the following:\n{\n\"id\": value,\n\"data_prim\": node_way_or_relation,\n\"visible\":true_or_false,\n\"created\": {\n \"version\":value,\n \"changeset\":value,\n \"timestamp\":value,\n \"user\":value,\n \"uid\":value_for_user\n },\n\"pos\": [LAT, LON],\n\"address\": {\n \"housenumber\": value,\n \"postcode\": value,\n \"street\": value\n ...\n },\n\"amenity\": value,\n\"cuisine\": value,\n\"name\": value,\n\"phone\": value\n\"any_other_attrib_1\": value\n...\n\"any_other_attrib_n\": value\n\"A\" : {\n \"B\" : { \n \"C\" : value \n }\n }\n}\nwhere \"A\", \"B\" and \"C\" represent the <tag k=\"A:B:C\" v=value /> value situations that are not the address, which is a special case.\nMost of this was hard-coded into shape_element, and to deal with the colons I used a helper function called smarter_nestify. (Which actually allows for processing a key containing an arbitrary number of colons into nested dictionaries, using regression.)",
"def smarter_nestify(l, record):\n \"\"\"Takes a list [a1, a2, a3, ... , an, value] and a pre-existing dictionary structure returns a nested dictionary object\n {a1 : {a2 : {a3 : ... {an : value} ...}}}, respecting the pre-existing dictionary records, that is, for each recursion\n step if a dictionary ai already exists it will add a key ai+1 to it rather than creating a new dictionary ai.\"\"\" \n if len(l) == 2: #if list is down to two elements [a, val], return {a : val} \n key = l[0]\n value = l[1]\n return {key: value}\n else:\n key = l[0]\n record[key] = smarter_nestify(l[1:], record.get(key, {})) \n return record\n # function pops the first element of the list, makes a dictionary {k : v} where k is the popped element and v is what is \n # returned when calling itself on popped list and empty dictionary or existing one, depending on record\n \"\"\"adapted from: \n http://stackoverflow.com/questions/37014500/how-to-use-recursion-to-nest-dictionaries-while-integrating-with-existing-record\n \"\"\"",
"3. Repeated Attribute Keys\nThis was an interesting issue that stems from not discarding the tags containing colons. \nThe problem is tag 'k' attributes that have different functions being called the exact same in the XML data. \nFor example, we had: \n<tag 'k'='lanes:psv:forward' 'v'='1'>\nand \n<tag 'k'='lanes' 'v'='2'>\nin the same node. In the OSM XML schema, the first data has to do with lanes that have special permission (like a taxi, carpool or a bus lane) while the second is simply the number of lanes in any road. This was causing a variable type error. It was resolved by repeatedly trapping the error and using the information back into the code manually. \nRef: https://discussions.udacity.com/t/keep-attr-atrr-atrr-formatted-data/166864/14\n4. Variable types\nThe data was almost entirely represented in strings or unicode. A lot of the data will be more useful a different variable type. MongoDB supports all the types supported in Python, and since some of the calculations are numerical, it will be in my favor to convert types as the data is parsed.\nConversions made:\n\n\n'POS' into float\n\n\n'version' into int\n\n\nOBS: It is in my favor to treat postcode as a string since the zeroes to the left have significance, which an int type would ignore. Also, brazilian postcodes contain a non-numerical charachter \"-\", which would cause problems.\nExamples of Problems Uncovered by DB Query\nThe cell below is preparation for DB query.",
"from pymongo import MongoClient\nimport pprint\n\nclient = MongoClient()\ndb = client\nsp = db.my_osm.cme #shorthand since all the queries will be in same collection",
"1. Bad Key 'type' Creating Incorrect Parsing\nAt first, the desired data format had a 'type' key in the main JSON node, to designate a map node, way or relation (please note the word \"node\" here has two very different meanings). So I ran the query below to find out how many relations were in the dataset.",
"relations = sp.find({'type' : 'relation'})",
"The original output of this query was 4. It seems odd that a metropolis so big would have 4 relations. Investigating further, I found there were 261671 ways and 1900291 nodes.",
"4 + 261671 + 1900291 - 2168319 #this should return 0",
"What this effectively means is that there is some kind of discrepancy, and it is not small.",
"types = sp.distinct('type')\npprint.pprint(types)",
"The above output is totally unexpected, as we should see only \"node\", \"relation\" or \"way\". A little research was helpful in pinpointing the issue, mainly that relation data primitive were translating into nodes with incorrect 'type' assignment, because many of the relations themselves had a 'type' attribute or tag child. Full explanation on this reference. The forum post also contains the solution, which is to not use the word \"type\". I decided to call this key \"data_prim\" in short for \"data primitive\".\nAfter fixing the Python code, making a new JSON file, clearig the old collection off the database and loading the new one, when we run the queries below the expected outputs are produced.",
"types = sp.distinct('data_prim')\npprint.pprint(types)\n\ncursor = sp.find({'data_prim' : 'way'})\na = len(list(cursor))\nprint a, 'ways'\ncursor = sp.find({'data_prim' : 'node'})\nb = len(list(cursor))\nprint b, 'nodes'\ncursor = sp.find({'data_prim' : 'relation'})\nc = len(list(cursor))\nprint c, 'relations'\nprint 'discrepancy:', 2168319 - a - b - c",
"2. Seamarks and unexpected postcodes\nOne of the strange things in my data was the occurrence of the tag \"seamark\". This is one of the features that make heavy use of the colons structure in the OSM XML schema, so it was in the back of my head. A simple query reveales how many of them there are in the data.",
"sp.find({'seamark' : {'$exists' : 1}}).count()",
"The reason why this is strange is upon further investigation in the OSM Wiki, I found these are features that should occur in oceanic coasts. Querying the data base for examples I found some were lighhouses and buoyes. The problem is the metropolitan area I was supposed to be analysing contains NO sea coast.\nThe query below shows how many and which cities figure in the dataset.",
"cities = sp.distinct('address.city')\n\ncities.sort()\nprint cities\nlen(cities)",
"This seems to indicate the area is, in fact, what is called Expanded Metropolipan Complex of São Paulo or Complexo Metropolitano Expandido (in this terminology \"São Paulo\" is implied), also called Paulistan Macrometropolis or Macrometrópole Paulista. It is not, as I thought, São Paulo Metropolitan Area, or Região Metropolitana de São Paulo (RMSP), also called Large São Paulo Grande São Paulo. The difference in area and concepts can be grasped by checking out the Wikipedia pages RMSP and CME for those who are interested. The important point to make is that this dataset is referring to the Expanded Metropolitan Complex which indeed includes parts of the Atlantic coast. This explains the occurrence of municiples and postcodes I did not expect, as well as the \"seamark\" tags, which I now understand are from the Santos Seaboard.\nIt also explains the occurrence of certain unexpected postcodes:",
"sp.find({'address.postcode' : {'$regex' : '^1[2-9]'}}).count()\n\nsp.find_one({'address.postcode' : {'$regex' : '^1[2-9]'}})",
"2. Postcode Format Inconsistencies\nUsing the '$regex' operator, I was able to audit how postcodes format. By Brazilian convention, the format we should see is 'ddddd-ddd', or the regex '^([0-9]){5}([-])([0-9]){3}$'.",
"sp.find({'address.postcode' : {'$exists' : 1}}).count()\n\nsp.find({'address.postcode' : {'$regex' : '^([0-9]){5}([-])([0-9]){3}$'}}).count()",
"The query shows there are some inconsistencies. I want to peek at 10 examples and see some cases.",
"pipe = [{'$match' : {'address.postcode' : { '$regex' : '^(?!^^([0-9]){5}([-])([0-9]){3}$).*$'}}},\n { '$limit' : 10 }, \n {'$project' : {'address' : 1 }}]\nlist(sp.aggregate(pipe))",
"It seems there is a mix of incorrect format, such as '05025010' instead of '05025-010' and typos like an extra number or a missing one. My solution is in the first case, reformat, the second, discard. This was included in shape_element, and the data re-parsed and re-loaded into MongoDB.",
"pipe = [{'$match' : {'address.postcode' : { '$regex' : '^(?!^^([0-9]){5}([-])([0-9]){3}$).*$'}}},\n { '$limit' : 10 }, \n {'$project' : {'address' : 1 }}]\nlist(sp.aggregate(pipe))",
"As seen above, a query for 10 postcodes that do not fit the format now returns an empty list, showing the problem was fixed.\nOBS: A similar process can be done for phone numbers, however it is an intricate process for brazilian phone numbers are there are many different formats. (At this point numbers can have 8 to 10 digits excluding area code, there is an optional designation of operator, and other complicating issues.) This will require a little more time consuming research.\n3. Incorrect Postcode",
"t = sp.find({'address.postcode' : {'$regex' : '^2'}})\npprint.pprint(list(t))",
"The above postcode is incorrect, it is supposed to be 02545-000.\n4. Missing Postcodes (CEPs)",
"a = sp.find({'address' : {'$exists' : 1}}).count()\nb = sp.find({'address' : {'$exists' : 1}, 'address.postcode' : {'$exists' : 1}}).count()\nc = sp.find({'address' : {'$exists' : 1}, 'address.postcode' : {'$exists' : 0}}).count()\nprint 'number of addresses:', a\nprint 'number of addresses with CEP:', b \nprint 'number of addresses without CEP:', c \nprint 'percentage of addresses missing CEP:',int((float(c)/float(a))*100),'%'",
"Almost half of the addresses do not have postcodes (called \"CEP\" in Brazil). One follow-up project would be to scrape the CEPs from a reputable website (like Correios) and feed the CEPs back into the database. A good measure would be to obtain them by coordinates and by address both and analyse discrepancies.\nStatistical Overview of the Data\nThis section contains statistical facts about the data, as well as the query used to determine it when applicable.\n XML file size: \n411,798 KB\n JSON file:\nis 472,242 KB\n number of documents in the colection:",
"sp.find().count()",
"number of documents by data primitive:",
"cursor = sp.find({'data_prim' : 'way'})\na = len(list(cursor))\nprint a, 'ways'\ncursor = sp.find({'data_prim' : 'node'})\nb = len(list(cursor))\nprint b, 'nodes'\ncursor = sp.find({'data_prim' : 'relation'})\nc = len(list(cursor))\nprint c, 'relations'",
"unique user ids:",
"len(sp.distinct('created.uid'))",
"the document that has the highest number of versions:",
"versions = sp.distinct('created.version')\nprint max(versions)\npprint.pprint(list(sp.find({'created.version' : max(versions)})))",
"'amenities' that occur the most, top 10",
"pipe = [{'$match' : {'amenity': {'$exists' : 1}}},\n {'$group': {'_id': '$amenity', 'count': {'$sum': 1}}},\n {'$sort' : {'count': -1}},\n {'$limit' : 10},\n {'$project' : {'amenity' : 1, 'count': 1}}\n ]\nc = sp.aggregate(pipe)\npprint.pprint(list(c))",
"top 10 religions",
"pipe = [{'$match' : {'amenity': 'place_of_worship'}},\n {'$group': {'_id': '$religion', 'count': {'$sum': 1}}},\n {'$sort' : {'count': -1}},\n {'$limit' : 10},\n {'$project' : {'religion' : 1, 'count': 1}}\n ]\nc = sp.aggregate(pipe)\npprint.pprint(list(c))",
"This shows this data is severely incomplete and does not lend itself for statistics, not on religion, anyway. There are lenty more than 2 Jewish places of worship in Sao Paulo.\n number of pizza places:",
"sp.find({'cuisine': 'pizza'}).count()",
"Again, I am sure that it actually is more.\nExamples of Curiosity Queries\nThis section contains queries that were performed motivated by personal curiosity. If the choices seem arbitrary, it is because they are.\nHow many street names have a military rank in the name?",
"def street_starts_with(letters):\n \"\"\"takes a string and returns a regex string to be used with operator\n $regex to query sp collections for streets starting with the string\"\"\"\n a = ['Acostamento', \n u'Pra\\xe7a', \n 'Alameda', \n 'Viela', \n 'Estrada', \n 'Rua', \n 'Acesso', \n 'Parque', \n 'Largo', \n 'Via', \n 'Marginal', \n 'Rodovia', \n 'Corredor', \n 'Viaduto', \n 'Travessa', \n 'Pateo', \n 'Avenida', \n 'Passagem',\n u'Complexo Vi\\xe1rio']\n expression = '|'.join(a)\n return '^'+'(' + expression + ')' + ' ' + letters\n\n\"\"\" from wikipedia: Almirante\tMarechal\tMarechal do Ar\nAlmirante de Esquadra\tGeneral de Exército\tTenente Brigadeiro do Ar\nVice Almirante\tGeneral de Divisão\tMajor Brigadeiro\nContra Almirante\tGeneral de Brigada\tBrigadeiro\nCapitão de Mar e Guerra\tCoronel\tCoronel\nCapitão de Fragata\tTenente Coronel\tTenente Coronel\nCapitão de Corveta\tMajor\tMajor\nCapitão Tenente\tCapitão\tCapitão \"\"\"\n\nmilitary_ranks = ['Almirante',\n 'Marechal',\n 'Marechal',\n 'General',\n 'Tenente',\n 'Brigadeiro',\n 'Major',\n 'Contra Almirante',\n u'Capitão']\nnomes = {}\nfor rank in military_ranks:\n nomes[rank] = set()\n result = sp.find({'data_prim' : 'way', 'name': {'$regex': street_starts_with(rank)}})\n for r in result:\n nomes[rank].add(r['name'])\nsoma = 0\nfor k in nomes:\n soma += len(nomes[k])\n print k, ':', len(nomes[k])\nprint 'TOTAL:', soma\n",
"How many street names start with X? And Z?",
"x = sp.find({'data_prim':'way', 'name': {'$regex': street_starts_with('X')}}).count()\nprint 'Starting with X:', x\nz = sp.find({'data_prim':'way', 'name': {'$regex': street_starts_with('Z')}}).count()\nprint 'Starting with Z:', z",
"Conclusion\nThere are some unanswered questions that could lend themselves to further projects:\n\nWhat is the exact delimitations of the data and is it complying with official government designation for the Expanded Metropolitan Complex?\n\nIt proved harder than it seems to obtain such official designation. I fully intend to continue to pursue this question. Once the official information is at hand, it will not be hard to check against the data.\n\nHow many street, schools or other public places have been named after officials of the military dictatorship under which Brazil was governed between 1964 and 1985?\n\nThis is a bigger project, requiring the scraping of quantities of information on the historical period. It is, however, feasible and an important question to be answered. Perhaps, initially it can be answered for the city only and then expand the question.\nOverall, the data lacks completeness and normalization. One important task is to add the postcodes to all the addresses, a plan for which is outlined above. Another idea for data validation is to check the data for consistency with information scraped from Correios website, which contains the official post office information in Brazil. Once the post office data is understood and one is able to perform efficient requests and scraping, the following validation tests can be performed:\n - postcodes from data vs POS \n - postcode from data vs street names\nThis is powerful data and can be used in many different creative ways, such as using the POS information to make plots showing what is popular in each neighborhood, which ones have more parks, different types of churches. The possibilities are endless once the data is properly clean, normalized and validated.\nReferences\n\nhttp://www.openstreetmap.org/relation/2661855#map=9/-23.6242/-46.4510\nseveral pages os OSM Wiki\nhttps://pt.wikipedia.org/wiki/Regi%C3%A3o_Metropolitana_da_Baixada_Santista\nhttps://pt.wikipedia.org/wiki/Complexo_Metropolitano_Expandido\nhttps://pt.wikipedia.org/wiki/Regi%C3%A3o_Metropolitana_de_S%C3%A3o_Paulo#Munic.C3.ADpios\nhttps://en.wikipedia.org/wiki/Expanded_Metropolitan_Complex_of_S%C3%A3o_Paulo\nhttps://pt.wikipedia.org/wiki/Complexo_Metropolitano_Expandido\nhttp://stackoverflow.com/questions/37014500/how-to-use-recursion-to-nest-dictionaries-while-integrating-with-existing-record\nhttps://discussions.udacity.com/t/reducing-memory-footprint-when-processing-large-datasets-in-xml/37571/3\nhttp://stackoverflow.com/questions/3095434/inserting-newlines-in-xml-file-generated-via-xml-etree-elementtree-in-python\nhttps://discussions.udacity.com/t/how-to-provide-sample-data-for-the-final-project/7118/13\nhttps://discussions.udacity.com/t/valueerror-i-o-operation-on-closed-file/167469/6\nhttps://discussions.udacity.com/t/keep-attr-atrr-atrr-formatted-data/166864/14\nhttps://discussions.udacity.com/t/i-have-an-adequate-update-name-to-improve-street-names-not-sure-how-to-use-it/166569/5"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
david-hagar/NLP-Analytics
|
python-sklearn-kmeans/kmeans_clustering.ipynb
|
mit
|
[
"K-means Clustering in sci-kit learn\nThis example uses a dataset downloaded from https://www.opensubtitles.org/en/search/vip and the raw data at opus.lingfil.uu.se/OpenSubtitles2016/raw/en. Metadata such as title actor and director was scraped from IMDB and is not guaranteed to be complete. This example uses the last 5000 most recent movies. The full archive (1.1 Gig) is here.\nThe code does the following:\n1. counts words \n2. builds a TFIDF weighted vocabulary\n3. Applies the TFIDF weights to the word counts to create a sparse matrix\n4. Runs K-means clustering on the sparce matrix\n5. Prints top words for each cluster using the largest features in the cluster centroid\nBe sure to install the following:\n1. pip3 install sklearn\n2. pip3 install pandas\n2. pip3 install scipy",
"import pandas as pd \n\nimport sys\nsys.version ",
"Unarchive",
"import tempfile\nimport zipfile\nimport os.path\n\nzipFile = \"./openSubtitles-5000.json.zip\"\n\nprint( \"Unarchiving ...\")\ntemp_dir = tempfile.mkdtemp()\nzip_ref = zipfile.ZipFile(zipFile, 'r')\nzip_ref.extractall(temp_dir)\nzip_ref.close()\n\nopenSubtitlesFile = os.path.join(temp_dir, \"openSubtitles-5000.json\")\nprint (\"file unarchived to:\" + openSubtitlesFile)\n",
"Tokenizing and Filtering a Vocabulary",
"\nimport json\nfrom sklearn.feature_extraction.text import CountVectorizer\n#from log_progress import log_progress\n\nmaxDocsToload = 50000\n\ntitles = []\ndef make_corpus(file):\n with open(file) as f:\n for i, line in enumerate(f):\n doc = json.loads(line)\n titles.append(doc.get('Title',''))\n #if 'Sci-Fi' not in doc.get('Genre',''):\n # continue\n if i % 100 == 0:\n print (\"%d \" % i, end='') \n yield doc.get('Text','')\n if i == maxDocsToload:\n break\n \nprint (\"Starting load ...\") \ntextGenerator = make_corpus(openSubtitlesFile) \ncount_vectorizer = CountVectorizer(min_df=2, max_df=0.75, ngram_range=(1,2), max_features=50000,\n stop_words='english', analyzer=\"word\", token_pattern=\"[a-zA-Z]{3,}\")\nterm_freq_matrix = count_vectorizer.fit_transform(textGenerator)\nprint (\"Done.\")\nprint ( \"term_freq_matrix shape = %s\" % (term_freq_matrix.shape,) )\nprint (\"term_freq_matrix = \\n%s\" % term_freq_matrix)\n",
"Feature Vocabulary",
"print( \"Vocabulary length = \", len(count_vectorizer.vocabulary_))\nword = \"data\";\nrainingIndex = count_vectorizer.vocabulary_[word];\nprint( \"token index for \\\"%s\\\" = %d\" % (word,rainingIndex))\nfeature_names = count_vectorizer.get_feature_names()\nprint( \"feature_names[%d] = %s\" % (rainingIndex, feature_names[rainingIndex]))\n\n\nfor i in range(0,1000):\n print( \"feature_names[%d] = %s\" % (i, feature_names[i]))",
"TFIDF Weighting\nThis applys the TFIDF weight to the matrix\ntfidf value = word count / number of documents word is in\nThe document vectors are also normalized so they have a euclidian magnitude of 1.0.",
"from sklearn.feature_extraction.text import TfidfTransformer\n\ntfidf = TfidfTransformer(norm=\"l2\")\ntfidf.fit(term_freq_matrix)\n\ntf_idf_matrix = tfidf.transform(term_freq_matrix)\nprint( tf_idf_matrix)",
"K-Means",
"%%time\nfrom sklearn.cluster import KMeans,MiniBatchKMeans\nimport numpy\n\nnum_clusters = 5\n#km = KMeans(n_clusters=num_clusters, verbose=True, init='k-means++', n_init=3, n_jobs=-1)\nkm = MiniBatchKMeans(n_clusters=num_clusters, verbose=True, init='k-means++', n_init=25, batch_size=2000)\n\nkm.fit(tf_idf_matrix)\n\nclusters = km.labels_.tolist()\nprint (\"cluster id for each document = %s\" % clusters)\n\nprint()\n# sort cluster centers by proximity to centroid\norder_centroids = km.cluster_centers_.argsort()[:, ::-1]\n\n \n\n\nlabels = pd.DataFrame(clusters, columns=['Cluster Labels'])\ncounts = pd.DataFrame(labels['Cluster Labels'].value_counts().sort_index())\ncounts.columns=['Document Count']\ndisplay(counts)\n\ntopNWords = 50\n\ndf = pd.DataFrame()\n\nfor i in range(num_clusters):\n clusterWords = []\n for topWordIndex,ind in enumerate(order_centroids[i, :topNWords]): \n clusterWords.append( feature_names[ind] )\n df['Cluster %d' % i] = pd.Series(clusterWords)\n #dtype='object', data= [''] * topNWords)\n #print(topWordIndex) \n #print(ind)\n #print(feature_names[ind])\n\ndf.style.set_properties(**{'text-align': 'right'})\ndf\n\n\ntitlesFrame = pd.DataFrame()\ntitlesFrame['Labels']=km.labels_\ntitlesFrame['Titles']=titles\n\nsort = titlesFrame.sort_values(by=['Labels','Titles'])\nfor i in range(num_clusters):\n display( sort.query('Labels == %d' % i) )",
"The End ..."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
JohnWinter/JohnWinter.github.io
|
bkds-datachallenge-Winter-John/Data_Challenge_Stats.ipynb
|
mit
|
[
"John Winter Data Challenge",
"import math\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn\nimport scipy\nfrom sklearn.decomposition import PCA\n\n\n%matplotlib inline",
"Question 1.a",
"height=np.array([150,163,167,168,170,178])",
"By default numpy uses linear interpolation",
"print 'min',np.min(height)\nprint '1st', np.percentile(height,25)\nprint 'median',np.median(height)\nprint '3rd',np.percentile(height,75)\nprint 'max',np.max(height)",
"We can also force numpy to use nearest values",
"print 'min',np.min(height)\nprint '1st', np.percentile(height,25, interpolation='lower')\nprint 'median',np.median(height)\nprint '3rd',np.percentile(height,75, interpolation='higher')\nprint 'max',np.max(height)",
"Question 1.b",
"print 'mean',np.mean(height)",
"Question 1.c",
"l_q75 = np.percentile(height,75)\nl_q25 = np.percentile(height,25)\nl_iqr = l_q75 - l_q25\n\nprint 'Linear interpolation IQR',l_iqr\n\nq75 = np.percentile(height,75, interpolation='higher')\nq25 = np.percentile(height,25, interpolation='lower')\niqr = q75 - q25\n\nprint 'IQR',iqr",
"Question 1.d",
"l_q25-l_iqr*1.5\n\nl_q75+l_iqr*1.5",
"150 and 178 are both possible outliers based on the IQR 'fence' definition using linear interpolation",
"q25-iqr*1.5\n\nq75+iqr*1.5",
"150 is a possible outlier based on the IQR 'fence' definition using nearest value\nQuestion 1.e",
"seaborn.boxplot(height,whis=1.5,vert='True')\nseaborn.plt.title('Linear Interpolation - Height')\n\n\nitem = {}\n\nitem[\"label\"] = 'box'\n\nitem[\"med\"] = 167.5\nitem[\"q1\"] = 163 \nitem[\"q3\"] = 170\n\nitem[\"whislo\"] = 163 \nitem[\"whishi\"] = 178 \nitem[\"fliers\"] = [] \n\nstats = [item]\n\nfig, axes = plt.subplots(1, 1)\naxes.bxp(stats)\naxes.set_title('Nearest Values - Height')\ny_axis = [150,163,167,168,170,178]\ny_values = ['150','163','167','168','170','178']\nplt.yticks(y_axis, y_values)",
"Question 1.f",
"print 'Variance',height.var()\n\nprint \"Standard Deviation\", height.std()\n",
"Question 2\ni. Metric,Interval/Discrete\nii. Non-metric, Ordinal\niii. Non-metric, Nominal/Categorical\niv. Possibly in between. Without more information would categorize Non-metric, Ordinal\nV. Possible to argue it is in between. I would categorize Non-metric, Ordinal\nQuestion 3\nShorthand used to remember the percentage of values that lie within a band around the mean in a normal distribution with a width of one, two and three standard deviations. \nQuestion 4.a\nThe 84th percentile. Using the rule above we can just do 50(mean)+68(1 std)/2 \nQuestion 4.b\nApproximately 27% of people. I used a standard normal distribution z-table.",
"print 'z-score:',(90-100)/16.0",
"Question 5\nProbability of Silver Given Silver=P(Find S/SC)/(P(Find S/SC)+P(Find S/GC)+P(Find S/MC))\nP(S|S)=1/(1+0+.5)=1/1.5=2/3\nQuestion 6\nIn order for the longer piece to be more than twice the length of the shorter piece, the line must be cut at below 1/3 the length or above 2/3. Therefore the probability is the union of these two or 2/3rds. \nQuestion 7\nProbabilty of flu given positive test = (P(Positive test | given present)*P(Disease present in anyone))/(P(probability of anyone getting positive test result))\n(P(B|A)*P(A))/(P(B))\nGiven:\nP(A) = .1\nP(B|~A) = .01\nP(~B|A) = .03\nDerived:\nP(B|A) = 1-P(~B|A) =.9997\nP(B) = P(B|A) x P(A) + P(B|~A) x P(~A) \nP(B) = .9997.1 + .01.9 =.109\nP(B|A) = .9997*.1/.109 = .917\nProbabilty of flu given positive test is approximately 92%\nQuestion 8\nThe mu over the time period is 100. I calculate the probability of critical failures in all the cases other than 100+ (0-99) and subtract the sum probability from 1",
"total=0\nfor x in range(0,100):\n total+=scipy.stats.distributions.poisson.pmf(x, 100)\n \nprint 'probability:', 1-total\n ",
"51% probability of 100 or more critical failures over the next 50 years. \nQuestion 9.a",
"SE = STD/sqrt(n)\n\n\n.8/math.sqrt(100) \n\n**Question 9.b**",
"Assuming a normal distribution we use the z table to find the corresponding number of standard deviations. \nThe 95% confidence interval is composed of the following:\nLower = 1.6-.08*1.96 = 1.443\nUpper = 1.6+.08*1.96 = 1.757\nQuestion 9.c\nU = Umbrellas/Apartment * Apartments\nU = 12,800\nQuestion 9.d\nSE = STD/sqrt(n)",
".8*8000/math.sqrt(100)",
"Question 9.d\nAssuming a normal distribution we use the z table to find the corresponding number of standard deviations.\nThe 95% confidence interval is composed of the following:\nLower = 12,800-640*1.96 = 11,545.6\nUpper = 12,800+640*1.96 = 14,054.4\nQuestion 10\nFirst I randomly sample from a normal distribution using numpy then count all values within the interval [0,1]",
"random_normal=np.random.normal(size=1000)\n\nseaborn.distplot(random_normal)\n\nwithin=((0 < x) & (x < 1)).sum()\n\nwithin/1000.0",
"33.3% of the randomly generated values were in the interval [0,1]\nQuestion 11\nThere is not enough information available to determine if the promotion was effective. \nThe month to month variance may be such that 350 is a typical occurance, and so the observation would have nothing to do with the promotion. \nQuestion 12\nTo answer this question I used a ttest to find whether the samples were drawn from the same population. If they are drawn from the same population then the p-value will be large, which suggests it would be likey to randomly draw these samples from the same population.",
"t,p_value=scipy.stats.ttest_ind([79.98,80.04,80.02,80.04,80.03,80.03,80.04,79.97,80.05,80.03,80.02],[80.02,79.94,79.98,79.97,79.97,80.03,79.95,79.97])\n\np_value\n\n1-p",
"This p_value is quite small. It is outside a 99% confidence interval so we would reject the null hypothesis and say that the results from the two methods differ.\nAs a check I plot the two data sets and see for myself that distributions do in fact look significantly different.",
"data={'a':[79.98,80.04,80.02,80.04,80.03,80.03,80.04,79.97,80.05,80.03,80.02],'b':[80.02,79.94,79.98,79.97,79.97,80.03,79.95,79.97]}\n\nax=seaborn.boxplot(data['a'])\nax.set_xlim([79.94,80.05])\n\nax=seaborn.boxplot(data['b'])\nax.set_xlim([79.94,80.05])\n\n**Question 13**",
"First perform chisquared test. Also will look at a a visual check.",
"air=pd.DataFrame(['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'],[1668,1407,1370,1309,1341,1338,1406,1446,1332,1363,1410,1526])\nair=air.reset_index()\nair.columns=['guests','month']\nair['expected']=[sum(air['guests'])/12]*12\nscipy.stats.chisquare(air['guests'],air['expected'])",
"The low p-value indicates that it is extremely unlikey that the bookings are uniformly distributed. It is possible that there is a season pattern.",
"air.plot()\n\nair_season=pd.DataFrame(['Spring','Summer','Fall','Winter'],[(1370+1309+1341),(1338+1406+1446),(1332+1363+1410),(1526+1668+1407)])\nair_season=air_season.reset_index()\nair_season.columns=['guests','month']\nair_season['expected']=[sum(air_season['guests'])/4]*4\nair_season.plot()",
"The visuals reinforce the evidence that the bookings are not uniformly distributed.",
"x=np.dot([[2,3],[2,1]],[[3],[2]])\n\nx=np.dot([[3,0,1],[-4,1,2],[-6,0,-2]],[[-1],[1],[3]])\n\nx\n\nx/4",
"Question 14.a",
"q14=pd.DataFrame([16,12,13,11,10,9,8,7,5,3,2,0],[8,10,6,2,8,-1,4,6,-3,-1,-3,0])\nq14=q14.reset_index()\nq14.columns=['x2','x1']\n\nnp.corrcoef(q14['x1'],q14['x2'])\n\nplt.scatter(q14['x1'],q14['x2'])\nplt.axis('equal');",
"The correlation of X1 and X2 is .74 and PCA will provide information about the nature of the linear relationship. If the question is to whether we can use PCA to cut out data and only retain the component with the highest variance I would lean towards no, but it is difficult to say without background information on the data and the desired use. \nQuestion 14.b",
"X=np.column_stack((q14['x1'],q14['x2']))\n\npca = PCA(n_components=2)\n\npca.fit(X)\n\npca.get_covariance() #Uses n vs. n-1\n\nprint(pca.components_) #Eigenvectors \n\n**Question 14.c**\n\nprint(pca.explained_variance_) #Eigenvalues, variance\n\ndef draw_vector(v0, v1, ax=None):\n ax = ax or plt.gca()\n arrowprops=dict(arrowstyle='->',linewidth=2,shrinkA=0, shrinkB=0)\n ax.annotate('', v1, v0, arrowprops=arrowprops)\n# plot data\nplt.scatter(q14['x1'],q14['x2'], alpha=0.2)\nfor length, vector in zip(pca.explained_variance_, pca.components_):\n v = vector * 1 * np.sqrt(length)\n draw_vector(pca.mean_, pca.mean_ + v)\n\n\n**Question 14.d**\n\nprint(pca.explained_variance_ratio_)",
"More than 87% of the variability is explained by a single component."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
mne-tools/mne-tools.github.io
|
0.23/_downloads/e41b6a898e7a75f8a9f1a6c00ca73857/20_visualize_epochs.ipynb
|
bsd-3-clause
|
[
"%matplotlib inline",
"Visualizing epoched data\nThis tutorial shows how to plot epoched data as time series, how to plot the\nspectral density of epoched data, how to plot epochs as an imagemap, and how to\nplot the sensor locations and projectors stored in ~mne.Epochs\nobjects.\nWe'll start by importing the modules we need, loading the continuous (raw)\nsample data, and cropping it to save memory:",
"import os\nimport mne\n\nsample_data_folder = mne.datasets.sample.data_path()\nsample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis_raw.fif')\nraw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False).crop(tmax=120)",
"To create the ~mne.Epochs data structure, we'll extract the event\nIDs stored in the :term:stim channel, map those integer event IDs to more\ndescriptive condition labels using an event dictionary, and pass those to the\n~mne.Epochs constructor, along with the ~mne.io.Raw data\nand the desired temporal limits of our epochs, tmin and tmax (for a\ndetailed explanation of these steps, see tut-epochs-class).",
"events = mne.find_events(raw, stim_channel='STI 014')\nevent_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,\n 'visual/right': 4, 'face': 5, 'button': 32}\nepochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, event_id=event_dict,\n preload=True)\ndel raw",
"Plotting Epochs as time series\n.. sidebar:: Interactivity in pipelines and scripts\nTo use the interactive features of the `~mne.Epochs.plot` method\nwhen running your code non-interactively, pass the ``block=True``\nparameter, which halts the Python interpreter until the figure window is\nclosed. That way, any channels or epochs that you mark as \"bad\" will be\ntaken into account in subsequent processing steps.\n\nTo visualize epoched data as time series (one time series per channel), the\nmne.Epochs.plot method is available. It creates an interactive window\nwhere you can scroll through epochs and channels, enable/disable any\nunapplied :term:SSP projectors <projector> to see how they affect the\nsignal, and even manually mark bad channels (by clicking the channel name) or\nbad epochs (by clicking the data) for later dropping. Channels marked \"bad\"\nwill be shown in light grey color and will be added to\nepochs.info['bads']; epochs marked as bad will be indicated as 'USER'\nin epochs.drop_log.\nHere we'll plot only the \"catch\" trials from the sample dataset\n<sample-dataset>, and pass in our events array so that the button press\nresponses also get marked (we'll plot them in red, and plot the \"face\" events\ndefining time zero for each epoch in blue). We also need to pass in\nour event_dict so that the ~mne.Epochs.plot method will know what\nwe mean by \"button\" — this is because subsetting the conditions by\ncalling epochs['face'] automatically purges the dropped entries from\nepochs.event_id:",
"catch_trials_and_buttonpresses = mne.pick_events(events, include=[5, 32])\nepochs['face'].plot(events=catch_trials_and_buttonpresses, event_id=event_dict,\n event_color=dict(button='red', face='blue'))",
"To see all sensors at once, we can use butterfly mode and group by selection:",
"epochs['face'].plot(events=catch_trials_and_buttonpresses, event_id=event_dict,\n event_color=dict(button='red', face='blue'),\n group_by='selection', butterfly=True)",
"Plotting projectors from an Epochs object\nIn the plot above we can see heartbeat artifacts in the magnetometer\nchannels, so before we continue let's load ECG projectors from disk and apply\nthem to the data:",
"ecg_proj_file = os.path.join(sample_data_folder, 'MEG', 'sample',\n 'sample_audvis_ecg-proj.fif')\necg_projs = mne.read_proj(ecg_proj_file)\nepochs.add_proj(ecg_projs)\nepochs.apply_proj()",
"Just as we saw in the tut-section-raw-plot-proj section, we can plot\nthe projectors present in an ~mne.Epochs object using the same\n~mne.Epochs.plot_projs_topomap method. Since the original three\nempty-room magnetometer projectors were inherited from the\n~mne.io.Raw file, and we added two ECG projectors for each sensor\ntype, we should see nine projector topomaps:",
"epochs.plot_projs_topomap(vlim='joint')",
"Note that these field maps illustrate aspects of the signal that have\nalready been removed (because projectors in ~mne.io.Raw data are\napplied by default when epoching, and because we called\n~mne.Epochs.apply_proj after adding additional ECG projectors from\nfile). You can check this by examining the 'active' field of the\nprojectors:",
"print(all(proj['active'] for proj in epochs.info['projs']))",
"Plotting sensor locations\nJust like ~mne.io.Raw objects, ~mne.Epochs objects\nkeep track of sensor locations, which can be visualized with the\n~mne.Epochs.plot_sensors method:",
"epochs.plot_sensors(kind='3d', ch_type='all')\nepochs.plot_sensors(kind='topomap', ch_type='all')",
"Plotting the power spectrum of Epochs\nAgain, just like ~mne.io.Raw objects, ~mne.Epochs objects\nhave a ~mne.Epochs.plot_psd method for plotting the spectral\ndensity_ of the data.",
"epochs['auditory'].plot_psd(picks='eeg')",
"It is also possible to plot spectral estimates across sensors as a scalp\ntopography, using ~mne.Epochs.plot_psd_topomap. The default parameters will\nplot five frequency bands (δ, θ, α, β, γ), will compute power based on\nmagnetometer channels, and will plot the power estimates in decibels:",
"epochs['visual/right'].plot_psd_topomap()",
"Just like ~mne.Epochs.plot_projs_topomap,\n~mne.Epochs.plot_psd_topomap has a vlim='joint' option for fixing\nthe colorbar limits jointly across all subplots, to give a better sense of\nthe relative magnitude in each band. You can change which channel type is\nused via the ch_type parameter, and if you want to view different\nfrequency bands than the defaults, the bands parameter takes a list of\ntuples, with each tuple containing either a single frequency and a subplot\ntitle, or lower/upper frequency limits and a subplot title:",
"bands = [(10, '10 Hz'), (15, '15 Hz'), (20, '20 Hz'), (10, 20, '10-20 Hz')]\nepochs['visual/right'].plot_psd_topomap(bands=bands, vlim='joint',\n ch_type='grad')",
"If you prefer untransformed power estimates, you can pass dB=False. It is\nalso possible to normalize the power estimates by dividing by the total power\nacross all frequencies, by passing normalize=True. See the docstring of\n~mne.Epochs.plot_psd_topomap for details.\nPlotting Epochs as an image map\nA convenient way to visualize many epochs simultaneously is to plot them as\nan image map, with each row of pixels in the image representing a single\nepoch, the horizontal axis representing time, and each pixel's color\nrepresenting the signal value at that time sample for that epoch. Of course,\nthis requires either a separate image map for each channel, or some way of\ncombining information across channels. The latter is possible using the\n~mne.Epochs.plot_image method; the former can be achieved with the\n~mne.Epochs.plot_image method (one channel at a time) or with the\n~mne.Epochs.plot_topo_image method (all sensors at once).\nBy default, the image map generated by ~mne.Epochs.plot_image will be\naccompanied by a scalebar indicating the range of the colormap, and a time\nseries showing the average signal across epochs and a bootstrapped 95%\nconfidence band around the mean. ~mne.Epochs.plot_image is a highly\ncustomizable method with many parameters, including customization of the\nauxiliary colorbar and averaged time series subplots. See the docstrings of\n~mne.Epochs.plot_image and mne.viz.plot_compare_evokeds (which is\nused to plot the average time series) for full details. Here we'll show the\nmean across magnetometers for all epochs with an auditory stimulus:",
"epochs['auditory'].plot_image(picks='mag', combine='mean')",
"To plot image maps for individual sensors or a small group of sensors, use\nthe picks parameter. Passing combine=None (the default) will yield\nseparate plots for each sensor in picks; passing combine='gfp' will\nplot the global field power (useful for combining sensors that respond with\nopposite polarity).",
"epochs['auditory'].plot_image(picks=['MEG 0242', 'MEG 0243'])\nepochs['auditory'].plot_image(picks=['MEG 0242', 'MEG 0243'], combine='gfp')",
"To plot an image map for all sensors, use\n~mne.Epochs.plot_topo_image, which is optimized for plotting a large\nnumber of image maps simultaneously, and (in interactive sessions) allows you\nto click on each small image map to pop open a separate figure with the\nfull-sized image plot (as if you had called ~mne.Epochs.plot_image on\njust that sensor). At the small scale shown in this tutorial it's hard to see\nmuch useful detail in these plots; it's often best when plotting\ninteractively to maximize the topo image plots to fullscreen. The default is\na figure with black background, so here we specify a white background and\nblack foreground text. By default ~mne.Epochs.plot_topo_image will\nshow magnetometers and gradiometers on the same plot (and hence not show a\ncolorbar, since the sensors are on different scales) so we'll also pass a\n~mne.channels.Layout restricting each plot to one channel type.\nFirst, however, we'll also drop any epochs that have unusually high signal\nlevels, because they can cause the colormap limits to be too extreme and\ntherefore mask smaller signal fluctuations of interest.",
"reject_criteria = dict(mag=3000e-15, # 3000 fT\n grad=3000e-13, # 3000 fT/cm\n eeg=150e-6) # 150 µV\nepochs.drop_bad(reject=reject_criteria)\n\nfor ch_type, title in dict(mag='Magnetometers', grad='Gradiometers').items():\n layout = mne.channels.find_layout(epochs.info, ch_type=ch_type)\n epochs['auditory/left'].plot_topo_image(layout=layout, fig_facecolor='w',\n font_color='k', title=title)",
"To plot image maps for all EEG sensors, pass an EEG layout as the layout\nparameter of ~mne.Epochs.plot_topo_image. Note also here the use of\nthe sigma parameter, which smooths each image map along the vertical\ndimension (across epochs) which can make it easier to see patterns across the\nsmall image maps (by smearing noisy epochs onto their neighbors, while\nreinforcing parts of the image where adjacent epochs are similar). However,\nsigma can also disguise epochs that have persistent extreme values and\nmaybe should have been excluded, so it should be used with caution.",
"layout = mne.channels.find_layout(epochs.info, ch_type='eeg')\nepochs['auditory/left'].plot_topo_image(layout=layout, fig_facecolor='w',\n font_color='k', sigma=1)",
".. LINKS"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
AllenDowney/ProbablyOverthinkingIt
|
frame_example.ipynb
|
mit
|
[
"Vectors, Frames, and Transforms\nA quick example using NetworkX to implement a basic graph algorithm.\nAllen B. Downey\nMIT License",
"# this line makes Jupyter show figures in the notebook\n%matplotlib inline",
"Vector represents a Euclidean vector; it is implemented using a NumPy array of coordinates and a reference to the frame those coordinates are defined in.",
"class FrameError(ValueError):\n \"\"\"Indicates an error related to Frames.\"\"\"\n\nclass Vector:\n def __init__(self, array, frame=None):\n \"\"\"A vector is an array of coordinates and a frame of reference.\n\n array: sequence of coordinates\n frame: Frame object\n \"\"\"\n self.array = np.asarray(array)\n self.frame = frame\n\n def __str__(self):\n if self.frame == None:\n return '^{O}%s' % (str(self.array), )\n else:\n return '^{%s}%s' % (str(self.frame), str(self.array))\n \n def __repr__(self):\n return 'Frame(%s, %s)' % (str(self.frame), str(self.array))\n\n def __add__(self, other):\n if self.frame != other.frame:\n raise FrameError(\"Vectors must be relative to the same frame.\")\n\n return Vector(self.array + other.array, self.frame)\n",
"Rotation represents a rotation matrix, one of several kinds of transformation matrices. We'll use it as part of the implementation of Transform.",
"class Rotation:\n def __init__(self, array):\n self.array = array\n \n def __str__(self):\n return 'Rotation\\n%s' % str(self.array)\n \n __repr__ = __str__\n\n\n def __neg__(self):\n return Rotation(-self.array)\n\n def __mul__(self, other):\n \"\"\"Apply the rotation to a Vector.\"\"\"\n return np.dot(self.array, other.array)\n\n __call__ = __mul__\n\n @staticmethod\n def from_axis(axis, theta):\n x, y, z = np.ravel(axis.array)\n c = np.cos(theta)\n u = 1.0-c\n s = np.sqrt(1.0-c*c)\n xu, yu, zu = x*u, y*u, z*u\n v1 = [x*xu + c, x*yu - z*s, x*zu + y*s]\n v2 = [x*yu + z*s, y*yu + c, y*zu - x*s]\n v3 = [x*zu - y*s, y*zu + x*s, z*zu + c]\n return Rotation(np.array([v1, v2, v3]))\n\n def to_axis(self):\n # return the equivalent angle-axis as (khat, theta)\n pass\n\n def transpose(self):\n return Rotation(np.transpose(self.array))\n\n inverse = transpose\n \n",
"A Transform is a rotation (represented by a Rotation object) and an origin (represented by a Vector). The destination of the transform is the frame of the origin vector. The source of the transform is provided as an argument.\nWhen you create a transform, it adds itself to the source frame.",
"class Transform:\n \"\"\"Represents a transform from one Frame to another.\"\"\"\n\n def __init__(self, rot, org, source=None):\n \"\"\"Instantiates a Transform.\n\n rot: Rotation object\n org: origin Vector\n source: source Frame\n \"\"\"\n self.rot = rot\n self.org = org\n self.dest = org.frame\n self.source = source\n self.source.add_transform(self)\n\n def __str__(self):\n \"\"\"Returns a string representation of the Transform.\"\"\"\n if self.dest == None:\n return '%s' % self.source.name\n return '_{%s}^{O}T' % self.source.name\n else:\n return '_{%s}^{%s}T' % (self.source.name, self.dest.name)\n \n __repr__ = __str__\n \n def __mul__(self, other):\n \"\"\"Applies a Transform to a Vector or Transform.\"\"\"\n if isinstance(other, Vector):\n return self.mul_vector(other)\n\n if isinstance(other, Transform):\n return self.mul_transform(other)\n\n __call__ = __mul__\n\n def mul_vector(self, p):\n \"\"\"Applies a Transform to a Vector.\n\n p: Vector\n\n Returns: Vector\n \"\"\"\n if p.frame != self.source:\n raise FrameError(\n \"The frame of the vector must be the source of the transform\")\n return Vector(self.rot * p, self.dest) + self.org\n\n def mul_transform(self, other):\n \"\"\"Applies a Transform to another Transform.\n\n other: Transform\n\n Returns Transform\n \"\"\"\n if other.dest != self.source:\n raise FrameError(\n \"This frames source must be the other frame's destination.\")\n\n rot = Rotation(self.rot * other.rot)\n t = Transform(rot, self * other.org, other.source)\n return t\n\n def inverse(self):\n \"\"\"Computes the inverse transform.\n\n Returns: Transform\n \"\"\"\n irot = self.rot.inverse()\n iorg = Vector(-(irot * self.org), self.source)\n t = Transform(irot, iorg, self.dest)\n return t\n\n",
"A Frame has a name and a dictionary that includes the frames we can reach directly from this frame, and the transform that gets there.\nThe roster is a list of all frames.",
"class Frame:\n \"\"\"Represents a frame of reference.\"\"\"\n\n # list of Frames\n roster = []\n \n def __init__(self, name):\n \"\"\"Instantiate a Frame.\n\n name: string\n \"\"\"\n self.name = name\n self.transforms = {}\n Frame.roster.append(self)\n\n def __str__(self): \n return self.name\n \n __repr__ = __str__\n\n def add_transform(self, transform):\n \"\"\"A frames is defined by a Transform relative to another Frame.\n\n transform: Transform object\n \"\"\"\n if transform.source != self:\n raise FrameError(\"Source of the transform must be this Frame.\")\n\n if transform.dest:\n self.transforms[transform.dest] = transform\n\n def dests(self):\n \"\"\"Returns a list of the Frames we know how to Transform to.\"\"\"\n return self.transforms.keys()",
"We'll start with one frame that is not defined relative to any other frame.",
"origin = Frame('O')\norigin",
"Now we'll create Frame A, which is defined by a transform relative to O.\nThe string representation of a Frame is in LaTex.",
"import numpy as np\n\ntheta = np.pi/2\nxhat = Vector([1, 0, 0], origin)\nrx = Rotation.from_axis(xhat, theta)\na = Frame('A')\nt_ao = Transform(rx, xhat, a)\nt_ao",
"We can use IPython.display to render the LaTeX:",
"from IPython.display import Math\n\ndef render(obj):\n return Math(str(obj))",
"Here's the usual notation for the transform from A to O.",
"render(t_ao)",
"Here's Frame B, defined relative to A by a rotation around the yhat axis.",
"yhat = Vector([0, 1, 0], a)\nry = Rotation.from_axis(yhat, theta)\nb = Frame('B')\nt_ba = Transform(ry, yhat, b)\nrender(t_ba)",
"A Frame C, defined relative to B by a rotation around the zhat axis.",
"zhat = Vector([0, 0, 1], b)\nrz = Rotation.from_axis(zhat, theta)\nc = Frame('C') \nt_cb = Transform(rz, zhat, c)\nrender(t_cb)",
"Now let's make a vector defined in C.",
"p_c = Vector([1, 1, 1], c)\nrender(p_c)",
"And we can transform it to B:",
"p_b = t_cb(p_c)\nrender(p_b)",
"Then to A:",
"p_a = t_ba(p_b)\nrender(p_a)",
"And finally to O.",
"p = t_ao(p_a)\nrender(p)",
"If we didn't know how to get from one frame to another, we could search for the shortest path from the start frame to the destination. I'll use NetworkX.",
"import networkx as nx",
"The following function adds the edges from a given frame to the graph.",
"def add_edges(G, frame):\n for neighbor, transform in frame.transforms.items():\n G.add_edge(frame, neighbor, transform=transform)",
"And here's how we can make a graph from a list of frames.",
"def make_graph(frames):\n G = nx.DiGraph()\n for frame in frames:\n add_edges(G, frame)\n return G",
"Here's the list of frames:",
"frames = Frame.roster\nframes",
"And a dictionary that maps from each frame to its label:",
"labels = dict([(frame, str(frame)) for frame in frames])\nlabels",
"So we can show the frames, and transforms between them, graphically.",
"G = make_graph(Frame.roster)\nnx.draw(G, labels=labels)\n\nnx.shortest_path(G, c, origin)",
"When we apply a transform to a vector, we get a vector in a new frame.\nWhen we apply a transform to another transform, we get a new transform that composes the two transforms.\nFor example cbao, below, composes the transforms from C to B, C to A, and A to O. The result is a transform directly from C to O.",
"cbao = t_ao(t_ba(t_cb))\nrender(cbao)\n\np = cbao(p_c)\nrender(p)",
"When we create the new transform, it gets added to the network, creating shortcuts.\nIf we draw the network again, we can see the new links.",
"G = make_graph([origin, a, b, c])\nnx.draw(G, labels=labels)",
"And if we find the shortest path, its shorter now.",
"nx.shortest_path(G, c, origin)",
"We can also compute an inverse transform that goes in the other direction.",
"inv = cbao.inverse()\nrender(inv)",
"And confirm that it gets us back where we started.",
"p_c = inv(p)\nrender(p_c)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
ltiao/project-euler
|
problem-9-special-pythagorean-triplet.ipynb
|
unlicense
|
[
"A Pythagorean triplet is a set of three natural numbers, $a < b < c$, for which,\n\\begin{equation}\na^2 + b^2 = c^2\n\\end{equation}\nFor example, $3^2 + 4^2 = 9 + 16 = 25 = 5^2$.\nThere exists exactly one Pythagorean triplet for which $a + b + c = 1000$.\nFind the product $abc$.\nRemark\nThis is a fairly straighforward constraint satisfaction problem (CSP) and is perhaps most easily solved in a CSP modelling language such as MiniZinc. However, to employ such tools would be to defeat the very purpose of the exercise, which is to give us practice with implementation.\n<!-- TEASER_END -->",
"from six.moves import range, reduce",
"Version 1: The Obvious",
"pair_sum_eq = lambda n, start=0: ((i, n-i) for i in range(start, (n>>1)+1))\n\nlist(pair_sum_eq(21, 5))",
"Note that $3a < a + b + c = 1000$, so $a < \\frac{1000}{3} \\Leftrightarrow a \\leq \\lfloor \\frac{1000}{3} \\rfloor = 333$ so $1 \\leq a \\leq 333$. Therefore, we need only iterate up to 333 in the outermost loop. Now, $b + c = 1000 - a$, so $667 \\leq b + c \\leq 999$, so we look at all pairs $333 \\leq b < c$ such that $b + c = 1000 - a$ with the help of the function pair_sum_eq. Within the innermost loop, the $a, b, c$ now satisfy the constraints $a < b < c$ and $a + b + c = 1000$ so now we need only check that they indeed form a Pythagorean triplet, i.e. $a^2 + b^2 = c^2$, and yield it.\n<!-- TEASER_END -->",
"def pythagorean_triplet_sum_eq(n):\n for a in range(1, n//3+1):\n for b, c in pair_sum_eq(n-a, start=n//3):\n if a*a + b*b == c*c:\n yield a, b, c\n\nlist(pythagorean_triplet_sum_eq(1000))\n\nprod = lambda iterable: reduce(lambda x,y: x*y, iterable)\n\nprod(pythagorean_triplet_sum_eq(1000))",
"Version 2: Euclid's Formula",
"# TODO"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
statsmodels/statsmodels.github.io
|
v0.12.2/examples/notebooks/generated/mediation_survival.ipynb
|
bsd-3-clause
|
[
"Mediation analysis with duration data\nThis notebook demonstrates mediation analysis when the\nmediator and outcome are duration variables, modeled\nusing proportional hazards regression. These examples\nare based on simulated data.",
"import pandas as pd\nimport numpy as np\nimport statsmodels.api as sm\nfrom statsmodels.stats.mediation import Mediation",
"Make the notebook reproducible.",
"np.random.seed(3424)",
"Specify a sample size.",
"n = 1000",
"Generate an exposure variable.",
"exp = np.random.normal(size=n)",
"Generate a mediator variable.",
"def gen_mediator():\n mn = np.exp(exp)\n mtime0 = -mn * np.log(np.random.uniform(size=n))\n ctime = -2 * mn * np.log(np.random.uniform(size=n))\n mstatus = (ctime >= mtime0).astype(np.int)\n mtime = np.where(mtime0 <= ctime, mtime0, ctime)\n return mtime0, mtime, mstatus",
"Generate an outcome variable.",
"def gen_outcome(otype, mtime0):\n if otype == \"full\":\n lp = 0.5*mtime0\n elif otype == \"no\":\n lp = exp\n else:\n lp = exp + mtime0\n mn = np.exp(-lp)\n ytime0 = -mn * np.log(np.random.uniform(size=n))\n ctime = -2 * mn * np.log(np.random.uniform(size=n))\n ystatus = (ctime >= ytime0).astype(np.int)\n ytime = np.where(ytime0 <= ctime, ytime0, ctime)\n return ytime, ystatus",
"Build a dataframe containing all the relevant variables.",
"def build_df(ytime, ystatus, mtime0, mtime, mstatus):\n df = pd.DataFrame({\"ytime\": ytime, \"ystatus\": ystatus,\n \"mtime\": mtime, \"mstatus\": mstatus,\n \"exp\": exp})\n return df",
"Run the full simulation and analysis, under a particular\npopulation structure of mediation.",
"def run(otype):\n\n mtime0, mtime, mstatus = gen_mediator()\n ytime, ystatus = gen_outcome(otype, mtime0)\n df = build_df(ytime, ystatus, mtime0, mtime, mstatus)\n\n outcome_model = sm.PHReg.from_formula(\"ytime ~ exp + mtime\", status=\"ystatus\", data=df)\n mediator_model = sm.PHReg.from_formula(\"mtime ~ exp\", status=\"mstatus\", data=df)\n\n med = Mediation(outcome_model, mediator_model, \"exp\", \"mtime\",\n outcome_predict_kwargs={\"pred_only\": True})\n med_result = med.fit(n_rep=20)\n print(med_result.summary())",
"Run the example with full mediation",
"run(\"full\")",
"Run the example with partial mediation",
"run(\"partial\")",
"Run the example with no mediation",
"run(\"no\")"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
espressomd/espresso
|
doc/tutorials/error_analysis/error_analysis_part2.ipynb
|
gpl-3.0
|
[
"Tutorial: Error Estimation - Part 2 (Autocorrelation Analysis)\nTable of contents\n\nData generation\nIntroduction\nComputing the auto-covariance function\nAutocorrelation time\nReferences\n\nData generation\nThis first code cell will provide us with the same two data sets as in the previous part of this tutorial. We will use them to get familiar with the autocorrelation analysis method of error estimation.",
"import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'font.size': 18})\nimport sys\nimport logging\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)\n\nnp.random.seed(43)\n\ndef ar_1_process(n_samples, c, phi, eps):\n '''\n Generate a correlated random sequence with the AR(1) process.\n\n Parameters\n ----------\n n_samples: :obj:`int`\n Sample size.\n c: :obj:`float`\n Constant term.\n phi: :obj:`float`\n Correlation magnitude.\n eps: :obj:`float`\n Shock magnitude.\n '''\n ys = np.zeros(n_samples)\n if abs(phi) >= 1:\n raise ValueError(\"abs(phi) must be smaller than 1.\")\n # draw initial value from normal distribution with known mean and variance\n ys[0] = np.random.normal(loc=c / (1 - phi), scale=np.sqrt(eps**2 / (1 - phi**2)))\n for i in range(1, n_samples):\n ys[i] = c + phi * ys[i - 1] + np.random.normal(loc=0., scale=eps)\n return ys\n\n# generate simulation data using the AR(1) process\n\nlogging.info(\"Generating data sets for the tutorial ...\")\n\nN_SAMPLES = 100000\n\nC_1 = 2.0\nPHI_1 = 0.85\nEPS_1 = 2.0\ntime_series_1 = ar_1_process(N_SAMPLES, C_1, PHI_1, EPS_1)\n\nC_2 = 0.05\nPHI_2 = 0.999\nEPS_2 = 1.0\ntime_series_2 = ar_1_process(N_SAMPLES, C_2, PHI_2, EPS_2)\n\nlogging.info(\"Done\")\n\nfig = plt.figure(figsize=(10, 6))\nplt.title(\"The first 1000 samples of both time series\")\nplt.plot(time_series_1[0:1000], label=\"time series 1\")\nplt.plot(time_series_2[0:1000], label=\"time series 2\")\nplt.xlabel(\"$i$\")\nplt.ylabel(\"$X_i$\")\nplt.legend()\nplt.show()",
"Introduction\nIn the first part of the error analysis tutorial we have introduced the binning analysis, an easy and common tool for error estimation. However, we have seen that it failed to deliver an estimate for our second data set. In this tutorial, we will get to know a different method: the autocorrelation analysis, sometimes also called auto covariance analysis. It not only delivers an estimate for the standard error of the mean (SEM), but also information on the correlations and the optimal sampling rate.\nBefore we start computing anything, we will give a brief overview over the relevant quantities and how they relate to each other. This outlines how one would go about computing these quantities. The end goal of this process is to define an estimator for the standard error of the mean $\\sigma_\\overline{X}$. And if the data allows for it, it can be calculated. If it fails, autocorrelation analysis provides more insight into the causes of the failure than the binning analysis from the first part of this tutorial. Albeit being more involved, it can provide a valuable tool for systems with difficult statistics.\nLet us begin the theory by defining the auto-covariance function $R^{XX}(\\tau)$ of an observable $X$, at lag time $\\tau$:\n$\n\\begin{align}\n R^{XX}(\\tau) &\\equiv \\langle (X(t)-\\langle X \\rangle)(X(t+\\tau)-\\langle X \\rangle) \\rangle \\\n &= \\langle X(t) X(t+\\tau) \\rangle - \\langle X \\rangle^2, \\tag{1}\n\\end{align}\n$\nwhere $\\langle \\dots \\rangle$ denotes the ensemble average of the expression inside the angled brackets — e.g. $\\langle X \\rangle$ is the true mean value of the observable $X$. In the previous part we have established an understanding of correlations as being the \"similarity\" of successive samples. This is an intuitive but inaccurate understanding. The auto-covariance function provides a means to measure and quantify correlation.\nComputing the auto-covariance for $\\tau=0$ yields the variance $\\sigma=\\langle X^2 \\rangle - \\langle X \\rangle^2$. Normalizing the auto-covariance function by the variance yields the autocorrelation function (ACF)\n$\n\\begin{align}\n A^{XX}(\\tau) = \\frac{R^{XX}(\\tau)}{R^{XX}(0)} = \\frac{\\langle X(t) X(t+\\tau) \\rangle - \\langle X \\rangle^2}{\\langle X^2 \\rangle - \\langle X \\rangle^2}. \\tag{2}\n\\end{align}\n$\nThe ACF can be used to estimate the correlation time $\\tau_X$. Often, this can be simply done by fitting an exponential function to $A^{XX}$, from which we extract $\\tau_{X, \\mathrm{exp}}$ as the inverse decay rate. However, the ACF doesn't necessarily assume the shape of an exponential. That is when another quantity, called the integrated autocorrelation time\n$\n\\begin{align}\n \\tau_{X, \\mathrm{int}} \\equiv \\int_0^\\infty A^{XX}(\\tau) \\mathrm{d}\\tau \\tag{3}\n\\end{align}\n$\ncomes into play. Those two correlation times $\\tau_{X, \\mathrm{int}}$ and $\\tau_{X, \\mathrm{exp}}$ are identical for exponential ACFs, but if the ACF isn't exponential, $\\tau_{X, \\mathrm{int}}$ is the only meaningful quantity. It is related to the effective number of samples\n$\n\\begin{align}\n N_\\mathrm{eff} = \\frac{N}{2 \\tau_{X, \\mathrm{int}}} \\tag{4}\n\\end{align}\n$\nand also to the standard error of the mean (SEM)\n$\n\\begin{align}\n \\sigma_\\overline{X} = \\sqrt{\\frac{2 \\sigma_X^2 \\tau_{X, \\mathrm{int}}}{N}} = \\sqrt{\\frac{\\sigma_X^2}{N_\\mathrm{eff}}}. 
\\tag{5}\n\\end{align}\n$\nwhere $\\sigma_X^2 = \\langle X^2 \\rangle-\\langle X \\rangle ^2$ is the variance of the observable $X$. \nComputing the auto-covariance function\nEquations (1) and (2) involve an infinite, continuous time series $X(t)$. In the simulation world however, we work with finite, discrete time series. These limitations dictate how we can estimate the true (unknown) autocorrelation function. For a finite, time-discrete set of samples $X_i$, a commonly used estimator is the following expression\n$\n\\begin{align}\n    \\hat{R}^{XX}_j = \\frac{1}{N} \\sum^{N-|j|}_{i=1}(X_i-\\overline{X})(X_{i+|j|}-\\overline{X}), \\tag{6}\n\\end{align}\n$\nwhere $N$ is the total number of samples, and $\\overline{X}=\\frac{1}{N}\\sum_{i=1}^N X_i$ is the average of all samples. This estimates the auto-covariance function at lag time $\\tau=j\\Delta t$ where $\\Delta t$ is the time separation between samples.\nBefore we continue, we want to notify the reader about a few subtleties regarding this estimator:\n* Ideally, we would use $\\langle X \\rangle$ instead of $\\overline{X}$, since the latter is only an estimate of the former. In most cases we don't know $\\langle X \\rangle$, thus we introduce a small unknown bias by using the estimated mean $\\overline{X}$ instead.\n* Actually, the sum does not contain $N$ terms, but $N-|j|$ terms. Consequently, we should divide the whole sum by $N-|j|$ and not by $N$. In fact, this approach yields a different estimator to the auto-covariance function (the so-called unbiased estimator). However, for large $N$ and small $j$, both estimators yield similar results. This is why the simpler $N$ is commonly used anyway.\nExercise\nCompute the auto-covariance function of the data in time_series_1 using the estimator in equation (6) and store it into a numpy array called autocov. Compute it for all $j$ from 0 up to 999. Plot it against $j$.\n```python\n# naive Python solution\nautocov = np.zeros(300)\navg = np.average(time_series_1)\nfor j in range(300):\n    temp = 0.\n    for i in range(N_SAMPLES - j):\n        temp += (time_series_1[i] - avg) * (time_series_1[i + j] - avg)\n    autocov[j] = temp / N_SAMPLES\nfig = plt.figure(figsize=(10, 6))\nplt.plot(autocov)\nplt.xlabel(\"lag time $j$\")\nplt.ylabel(\"$\\hat{R}^{XX}_j$\")\nplt.show()\n```\nDepending on your implementation, this computation might have taken a significant amount of time (up to a couple tens of seconds). When doing a lot of these computations, using highly optimized routines for numerics can save a lot of time. The following example shows how to utilize the common Numpy package to do the job quicker.",
"# Numpy solution\ntime_series_1_centered = time_series_1 - np.average(time_series_1)\nautocov = np.empty(1000)\n\nfor j in range(1000):\n autocov[j] = np.dot(time_series_1_centered[:N_SAMPLES - j], time_series_1_centered[j:])\nautocov /= N_SAMPLES\n\nfig = plt.figure(figsize=(10, 6))\nplt.gca().axhline(0, color=\"gray\", linewidth=1)\nplt.plot(autocov)\nplt.xlabel(\"lag time $j$\")\nplt.ylabel(\"$\\hat{R}^{XX}_j$\")\nplt.show()",
"We can see that the auto-covariance function starts at a high value and decreases quickly into a long noisy tail which fluctuates around zero. The high values at short lag times indicate that there are strong correlations at short time scales, as expected. However, even though the tail looks uninteresting, it can bear important information about the statistics of your data. Small systematic deviations from 0 in the tail can be a hint that long-term correlations exist in your system. On the other hand, if there is no sign of a systematic deviation from 0 in the tail, this usually means that the correlation is decaying well within the simulation time, and that the statistics are good enough to estimate an error. In the above example, the correlation quickly decays to zero. Despite the noise in the tail, the statistics seem very reasonable.\nAutocorrelation time\nContinuing our example, we can zoom into the first part of the auto-covariance function (using a log scale). We see that it indeed does have similarities with an exponential decay curve. In general, it isn't an exponential, but often can be approximated using one. If it matches reasonably well, the inverted prefactor in the exponential can be directly used as the correlation time, which is a measure for how many sampling intervals it takes for correlations to decay. Execute the following code cell for an illustration.",
"from scipy.optimize import curve_fit\n\ndef exp_fnc(x, a, b):\n return a * np.exp(-x / b)\n\nN_MAX = 1000\nj = np.arange(1, N_MAX)\nj_log = np.logspace(0, 3, 100)\npopt, pcov = curve_fit(exp_fnc, j, autocov[1:N_MAX], p0=[15, 10])\n\n# compute analytical ACF of AR(1) process\nAN_SIGMA_1 = np.sqrt(EPS_1 ** 2 / (1 - PHI_1 ** 2))\nAN_TAU_EXP_1 = -1 / np.log(PHI_1)\nan_acf_1 = AN_SIGMA_1**2 * np.exp(-j / AN_TAU_EXP_1)\n\nfig = plt.figure(figsize=(10, 6))\nplt.plot(j, autocov[1:N_MAX], \"x\", label=\"numerical ACF\")\nplt.plot(j, an_acf_1, \"-.\", linewidth=3, label=\"analytical ACF\")\nplt.plot(j_log, exp_fnc(j_log, popt[0], popt[1]), label=\"exponential fit\")\nplt.xlim((1, N_MAX))\nplt.xscale(\"log\")\nplt.xlabel(\"lag time $j$\")\nplt.ylabel(\"$\\hat{R}^{XX}_j$\")\nplt.legend()\nplt.show()\n\nprint(f\"Exponential autocorrelation time: {popt[1]:.2f} sampling intervals\")",
"Since the auto-covariance function is very well matched with an exponential, this analysis already gives us a reasonable estimate of the autocorrelation time. Here we have the luxury to have an analytical ACF at hand which describes the statistics of the simple AR(1) process, which generated our simulation data. It is in fact exponential and agrees very well with the numerical ACF. In practice, however, you will neither know an analytical ACF, nor know if the ACF is exponential, at all. In many systems, the ACF is more or less exponential, but this is not necessarily the case.\nFor the sake of completeness, we also want to compute the integrated correlation time. This technique must be applied when the ACF is not exponential. For that purpose, we first need to normalize the auto-covariance function in order to get the autocorrelation function (as opposed to auto-covariance function), and then integrate over it.\nThe integration in equation (3) is again approximated as a discrete sum over the first $j_\\mathrm{max}$ values of the ACF (except $\\hat{A}^{XX}_0$, which is always 1):\n$\n\\begin{align}\n \\hat{\\tau}{X, \\mathrm{int}} = \\frac{1}{2} + \\sum{j=1}^{j_\\mathrm{max}} \\hat{A}^{XX}_j \\tag{7}\n\\end{align}\n$\nwhere $\\hat{A}^{XX}j = \\hat{R}^{XX}_j / \\hat{R}^{XX}_0$ is the estimated ACF. The sum is evaluated up to a maximum number of terms $j\\mathrm{max}$. This maximum number of terms is a crucial parameter. In the following code cell, $\\hat{\\tau}{X, \\mathrm{int}}$ is plotted over $j\\mathrm{max}$.",
"# compute the ACF\nacf = autocov / autocov[0]\n\n# integrate the ACF (suffix _v for vectors)\nj_max_v = np.arange(1000)\ntau_int_v = np.zeros(1000)\nfor j_max in j_max_v:\n tau_int_v[j_max] = 0.5 + np.sum(acf[1:j_max + 1])\n\n# plot\nfig = plt.figure(figsize=(10, 6))\nplt.plot(j_max_v[1:], tau_int_v[1:], label=\"numerical summing\")\nplt.plot(j_max_v[(1, -1),], np.repeat(AN_TAU_EXP_1, 2), \"-.\", label=\"analytical\")\nplt.xscale(\"log\")\nplt.xlabel(r\"sum length $j_\\mathrm{max}$\")\nplt.ylabel(r\"$\\hat{\\tau}_{X, \\mathrm{int}}$\")\nplt.legend()\nplt.show()",
"In this plot, we have the analytical solution at hand, which is a luxury not present in real applications. For the analysis, we therefore need to act as if there was no analytic solution:\nWe see that the integrated autocorrelation time seems to quickly reach a plateau at a $j_\\mathrm{max}$ of around 20. Further summation over the noisy tail of the ACF results in a random-walky behaviour. And for even larger $j_\\mathrm{max}$, the small unknown bias of the ACF starts to accumulate, which is clearly unwanted. Thus, we have to find a good point to cut off the sum. There are several ways to determine a reasonable value for $j_\\mathrm{max}$. Here, we demonstrate the one by A. Sokal <a href='#[1]'>[1]</a>, who states that it performs well if there are at least 1000 samples in the time series. We take the smallest $j_\\mathrm{max}$, for which the following inequality holds:\n$\nj_\\mathrm{max} \\geq C \\times \\hat{\\tau}{X, \\mathrm{int}}(j\\mathrm{max}) \\tag{8}\n$\nwhere $C$ is a constant of about 5, or higher if convergence of $\\hat{\\tau}{X, \\mathrm{int}}$ is slower than an exponential (up to $C=10$). In the following code cell, we plot the left side against the right side, and determine $j\\mathrm{max}$.",
"C = 5.0\n\n# determine j_max\nj_max = 0\nwhile j_max < C * tau_int_v[j_max]:\n j_max += 1\n\n\n# plot\nfig = plt.figure(figsize=(10, 6))\nplt.plot(j_max_v[1:], C * tau_int_v[1:])\nplt.plot(j_max_v[1:], j_max_v[1:])\nplt.plot([j_max], [C * tau_int_v[j_max]], \"ro\")\nplt.xscale(\"log\")\nplt.ylim((0, 50))\nplt.xlabel(r\"sum length $j_\\mathrm{max}$\")\nplt.ylabel(r\"$C \\times \\hat{\\tau}_{X, \\mathrm{int}}$\")\nplt.show()\n\nprint(f\"j_max = {j_max}\")",
"Using this value of $j_\\mathrm{max}$, we can calculate the integrated autocorrelation time $\\hat{\\tau}_{X, \\mathrm{int}}$ and estimate the SEM with equation (5).",
"tau_int = tau_int_v[j_max]\nprint(f\"Integrated autocorrelation time: {tau_int:.2f} time steps\\n\")\n\nN_eff = N_SAMPLES / (2 * tau_int)\nprint(f\"Original number of samples: {N_SAMPLES}\")\nprint(f\"Effective number of samples: {N_eff:.1f}\")\nprint(f\"Ratio: {N_eff / N_SAMPLES:.3f}\\n\")\n\nsem = np.sqrt(autocov[0] / N_eff)\nprint(f\"Standard error of the mean: {sem:.4f}\")",
"Exercise\n\nWrite a function called autocorrelation_analysis, which takes as arguments\ndata (a numpy array containing the time series to be analyzed), \nC (which is the criterion to find $j_\\mathrm{max}$) and \nwindow (an integer that defines how much of the auto-covariance function is computed during the analysis).\n\nThe function shall return the SEM and logging.info out:\n * mean\n * SEM\n * integrated autocorrelation time\n * effective number of samples. \nIt should also make a plot of the autocorrelation function and the integrated ACF. You can adapt the other examples and solutions in this notebook for this function.\n\nUse this function to analyze time_series_2.\n\nThis function can serve as a template for the analysis of your own simulation data.\n```python\ndef autocorrelation_analysis(data, C, window):\n # initial processing\n data_size = len(data)\n avg = np.average(data)\n data_centered = data - avg\n# auto-covariance function\nautocov = np.empty(window)\nfor j in range(window):\n autocov[j] = np.dot(data_centered[:data_size - j], data_centered[j:])\nautocov /= data_size\n\n# autocorrelation function\nacf = autocov / autocov[0]\n\n# integrate autocorrelation function\nj_max_v = np.arange(window)\ntau_int_v = np.zeros(window)\nfor j_max in j_max_v:\n tau_int_v[j_max] = 0.5 + np.sum(acf[1:j_max + 1])\n\n# find j_max\nj_max = 0\nwhile j_max < C * tau_int_v[j_max]:\n j_max += 1\n\n# wrap it up\ntau_int = tau_int_v[j_max]\nN_eff = data_size / (2 * tau_int)\nsem = np.sqrt(autocov[0] / N_eff)\n\n# create ACF plot\nfig = plt.figure(figsize=(10, 6))\nplt.gca().axhline(0, color=\"gray\",linewidth=1)\nplt.plot(acf)\nplt.xlabel(\"lag time $j$\")\nplt.ylabel(\"$\\hat{R}^{XX}_j$\")\nplt.show()\n\n# create integrated ACF plot\nfig = plt.figure(figsize=(10, 6))\nplt.plot(j_max_v[1:], C * tau_int_v[1:])\nplt.ylim(plt.gca().get_ylim()) # explicitly keep the limits of the first plot\nplt.plot(j_max_v[1:], j_max_v[1:])\nplt.plot([j_max], [C * tau_int_v[j_max]], \"ro\")\nplt.xscale(\"log\")\nplt.xlabel(r\"sum length $j_\\mathrm{max}$\")\nplt.ylabel(r\"$C \\times \\hat{\\tau}_{X, \\mathrm{int}}$\")\nplt.title(\"\")\nplt.show()\n\n# print out stuff\nprint(f\"Mean value: {avg:.4f}\")\nprint(f\"Standard error of the mean: {sem:.4f}\")\nprint(f\"Integrated autocorrelation time: {tau_int:.2f} time steps\")\nprint(f\"Effective number of samples: {N_eff:.1f}\")\n\nreturn sem\n\nsem_2 = autocorrelation_analysis(time_series_2, 5, 20000)\n```\nExercise\nInterpret the results of the analysis of time_series_2.\nInterpretation of the analysis\nEven though the autocorrelation analysis spits out a number for the SEM, it cannot be trusted. The ACF has a lot of noise in its tail which lets the integrated ACF become very \"random walky\" and therefore unreliable. This means that the ACF has not properly decayed to zero. The only possibility to get better statistics is to simulate for a longer time. Since the autocorrelation time is very long, it is sufficient to store a lot less samples during simulation. The sampling interval could be chosen to be 100 times larger and still capture the statistics sufficiently well.\nReferences\n<a id='[1]'></a>[1] A. Sokal. <a href=\"https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.49.4444\">Monte Carlo Methods in Statistical Mechanics: Foundations and New Algorithms Note to the Reader</a> , 1996"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
dchandan/rebound
|
ipython_examples/CloseEncounters.ipynb
|
gpl-3.0
|
[
"Catching close encounters using exceptions\nSometimes one is interested in catching a close encounter between two planets. This can easily be done with REBOUND. What you do when a close encounter happens is up to you.\nSome integrators are better suited to simulate close encounters than others. For example, the non-symplectic integrator IAS15 has an adaptive timestep scheme that resolves close encounters very well. Integrators that use a fixed timestep like WHFast are more likely to miss close encounters.\nLet's start with a two-planet system that will go unstable on a short timescale (we'll also assign them arbitrary IDs for later use):",
"import rebound\nimport numpy as np\ndef setupSimulation():\n sim = rebound.Simulation()\n sim.integrator = \"ias15\" # IAS15 is the default integrator, so we don't need this line\n sim.add(m=1., id=1)\n sim.add(m=1e-3,a=1., id=5)\n sim.add(m=5e-3,a=1.25, id=7)\n sim.move_to_com()\n return sim",
"Let's integrate this system for 100 orbital periods.",
"sim = setupSimulation()\nsim.integrate(100.*2.*np.pi)",
"Rebound exits the integration routine normally. We can now explore the final particle orbits:",
"for o in sim.calculate_orbits():\n print(o)",
"We see that the orbits of both planets changed significantly and we can already speculate that there was a close encounter.\nLet's redo the simulation, but this time set the sim.exit_min_distance flag for the simulation. If this flag is set, then REBOUND calculates the minimum distance between all particle pairs each timestep. If the distance is less than sim.exit_min_distance, then the integration is stopped and an exception thrown. Here, we'll use the Hill radius as the criteria for a close encounter. It is given by $r_{\\rm Hill} \\approx a \\sqrt{\\frac{m}{3M}}$, which is approximately 0.15 AU in our case. \nThis setup allows us to instead catch the exception and deal with it in a customized way. As a first example, let's catch the exception with a try-except block, and simply print out the error message. Additionally, let's store the particles' separations while we're integrating:",
"sim = setupSimulation() # Resets everything\nsim.exit_min_distance = 0.15\nNoutputs = 1000\ntimes = np.linspace(0,100.*2.*np.pi,Noutputs)\ndistances = np.zeros(Noutputs)\nps = sim.particles # ps is now an array of pointers. It will update as the simulation runs.\ntry:\n for i,time in enumerate(times):\n sim.integrate(time)\n dx = ps[1].x - ps[2].x\n dy = ps[1].y - ps[2].y\n dz = ps[1].z - ps[2].z\n distances[i] = np.sqrt(dx*dx+dy*dy+dz*dz)\nexcept rebound.Encounter as error:\n print(error)",
"The Encounter does currently not tell you wich particles had a close encounter. But you can easily search for the pair yourself (see below). \nHere, we already know which bodies had a close encounter (the two planets), so let's plot their separation.",
"%matplotlib inline\nimport matplotlib.pyplot as plt\nfig = plt.figure(figsize=(10,5))\nax = plt.subplot(111)\nax.set_xlabel(\"time [orbits]\")\nax.set_xlim([0,sim.t/(2.*np.pi)])\nax.set_ylabel(\"distance\")\nplt.plot(times/(2.*np.pi), distances);\nplt.plot([0.0,12],[0.2,0.2]) # Plot our close encounter criteria;",
"We did indeed find the close enounter correctly. We can now search for the two particles that collided and, for this example, merge them. To do that we'll first calculate our new merged planet coordinates, then remove the two particles that collided from REBOUND and finally add the new particle.",
"import copy\nfrom itertools import combinations\ndef mergeParticles(sim):\n # Find two closest particles\n min_d2 = 1e9 # large number\n particles = sim.particles\n for p1, p2 in combinations(particles,2):\n dx = p1.x - p2.x\n dy = p1.y - p2.y\n dz = p1.z - p2.z\n d2 = dx*dx + dy*dy + dz*dz\n if d2<min_d2:\n min_d2 = d2\n cp1 = p1\n cp2 = p2\n \n # Merge two closest particles\n mergedPlanet = rebound.Particle()\n mergedPlanet.m = cp1.m + cp2.m\n mergedPlanet.x = (cp1.m*cp1.x + cp2.m*cp2.x) /mergedPlanet.m\n mergedPlanet.y = (cp1.m*cp1.y + cp2.m*cp2.y) /mergedPlanet.m\n mergedPlanet.z = (cp1.m*cp1.z + cp2.m*cp2.z) /mergedPlanet.m\n mergedPlanet.vx = (cp1.m*cp1.vx + cp2.m*cp2.vx)/mergedPlanet.m\n mergedPlanet.vy = (cp1.m*cp1.vy + cp2.m*cp2.vy)/mergedPlanet.m\n mergedPlanet.vz = (cp1.m*cp1.vz + cp2.m*cp2.vz)/mergedPlanet.m\n mergedPlanet.id = cp1.id \n id1 = p1.id\n id2 = p2.id\n sim.remove(id=id1)\n sim.remove(id=id2)\n sim.add(mergedPlanet)\n\nsim = setupSimulation() # Resets everything\nsim.exit_min_distance = 0.15\nprint(\"Number of particles at the beginning of the simulation: %d.\"%sim.N)\nfor i,time in enumerate(times):\n try:\n sim.integrate(time)\n except rebound.Encounter as error:\n print(error)\n mergeParticles(sim)\nprint(\"Number of particles at the end of the simulation: %d.\"%sim.N)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
statkraft/shyft-doc
|
notebooks/api/api-essentials.ipynb
|
lgpl-3.0
|
[
"Essential api classes\nIntroduction\nThere are a few features within the Shyft api that can be helpful for plotting, aggregating, and conducting data reduction. Herein we demonstrate some of this functionality. If you gain an understanding of the aspects described herein, working with Shyft becomes much simpler and more 'pythonic'.\n0. Loading required python modules and running a configured shyft simulation\nThe first step below is simply to get a simulation up and running using the features of orchestration. If you are not familiar with this step, it is recommended to see the configured simulation example first.",
"# Pure python modules and jupyter notebook functionality\n# first you should import the third-party python modules which you'll use later on\n# the first line enables that figures are shown inline, directly in the notebook\n%pylab inline\nimport os\nimport datetime as dt\nfrom os import path\nimport sys\nfrom matplotlib import pyplot as plt\n\n# try to auto-configure the path, -will work in all cases where doc and data\n# are checked out at same level\nshyft_data_path = path.abspath(\"../../../shyft-data\")\nif path.exists(shyft_data_path) and 'SHYFT_DATA' not in os.environ:\n os.environ['SHYFT_DATA']=shyft_data_path\n",
"1. The key classes within the api and being \"pythonic\"\nThere are several essential class types that are used throughout Shyft, and initially, these may cause some challenges -- particularly to seasoned python users. If you are used to working with the datetime module, pandas and numpy, it will be important that you understand some of the basic concepts presented here.\nAt first, some of this may seem unneccessary. The point of these specialized classes are to provide efficiency withing the C++ core, and with respect to time, to make sure the calculations adhere to a few strict concepts.\nIntVector, and other ---Vector types\nWhen you see the class IntVector, just recognize these as lists. They should behave nearly identical to python lists, and where possible, we treat them as such. The main difference, is that they are specialized for their 'type', and under the covers within Shyft provide great efficiency.",
"from shyft.time_series import IntVector, DoubleVector\nimport numpy as np\n# works:\niv = IntVector([0, 1, 4, 5])\nprint(iv)\n# won't work:\n# iv[2] = 2.2\n\n# see the DoubleVector\ndv = DoubleVector([1.0, 3, 4.5, 10.110293])\nprint(dv)\ndv[0] = 2.3\n",
"Note, however, that these containers are very basic lists. They don't have methods such as .pop and .index. In generally, they are meant just to be used as first class containers for which you'll pass data into before passing it to Shyft. For those familiar with python and numpy, you can think of it similar to the advantages of using numpy arrays over pure python lists. The numpy array is far more efficient. In the case of Shyft, it is similar, and the IntVector, DoubleVector and other specialized vector types are much more efficient.",
"IV1 = IntVector([int(i) for i in np.arange(1000)])\n",
"TODO:\nCalendar, TimeSeries, TimeAxis, UtcPeriod, TsVector",
"\n\n# once the shyft_path is set correctly, you should be able to import shyft modules\nimport shyft\n\n# if you have problems here, it may be related to having your LD_LIBRARY_PATH\n# pointing to the appropriate libboost_python libraries (.so files)\nfrom shyft import api\nfrom shyft.repository.default_state_repository import DefaultStateRepository\nfrom shyft.orchestration.configuration.yaml_configs import YAMLSimConfig\nfrom shyft.orchestration.simulators.config_simulator import ConfigSimulator\n\n# here is the *.yaml file that configures the simulation:\nconfig_file_path = os.path.abspath(\"../nea-example/nea-config/neanidelva_simulation.yaml\")\ncfg = YAMLSimConfig(config_file_path, \"neanidelva\")\n\nsimulator = ConfigSimulator(cfg) \n# run the model, and we'll just pull the `api.model` from the `simulator`\nsimulator.run()\nmodel = simulator.region_model",
"1. shyft.time_series.TimeSeries\nThe Shyft time_series, shyft.time_series contains a lot of functionality worth exploring.\nThe TimeSeries class provides some tools for adding timeseries, looking at statistics, etc. Below is a quick exploration of some of the possibilities. Users should explore using the source code, tab completion, and most of all help to get the full story...",
"# First, we can also plot the statistical distribution of the \n# discharges over the sub-catchments\nfrom shyft.time_series import TsVector,IntVector,TimeAxis,Calendar,time,UtcPeriod\n# api.TsVector() is a a strongly typed list of time-series,that supports time-series vector operations. \ndischarge_ts = TsVector() # except from the type, it just works as a list()\n# loop over each catchment, and extract the time-series (we keep them as such for now)\nfor cid in model.catchment_ids: # fill in discharge time series for all subcatchments\n discharge_ts.append(model.statistics.discharge([int(cid)]))\n\n# get the percentiles we want, note -1 = arithmetic average\npercentiles= IntVector([10,25,50,-1,75,90]) \n\n# create a Daily(for the fun of it!) time-axis for the percentile calculations\n# (our simulation could be hourly) \nta_statistics = TimeAxis(model.time_axis.time(0), Calendar.DAY, 365)\n\n# then simply get out a new set of time-series, corresponding to the percentiles we specified\n# note that discharge_ts is of the TsVector type, not a simple list as in our first example above\ndischarge_percentiles = discharge_ts.percentiles(ta_statistics, percentiles)\n\n#utilize that we know that all the percentile time-series share a common time-axis\nts_timestamps = [dt.datetime.utcfromtimestamp(p.start) for p in ta_statistics] \n\n# Then we can make another plot of the percentile data for the sub-catchments\nfig, ax = plt.subplots(figsize=(20,15))\n\n# plot each discharge percentile in the discharge_percentiles\nfor i,ts_percentile in enumerate(discharge_percentiles):\n clr='k'\n if percentiles[i] >= 0.0: \n clr= str(float(percentiles[i]/100.0))\n ax.plot(ts_timestamps, ts_percentile.values, label = \"{}\".format(percentiles[i]), color=clr)\n\n# also plot catchment discharge along with the statistics\n# notice that we use .average(ta_statistics) to properly align true-average values to time-axis\nax.plot(ts_timestamps, discharge_ts[0].average(ta_statistics).values, \n label = \"CID {}\".format(model.catchment_ids[0]),\n linewidth=2.0, alpha=0.7, color='b')\n\nfig.autofmt_xdate()\nax.legend(title=\"Percentiles\")\nax.set_ylabel(\"discharge [m3 s-1]\")\n\n# a simple percentile plot, from orchestration looks nicer\nfrom shyft.orchestration import plotting as splt\noslo = Calendar('Europe/Oslo')\nfig, ax = plt.subplots(figsize=(16,8))\nsplt.set_calendar_formatter(oslo)\nh, ph = splt.plot_np_percentiles(ts_timestamps,[ p.values for p in discharge_percentiles],\n base_color=(0.03,0.01,0.3))\n\nax = plt.gca()\nax.set_ylabel(\"discharge [m3 s-1]\") \nplt.title(\"CID {}\".format(model.catchment_ids[0]))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
European-XFEL/h5tools-py
|
docs/dssc_geometry.ipynb
|
bsd-3-clause
|
[
"DSSC detector geometry\nAs of version 0.5, karabo_data has geometry code for the DSSC detector.\nThis doesn't currently account for the hexagonal pixels of DSSC, but it's\ngood enough for a preview of detector images.",
"%matplotlib inline\nfrom karabo_data.geometry2 import DSSC_1MGeometry\n\n# Made up numbers!\nquad_pos = [\n (-130, 5),\n (-130, -125),\n (5, -125),\n (5, 5),\n]\npath = 'dssc_geo_june19.h5'\n\ng = DSSC_1MGeometry.from_h5_file_and_quad_positions(path, quad_pos)\n\ng.inspect()\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ng.expected_data_shape",
"We'll use some empty data to demonstate assembling an image.",
"a = np.zeros(g.expected_data_shape)\n\ng.plot_data_fast(a, axis_units='m');",
"Let's have a close up look at some pixels in Q1M1. get_pixel_positions() gives us pixel centres.\nto_distortion_array() gives pixel corners in a slightly different format, suitable for PyFAI.\nPyFAI requires non-negative x and y coordinates. But we want to plot them along with the centre positions, so we pass allow_negative_xy=True to get comparable coordinates.",
"pixel_pos = g.get_pixel_positions()\nprint(\"Pixel positions array shape:\", pixel_pos.shape,\n \"= (modules, slow_scan, fast_scan, x/y/z)\")\nq1m1_centres = pixel_pos[0]\ncx = q1m1_centres[..., 0]\ncy = q1m1_centres[..., 1]\n\ndistortn = g.to_distortion_array(allow_negative_xy=True)\nprint(\"Distortion array shape:\", distortn.shape,\n \"= (modules * slow_scan, fast_scan, corners, z/y/x)\")\nq1m1_corners = distortn[:128]\n\nfrom matplotlib.patches import Polygon\nfrom matplotlib.collections import PatchCollection\n\nfig, ax = plt.subplots(figsize=(10, 10))\n\nhexes = []\nfor ss_pxl in range(4):\n for fs_pxl in range(5):\n \n # Create hexagon\n corners = q1m1_corners[ss_pxl, fs_pxl]\n corners = corners[:, 1:][:, ::-1] # Drop z, flip x & y\n hexes.append(Polygon(corners))\n \n # Draw text label near the pixel centre\n ax.text(cx[ss_pxl, fs_pxl], cy[ss_pxl, fs_pxl],\n ' [{}, {}]'.format(ss_pxl, fs_pxl),\n verticalalignment='bottom', horizontalalignment='left')\n\n# Add the hexagons to the plot\npc = PatchCollection(hexes, facecolor=(0.75, 1.0, 0.75), edgecolor='k')\nax.add_collection(pc)\n\n# Plot the pixel centres\nax.scatter(cx[:5, :6], cy[:5, :6], marker='x')\n\n# matplotlib is reluctant to show such a small area, so we need to set the limits manually\nax.set_xlim(-0.007, -0.0085) # To match the convention elsewhere, draw x right-to-left\nax.set_ylim(0.0065, 0.0075)\nax.set_ylabel(\"metres\")\nax.set_xlabel(\"metres\")\nax.set_aspect(1)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
Diyago/Machine-Learning-scripts
|
general studies/Mining Twitter.ipynb
|
apache-2.0
|
[
"Example 1. Authorizing an application to access Twitter account data",
"import twitter\n\n# XXX: Go to http://dev.twitter.com/apps/new to create an app and get values\n# for these credentials, which you'll need to provide in place of these\n# empty string values that are defined as placeholders.\n# See https://dev.twitter.com/docs/auth/oauth for more information \n# on Twitter's OAuth implementation.\n\nCONSUMER_KEY = ''\nCONSUMER_SECRET =''\nOAUTH_TOKEN = '-'\nOAUTH_TOKEN_SECRET = ''\n\nauth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,\n CONSUMER_KEY, CONSUMER_SECRET)\n\ntwitter_api = twitter.Twitter(auth=auth)\n\n# Nothing to see by displaying twitter_api except that it's now a\n# defined variable\n\nprint twitter_api",
"Example 2. Retrieving trends",
"# The Yahoo! Where On Earth ID for the entire world is 1.\n# See https://dev.twitter.com/docs/api/1.1/get/trends/place and\n# http://developer.yahoo.com/geo/geoplanet/\n\nWORLD_WOE_ID = 1\nUS_WOE_ID = 23424977\nRUS_WOE_ID = 2122265\n# Prefix ID with the underscore for query string parameterization.\n# Without the underscore, the twitter package appends the ID value\n# to the URL itself as a special case keyword argument.\n\nworld_trends = twitter_api.trends.place(_id=WORLD_WOE_ID)\nus_trends = twitter_api.trends.place(_id=US_WOE_ID)\nrus_trends = twitter_api.trends.place(_id=RUS_WOE_ID)\n\n\nprint world_trends\nprint\nprint us_trends\n\ndir()",
"Example 3. Displaying API responses as pretty-printed JSON",
"import json\n\nprint json.dumps(world_trends, indent=1)\nprint\nprint json.dumps(us_trends, indent=1)\nprint\nprint json.dumps(rus_trends, indent=1)",
"Example 4. Computing the intersection of 3 sets of trends",
"world_trends_set = set([trend['name'] \n for trend in world_trends[0]['trends']])\n\nus_trends_set = set([trend['name'] \n for trend in us_trends[0]['trends']])\n\nrus_trends_set = set([trend['name'] \n for trend in rus_trends[0]['trends']]) \n\ncommon_trends = world_trends_set.intersection(us_trends_set, rus_trends_set)\nprint common_trends\ncommon_trends = world_trends_set.intersection(us_trends_set)\nprint common_trends\ncommon_trends = world_trends_set.intersection(rus_trends_set)\nprint common_trends",
"Example 5. Collecting search results",
"# Import unquote to prevent url encoding errors in next_results\nfrom urllib import unquote\n\n# XXX: Set this variable to a trending topic, \n# or anything else for that matter. The example query below\n# was a trending topic when this content was being developed\n# and is used throughout the remainder of this chapter.\n\nq = '#success' \n\ncount = 100\n\n# See https://dev.twitter.com/docs/api/1.1/get/search/tweets\n\nsearch_results = twitter_api.search.tweets(q=q, count=count)\n\nstatuses = search_results['statuses']\n\n\n# Iterate through 5 more batches of results by following the cursor\n\nfor _ in range(5):\n print \"Length of statuses\", len(statuses)\n try:\n next_results = search_results['search_metadata']['next_results']\n except KeyError, e: # No more results when next_results doesn't exist\n break\n \n # Create a dictionary from next_results, which has the following form:\n # ?max_id=313519052523986943&q=NCAA&include_entities=1\n kwargs = dict([ kv.split('=') for kv in unquote(next_results[1:]).split(\"&\") ])\n \n search_results = twitter_api.search.tweets(**kwargs)\n statuses += search_results['statuses']\n\n# Show one sample search result by slicing the list...\nprint json.dumps(statuses[0], indent=1)\n\ntype(statuses)\n\n# get the most retweeted tweet\nmax_num = 0\nfor status in statuses:\n if status['retweet_count'] > max_num:\n max_num = status['retweet_count']\n\nprint max_num",
"Example 6. Extracting text, screen names, and hashtags from tweets",
"status_texts = [ status['text'] for status in statuses ]\n\nscreen_names = [ user_mention['screen_name'] \n for status in statuses\n for user_mention in status['entities']['user_mentions'] ]\n\nhashtags = [ hashtag['text'] \n for status in statuses\n for hashtag in status['entities']['hashtags'] ]\n\n# Compute a collection of all words from all tweets\nwords = [ w \n for t in status_texts \n for w in t.split() ]\n\n# Explore the first 5 items for each...\n\nprint json.dumps(status_texts[0:5], indent=1)\nprint json.dumps(screen_names[0:5], indent=1) \nprint json.dumps(hashtags[0:5], indent=1)\nprint json.dumps(words[0:5], indent=1)",
"Example 7. Creating a basic frequency distribution from the words in tweets",
"from collections import Counter\n\nfor item in [words, screen_names, hashtags]:\n c = Counter(item)\n print c.most_common()[:10] # top 10\n print\n\nprint json.dumps(screen_names, indent=1) ",
"Example 8. Using prettytable to display tuples in a nice tabular format",
"from prettytable import PrettyTable\n\nfor label, data in (('Word', words), \n ('Screen Name', screen_names), \n ('Hashtag', hashtags)):\n pt = PrettyTable(field_names=[label, 'Count']) \n c = Counter(data)\n [ pt.add_row(kv) for kv in c.most_common()[:10] ]\n pt.align[label], pt.align['Count'] = 'l', 'r' # Set column alignment\n print pt",
"Example 9. Calculating lexical diversity for tweets",
"# A function for computing lexical diversity\ndef lexical_diversity(tokens):\n return 1.0*len(set(tokens))/len(tokens) \n\n# A function for computing the average number of words per tweet\ndef average_words(statuses):\n total_words = sum([ len(s.split()) for s in statuses ]) \n return 1.0*total_words/len(statuses)\n\nprint lexical_diversity(words)\nprint lexical_diversity(screen_names)\nprint lexical_diversity(hashtags)\nprint average_words(status_texts)",
"Example 10. Finding the most popular retweets",
"retweets = [\n # Store out a tuple of these three values ...\n (status['retweet_count'], \n status['retweeted_status']['user']['screen_name'],\n status['text'],\n status['retweeted_status']['id']\n ) \n \n # ... for each status ...\n for status in statuses \n # ... so long as the status meets this condition.\n if status.has_key('retweeted_status')\n \n ]\n\n# Slice off the first 5 from the sorted results and display each item in the tuple\n\npt = PrettyTable(field_names=['Count', 'Screen Name', 'Text', 'id'])\n[ pt.add_row(row) for row in sorted(retweets, reverse=True)[:5] ]\npt.max_width['Text'] = 50\npt.align= 'l'\nprint pt",
"Example 11. Looking up users who have retweeted a status",
"# Get the original tweet id for a tweet from its retweeted_status node \n# and insert it here in place of the sample value that is provided\n# from the text of the book\n\n_retweets = twitter_api.statuses.retweets(id=833667112648466436)\nprint [r['user']['screen_name'] for r in _retweets]",
"Example 12. Plotting frequencies of words",
"%matplotlib inline\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nword_counts = sorted(Counter(words).values(), reverse=True)\n\nplt.loglog(word_counts)\nplt.ylabel(\"Freq\")\nplt.xlabel(\"Word Rank\")",
"Example 13. Generating histograms of words, screen names, and hashtags",
"for label, data in (('Words', words), \n ('Screen Names', screen_names), \n ('Hashtags', hashtags)):\n\n # Build a frequency map for each set of data\n # and plot the values\n c = Counter(data)\n plt.hist(c.values())\n \n # Add a title and y-label ...\n plt.title(label)\n plt.ylabel(\"Number of items in bin\")\n plt.xlabel(\"Bins (number of times an item appeared)\")\n \n # ... and display as a new figure\n plt.figure()",
"Example 14. Generating a histogram of retweet counts",
"# Using underscores while unpacking values in\n# a tuple is idiomatic for discarding them\ncounts = []\n\nfor status in statuses:\n counts.append(status['retweet_count'])\n#counts = [count for count, _, _ in retweets]\n\nplt.hist(counts)\nplt.title(\"Retweets with 0\")\nplt.xlabel('Bins (number of times retweeted)')\nplt.ylabel('Number of tweets in bin')\n\n\n\n\n\ncounts = []\nfor status in statuses:\n if status['retweet_count'] > 0:\n counts.append(status['retweet_count'])\nplt.hist(counts)\nplt.title(\"Retweets with 0\")\nplt.xlabel('Bins (number of times retweeted)')\nplt.ylabel('Number of tweets in bin')",
"Note: This histogram gives you an idea of how many times tweets are retweeted with the x-axis defining partitions for tweets that have been retweeted some number of times and the y-axis telling you how many tweets fell into each bin. For example, a y-axis value of 5 for the \"15-20 bin\" on the x-axis means that there were 5 tweets that were retweeted between 15 and 20 times.\nHere's another variation that transforms the data using the (automatically imported from numpy) log function in order to improve the resolution of the plot.",
"# Using underscores while unpacking values in\n# a tuple is idiomatic for discarding them\n\ncounts = []\nfor status in statuses:\n if status['retweet_count'] > 0:\n counts.append(math.log(status['retweet_count']))\n\n# Taking the log of the *data values* themselves can \n# often provide quick and valuable insight into the\n# underlying distribution as well. Try it back on\n# Example 13 and see if it helps.\n\nplt.hist((counts))\nplt.title(\"Retweets\")\nplt.xlabel('Log[Bins (number of times retweeted)]')\nplt.ylabel('Log[Number of tweets in bin]')\n\nprint log(counts)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
christophebertrand/ada-epfl
|
HW02-Data_from_the_Web/bachelor_data_analysis.ipynb
|
mit
|
[
"import pandas as pd\nimport numpy as np\nimport requests as requests\nfrom bs4 import BeautifulSoup\nimport re\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\npd.options.mode.chained_assignment = None # default='warn'\n\nr = requests.get('http://isa.epfl.ch/imoniteur_ISAP/!GEDPUBLICREPORTS.filter?ww_i_reportModel=133685247')\nsoupe = BeautifulSoup(r.text, 'html.parser')\n\n",
"Collect the data\nGet the name of the 4 fields we have to select",
"select = soupe.find_all('select')\nselect_name = [s.attrs['name'] for s in select]\nselect_name\n",
"Get the select field correspondind to the 4 names found before",
"select_field = [soupe.find('select',{'name': name}) for name in select_name]\n",
"Get the value corresponding to the \"Informatique\"",
"option_unite_acad = select_field[0].find_all('option')\n#option_unite_acad[[opt.text == 'Informatique' for opt in option_unite_acad]]\noption_unite_acad\nunite_acad ={opt['value']: opt.text for opt in option_unite_acad if opt.text == 'Informatique'}\n \nunite_acad ",
"Get all the values of the academic period field\nIn the second select_Field, in the option tag, we take all value execept the one equal to null\nWe only keep the period that are bigger than 2007 (in case there were older periods)",
"option = select_field[1].find_all('option')\nperiod_acad = {opt['value']: opt.text for opt in option if opt['value'] != 'null' and int(opt.text.split('-')[0]) >= 2007}\nperiod_acad",
"Get all the values of the pedagogic period field correspoding to the bachelor semester\nin the 3rd select_field, we take all value that contains 'Bachelor' in the label\nSince we need to find the first and last record of a student, we only consider the 1st, 5th and 6th semester. \nIt is not possible to finish his bachelor during the 2, 3 or 4 semester but it is possible to finish during the 5th semester if we miss some credits during our last year and we only need one semester to finish",
"option = select_field[2].find_all('option')\n\nperiod_pedago = {opt['value']: opt.text for opt in option if 'Bachelor' in opt.text and ('1' in opt.text or '5' in opt.text or '6' in opt.text) }\nperiod_pedago\n\noption = select_field[3].find_all('option')\nhiverEte = {opt['value']: opt.text for opt in option if opt['value'] != 'null'}\nhiverEte",
"Collect the data\nCreate a function that will parse one request and return a dataFrame",
"def parseRequest(u_a, p_a, p_p, h_e):\n\n #Send request\n url = 'http://isa.epfl.ch/imoniteur_ISAP/!GEDPUBLICREPORTS.html?ww_x_GPS=-1&ww_i_reportModel=133685247&ww_i_reportModelXsl=133685270&ww_x_UNITE_ACAD='+u_a[0]+'&ww_x_PERIODE_ACAD='+p_a[0]+'&ww_x_PERIODE_PEDAGO='+p_p[0]+'&ww_x_HIVERETE='+ h_e\n r = requests.get(url)\n soupe = BeautifulSoup(r.text, 'html.parser')\n \n #get the header , we also replace the space by '_' (easier to use the dataframe later) \n th_tag = soupe.find_all('th')\n th = [t.text.replace(' ', '_') for t in th_tag]\n #remove the first th that correspond to the title of the table\n th = th[1:]\n\n \n #save the size of the header\n header_size = len(th)\n #add new information (new columns): year_start, year_stop, semester number\n th = np.append(th, ['Year_start', 'Year_stop', 'Semester'])\n \n #Find all the 'tr' tag\n tr_tag = soupe.find_all('tr')\n #drop the 2 first tag that correspond to the title and the headers of the table\n tr_tag = tr_tag[2:]\n \n #Temporary dictionary that will collect all the entry of the dataframe\n data = []\n \n #Read the request line by line and fill the dataframe\n for tr in tr_tag:\n #create the new entry\n row = [r.text.replace('\\xa0', ' ') for r in tr]\n #one row contains 12 elements but the header has only 11-> drop the last one because it is always empty\n row = row[:header_size]\n\n ##add the new information to the row\n #split the academic period\n year = p_a[1].split('-')\n #find the semester\n semester = p_p[1].split()[2]\n newCol = [int(year[0]), int(year[1]), semester]\n\n #concat the row with the new info\n row += newCol\n data.append(row)\n\n df = pd.DataFrame(data, columns= th)\n return df",
"We iterate over all the parameters. We decided to skip the 'Type de semestre' (HIVERETE) since it is a redundant information. An odd semester is always in Autumn and an even one is always in Spring",
"\n\nlist_df = []\n\nfor u_a in unite_acad.items():\n for p_a in period_acad.items():\n for p_p in period_pedago.items():\n print('Request for: ',u_a[1], p_a[1], p_p[1])\n list_df.append(parseRequest(u_a,p_a, p_p, 'null'))\n\n\nStudent = pd.concat(list_df, ignore_index=True)\nStudent \n",
"How many years it took each student to go from the first to the sixth semester\nAs said before, here we check student that are in semester 1 (beginning) and semester 6 or 5 (in case they did the bachelor in 3.5 or 4.5 year)",
"Student.index = Student.No_Sciper + Student.Semester.astype(str) + Student.Year_start.astype(str)\nStudent.index.is_unique",
"Show total number of student that made at least one semester",
"len(Student.No_Sciper.unique())",
"Eliminate student who don't finished their studies\nWe group by sciper number (which we now is unique for each student). It return a sciper with a dataframe containing all the entries for one student\nWe keep people that appear in semester 1, 5 and 6. => those are the people that graduated in informatique\nWe drop all other people because:\n * if they don't appear in semester 6 it means they never finished the Bachelor\n * if they appear only in semester 5 and 6 it means that they began in another section (usually in communication system), but we can't know when they began epfl without loading the data for all sections\nBut just to have an idea, we keep the person who only take part to semester 5 and 6, just to see the proportion",
"def computeTotalYears(df):\n start = df.Year_start.min()\n end = df.Year_stop.max()\n end_semester = df[df.Year_stop == end].Semester\n if(end_semester == '6').any():\n return (int(end) - int(start)) \n else: \n return (int(end) - int(start) -0.5)\n \n\nStudent_copy = Student.copy()\nStudent_copy.index = Student.index\n\n\n#We init the dataframe\n#store people that complete the 3 years in informatique\nBachelor = pd.DataFrame(columns = ['Sciper', 'Civilité', 'Years'])\n#store people that complet only the 2 last years\nOnly_5_6 = pd.DataFrame(columns = ['Sciper', 'Civilité', 'Years'])\n\n#Groupe by sciper\ngrouped = Student_copy.groupby(['No_Sciper'])\n\n\n\nfor scip, group in grouped:\n if((group.Semester != '1').all() and (group.Semester == '5').any() and (group.Semester == '6').any()): \n total = computeTotalYears(group)\n Only_5_6.ix[scip] = [scip,group.Civilité.iloc[0] , total ]\n elif((group.Semester == '1').any() and (group.Semester == '5').any() and (group.Semester == '6').any()):\n total = computeTotalYears(group)\n Bachelor.ix[scip] = [scip,group.Civilité.iloc[0] , total ] \n\n\nBachelor.Years.max()\n\nBachelor.Years.min()\n\nBachelor.head()",
"Person that didn't complete the first year in compute Science, we don't consider them since we can't know when they begin their first year",
"Only_5_6.count()",
"Nomber of person that complete the bachelor in computer science",
"Bachelor.count()",
"Number of person that tried at least the first years or last one",
"len(grouped)",
"Person that tried the first year but never finished the bachelor",
"len(grouped) - len(Bachelor) - len(Only_5_6)",
"Compute the average time (in years) to complete the bachelor\nwe choose to ouptut the result in years since it is more significant for human than month. To have the number of months we just need to multiply by 12\nIn total",
"len(Bachelor)\n\naverage = Bachelor.Years.sum()/len(Bachelor)\naverage\n\nBachelor.Years.max()\n\nBachelor.Years.min()\n\nBachelor.Years.hist(bins = 10, range=[3, 8])",
"Female",
"Female = Bachelor[Bachelor.Civilité == 'Madame']\nlen(Female)\n\naverageFemale = Female.Years.sum()/len(Female)\naverageFemale\n\nFemale.Years.hist(bins = 10, range=[3, 8])\n",
"Male",
"Male = Bachelor[Bachelor.Civilité == 'Monsieur']\nlen(Male)\n\naverage = Male.Years.sum()/len(Male)\naverage\n\n\nMale.Years.hist(bins = 10, range=[3, 8])",
"Test the results",
"import scipy.stats as stats\n",
"We want to see if the difference of the average years for female and male are statistically significant with a threshold of 95%\nWe use a Welch's T-Test (which does not assume equal population variance): it measures whether the average value differs significantly across samples.",
"stats.ttest_ind(a = Female.Years, b= Male.Years, equal_var=False)",
"Since the pvalue is > 0.05, we cannot reject the null hypothesis of identical average scores which means: we cannot say that the difference is in average statistically significant"
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
ES-DOC/esdoc-jupyterhub
|
notebooks/cmcc/cmip6/models/sandbox-3/ocean.ipynb
|
gpl-3.0
|
[
"ES-DOC CMIP6 Model Properties - Ocean\nMIP Era: CMIP6\nInstitute: CMCC\nSource ID: SANDBOX-3\nTopic: Ocean\nSub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing. \nProperties: 133 (101 required)\nModel descriptions: Model description details\nInitialized From: -- \nNotebook Help: Goto notebook help page\nNotebook Initialised: 2018-02-15 16:53:50\nDocument Setup\nIMPORTANT: to be executed each time you run the notebook",
"# DO NOT EDIT ! \nfrom pyesdoc.ipython.model_topic import NotebookOutput \n\n# DO NOT EDIT ! \nDOC = NotebookOutput('cmip6', 'cmcc', 'sandbox-3', 'ocean')",
"Document Authors\nSet document authors",
"# Set as follows: DOC.set_author(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Contributors\nSpecify document contributors",
"# Set as follows: DOC.set_contributor(\"name\", \"email\") \n# TODO - please enter value(s)",
"Document Publication\nSpecify document publication status",
"# Set publication status: \n# 0=do not publish, 1=publish. \nDOC.set_publication_status(0)",
"Document Table of Contents\n1. Key Properties\n2. Key Properties --> Seawater Properties\n3. Key Properties --> Bathymetry\n4. Key Properties --> Nonoceanic Waters\n5. Key Properties --> Software Properties\n6. Key Properties --> Resolution\n7. Key Properties --> Tuning Applied\n8. Key Properties --> Conservation\n9. Grid\n10. Grid --> Discretisation --> Vertical\n11. Grid --> Discretisation --> Horizontal\n12. Timestepping Framework\n13. Timestepping Framework --> Tracers\n14. Timestepping Framework --> Baroclinic Dynamics\n15. Timestepping Framework --> Barotropic\n16. Timestepping Framework --> Vertical Physics\n17. Advection\n18. Advection --> Momentum\n19. Advection --> Lateral Tracers\n20. Advection --> Vertical Tracers\n21. Lateral Physics\n22. Lateral Physics --> Momentum --> Operator\n23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff\n24. Lateral Physics --> Tracers\n25. Lateral Physics --> Tracers --> Operator\n26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff\n27. Lateral Physics --> Tracers --> Eddy Induced Velocity\n28. Vertical Physics\n29. Vertical Physics --> Boundary Layer Mixing --> Details\n30. Vertical Physics --> Boundary Layer Mixing --> Tracers\n31. Vertical Physics --> Boundary Layer Mixing --> Momentum\n32. Vertical Physics --> Interior Mixing --> Details\n33. Vertical Physics --> Interior Mixing --> Tracers\n34. Vertical Physics --> Interior Mixing --> Momentum\n35. Uplow Boundaries --> Free Surface\n36. Uplow Boundaries --> Bottom Boundary Layer\n37. Boundary Forcing\n38. Boundary Forcing --> Momentum --> Bottom Friction\n39. Boundary Forcing --> Momentum --> Lateral Friction\n40. Boundary Forcing --> Tracers --> Sunlight Penetration\n41. Boundary Forcing --> Tracers --> Fresh Water Forcing \n1. Key Properties\nOcean key properties\n1.1. Model Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of ocean model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.2. Model Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of ocean model code (NEMO 3.6, MOM 5.0,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"1.3. Model Family\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of ocean model.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.model_family') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"OGCM\" \n# \"slab ocean\" \n# \"mixed layer ocean\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.4. Basic Approximations\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nBasic approximations made in the ocean.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.basic_approximations') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Primitive equations\" \n# \"Non-hydrostatic\" \n# \"Boussinesq\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"1.5. Prognostic Variables\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nList of prognostic variables in the ocean component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.prognostic_variables') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# \"Salinity\" \n# \"U-velocity\" \n# \"V-velocity\" \n# \"W-velocity\" \n# \"SSH\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2. Key Properties --> Seawater Properties\nPhysical properties of seawater in ocean\n2.1. Eos Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of EOS for sea water",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Wright, 1997\" \n# \"Mc Dougall et al.\" \n# \"Jackett et al. 2006\" \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2.2. Eos Functional Temp\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTemperature used in EOS for sea water",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Potential temperature\" \n# \"Conservative temperature\" \n# TODO - please enter value(s)\n",
"2.3. Eos Functional Salt\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nSalinity used in EOS for sea water",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Practical salinity Sp\" \n# \"Absolute salinity Sa\" \n# TODO - please enter value(s)\n",
"2.4. Eos Functional Depth\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDepth or pressure used in EOS for sea water ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Pressure (dbars)\" \n# \"Depth (meters)\" \n# TODO - please enter value(s)\n",
"2.5. Ocean Freezing Point\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nEquation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"TEOS 2010\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"2.6. Ocean Specific Heat\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nSpecific heat in ocean (cpocean) in J/(kg K)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"2.7. Ocean Reference Density\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nBoussinesq reference density (rhozero) in kg / m3",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"3. Key Properties --> Bathymetry\nProperties of bathymetry in ocean\n3.1. Reference Dates\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nReference date of bathymetry",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Present day\" \n# \"21000 years BP\" \n# \"6000 years BP\" \n# \"LGM\" \n# \"Pliocene\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"3.2. Type\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the bathymetry fixed in time in the ocean ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"3.3. Ocean Smoothing\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe any smoothing or hand editing of bathymetry in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"3.4. Source\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe source of bathymetry in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.bathymetry.source') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4. Key Properties --> Nonoceanic Waters\nNon oceanic waters treatement in ocean\n4.1. Isolated Seas\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how isolated seas is performed",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"4.2. River Mouth\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe if/how river mouth mixing or estuaries specific treatment is performed",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5. Key Properties --> Software Properties\nSoftware properties of ocean code\n5.1. Repository\nIs Required: FALSE Type: STRING Cardinality: 0.1\nLocation of code for this component.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.repository') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.2. Code Version\nIs Required: FALSE Type: STRING Cardinality: 0.1\nCode version identifier.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_version') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"5.3. Code Languages\nIs Required: FALSE Type: STRING Cardinality: 0.N\nCode language(s).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6. Key Properties --> Resolution\nResolution in the ocean grid\n6.1. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nThis is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.2. Canonical Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nExpression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.3. Range Horizontal Resolution\nIs Required: TRUE Type: STRING Cardinality: 1.1\nRange of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"6.4. Number Of Horizontal Gridpoints\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nTotal number of horizontal (XY) points (or degrees of freedom) on computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"6.5. Number Of Vertical Levels\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nNumber of vertical levels resolved on computational grid.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"6.6. Is Adaptive Grid\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nDefault is False. Set true if grid resolution changes during execution.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"6.7. Thickness Level 1\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nThickness of first surface ocean level (in meters)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"7. Key Properties --> Tuning Applied\nTuning methodology for ocean component\n7.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nGeneral overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.2. Global Mean Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList set of metrics of the global mean state used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.3. Regional Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"7.4. Trend Metrics Used\nIs Required: FALSE Type: STRING Cardinality: 0.N\nList observed trend metrics used in tuning model/component",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8. Key Properties --> Conservation\nConservation in the ocean component\n8.1. Description\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBrief description of conservation methodology",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.description') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.N\nProperties conserved in the ocean by the numerical schemes",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.scheme') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Energy\" \n# \"Enstrophy\" \n# \"Salt\" \n# \"Volume of ocean\" \n# \"Momentum\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"8.3. Consistency Properties\nIs Required: FALSE Type: STRING Cardinality: 0.1\nAny additional consistency properties (energy conversion, pressure gradient discretisation, ...)?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.4. Corrected Conserved Prognostic Variables\nIs Required: FALSE Type: STRING Cardinality: 0.1\nSet of variables which are conserved by more than the numerical scheme alone.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"8.5. Was Flux Correction Used\nIs Required: FALSE Type: BOOLEAN Cardinality: 0.1\nDoes conservation involve flux correction ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"9. Grid\nOcean grid\n9.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of grid in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"10. Grid --> Discretisation --> Vertical\nProperties of vertical discretisation in ocean\n10.1. Coordinates\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of vertical coordinates in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Z-coordinate\" \n# \"Z*-coordinate\" \n# \"S-coordinate\" \n# \"Isopycnic - sigma 0\" \n# \"Isopycnic - sigma 2\" \n# \"Isopycnic - sigma 4\" \n# \"Isopycnic - other\" \n# \"Hybrid / Z+S\" \n# \"Hybrid / Z+isopycnic\" \n# \"Hybrid / other\" \n# \"Pressure referenced (P)\" \n# \"P*\" \n# \"Z**\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"10.2. Partial Steps\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nUsing partial steps with Z or Z vertical coordinate in ocean ?*",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"11. Grid --> Discretisation --> Horizontal\nType of horizontal discretisation scheme in ocean\n11.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal grid type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Lat-lon\" \n# \"Rotated north pole\" \n# \"Two north poles (ORCA-style)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11.2. Staggering\nIs Required: FALSE Type: ENUM Cardinality: 0.1\nHorizontal grid staggering type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Arakawa B-grid\" \n# \"Arakawa C-grid\" \n# \"Arakawa E-grid\" \n# \"N/a\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"11.3. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nHorizontal discretisation scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Finite difference\" \n# \"Finite volumes\" \n# \"Finite elements\" \n# \"Unstructured grid\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"12. Timestepping Framework\nOcean Timestepping Framework\n12.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of time stepping in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"12.2. Diurnal Cycle\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDiurnal cycle type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Via coupling\" \n# \"Specific treatment\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13. Timestepping Framework --> Tracers\nProperties of tracers time stepping in ocean\n13.1. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTracers time stepping scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"13.2. Time Step\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nTracers time step (in seconds)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"14. Timestepping Framework --> Baroclinic Dynamics\nBaroclinic dynamics in ocean\n14.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nBaroclinic dynamics type",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Preconditioned conjugate gradient\" \n# \"Sub cyling\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nBaroclinic dynamics scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Leap-frog + Asselin filter\" \n# \"Leap-frog + Periodic Euler\" \n# \"Predictor-corrector\" \n# \"Runge-Kutta 2\" \n# \"AM3-LF\" \n# \"Forward-backward\" \n# \"Forward operator\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"14.3. Time Step\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nBaroclinic time step (in seconds)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"15. Timestepping Framework --> Barotropic\nBarotropic time stepping in ocean\n15.1. Splitting\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nTime splitting method",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"split explicit\" \n# \"implicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"15.2. Time Step\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nBarotropic time step (in seconds)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"16. Timestepping Framework --> Vertical Physics\nVertical physics time stepping in ocean\n16.1. Method\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDetails of vertical time stepping in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"17. Advection\nOcean advection\n17.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of advection in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18. Advection --> Momentum\nProperties of lateral momemtum advection scheme in ocean\n18.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of lateral momemtum advection scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Flux form\" \n# \"Vector form\" \n# TODO - please enter value(s)\n",
"18.2. Scheme Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nName of ocean momemtum advection scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.scheme_name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"18.3. ALE\nIs Required: FALSE Type: BOOLEAN Cardinality: 0.1\nUsing ALE for vertical advection ? (if vertical coordinates are sigma)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.momentum.ALE') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"19. Advection --> Lateral Tracers\nProperties of lateral tracer advection scheme in ocean\n19.1. Order\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nOrder of lateral tracer advection scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"19.2. Flux Limiter\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nMonotonic flux limiter for lateral tracer advection scheme in ocean ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"19.3. Effective Order\nIs Required: TRUE Type: FLOAT Cardinality: 1.1\nEffective order of limited lateral tracer advection scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"19.4. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"19.5. Passive Tracers\nIs Required: FALSE Type: ENUM Cardinality: 0.N\nPassive tracers advected",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers') \n\n# PROPERTY VALUE(S): \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Ideal age\" \n# \"CFC 11\" \n# \"CFC 12\" \n# \"SF6\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"19.6. Passive Tracers Advection\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIs advection of passive tracers different than active ? if so, describe.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"20. Advection --> Vertical Tracers\nProperties of vertical tracer advection scheme in ocean\n20.1. Name\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.name') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"20.2. Flux Limiter\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nMonotonic flux limiter for vertical tracer advection scheme in ocean ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"21. Lateral Physics\nOcean lateral physics\n21.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of lateral physics in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"21.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of transient eddy representation in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Eddy active\" \n# \"Eddy admitting\" \n# TODO - please enter value(s)\n",
"22. Lateral Physics --> Momentum --> Operator\nProperties of lateral physics operator for momentum in ocean\n22.1. Direction\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDirection of lateral physics momemtum scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.2. Order\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrder of lateral physics momemtum scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"22.3. Discretisation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDiscretisation of lateral physics momemtum scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff\nProperties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean\n23.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nLateral physics momemtum eddy viscosity coeff type in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"23.2. Constant Coefficient\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"23.3. Variable Coefficient\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"23.4. Coeff Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"23.5. Coeff Backscatter\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"24. Lateral Physics --> Tracers\nProperties of lateral physics for tracers in ocean\n24.1. Mesoscale Closure\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there a mesoscale closure in the lateral physics tracers scheme ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"24.2. Submesoscale Mixing\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"25. Lateral Physics --> Tracers --> Operator\nProperties of lateral physics operator for tracers in ocean\n25.1. Direction\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDirection of lateral physics tracers scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Horizontal\" \n# \"Isopycnal\" \n# \"Isoneutral\" \n# \"Geopotential\" \n# \"Iso-level\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.2. Order\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nOrder of lateral physics tracers scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Harmonic\" \n# \"Bi-harmonic\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"25.3. Discretisation\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nDiscretisation of lateral physics tracers scheme in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Second order\" \n# \"Higher order\" \n# \"Flux limiter\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff\nProperties of eddy diffusity coeff in lateral physics tracers scheme in the ocean\n26.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nLateral physics tracers eddy diffusity coeff type in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant\" \n# \"Space varying\" \n# \"Time + space varying (Smagorinsky)\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"26.2. Constant Coefficient\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"26.3. Variable Coefficient\nIs Required: FALSE Type: STRING Cardinality: 0.1\nIf space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"26.4. Coeff Background\nIs Required: TRUE Type: INTEGER Cardinality: 1.1\nDescribe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"26.5. Coeff Backscatter\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"27. Lateral Physics --> Tracers --> Eddy Induced Velocity\nProperties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean\n27.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of EIV in lateral physics tracers in the ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"GM\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"27.2. Constant Val\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf EIV scheme for tracers is constant, specify coefficient value (M2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"27.3. Flux Type\nIs Required: TRUE Type: STRING Cardinality: 1.1\nType of EIV flux (advective or skew)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"27.4. Added Diffusivity\nIs Required: TRUE Type: STRING Cardinality: 1.1\nType of EIV added diffusivity (constant, flow dependent or none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"28. Vertical Physics\nOcean Vertical Physics\n28.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of vertical physics in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"29. Vertical Physics --> Boundary Layer Mixing --> Details\nProperties of vertical physics in ocean\n29.1. Langmuir Cells Mixing\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there Langmuir cells mixing in upper ocean ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"30. Vertical Physics --> Boundary Layer Mixing --> Tracers\n*Properties of boundary layer (BL) mixing on tracers in the ocean *\n30.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of boundary layer mixing for tracers in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"30.2. Closure Order\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"30.3. Constant\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant BL mixing of tracers, specific coefficient (m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"30.4. Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBackground BL mixing of tracers coefficient, (schema and value in m2/s - may by none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"31. Vertical Physics --> Boundary Layer Mixing --> Momentum\n*Properties of boundary layer (BL) mixing on momentum in the ocean *\n31.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of boundary layer mixing for momentum in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure - TKE\" \n# \"Turbulent closure - KPP\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Turbulent closure - Bulk Mixed Layer\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"31.2. Closure Order\nIs Required: FALSE Type: FLOAT Cardinality: 0.1\nIf turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"31.3. Constant\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant BL mixing of momentum, specific coefficient (m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"31.4. Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBackground BL mixing of momentum coefficient, (schema and value in m2/s - may by none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"32. Vertical Physics --> Interior Mixing --> Details\n*Properties of interior mixing in the ocean *\n32.1. Convection Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of vertical convection in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Non-penetrative convective adjustment\" \n# \"Enhanced vertical diffusion\" \n# \"Included in turbulence closure\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"32.2. Tide Induced Mixing\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how tide induced mixing is modelled (barotropic, baroclinic, none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"32.3. Double Diffusion\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there double diffusion",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"32.4. Shear Mixing\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs there interior shear mixing",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"33. Vertical Physics --> Interior Mixing --> Tracers\n*Properties of interior mixing on tracers in the ocean *\n33.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of interior mixing for tracers in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"33.2. Constant\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant interior mixing of tracers, specific coefficient (m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"33.3. Profile\nIs Required: TRUE Type: STRING Cardinality: 1.1\nIs the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"33.4. Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBackground interior mixing of tracers coefficient, (schema and value in m2/s - may by none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34. Vertical Physics --> Interior Mixing --> Momentum\n*Properties of interior mixing on momentum in the ocean *\n34.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of interior mixing for momentum in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Constant value\" \n# \"Turbulent closure / TKE\" \n# \"Turbulent closure - Mellor-Yamada\" \n# \"Richardson number dependent - PP\" \n# \"Richardson number dependent - KT\" \n# \"Imbeded as isopycnic vertical coordinate\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"34.2. Constant\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf constant interior mixing of momentum, specific coefficient (m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"34.3. Profile\nIs Required: TRUE Type: STRING Cardinality: 1.1\nIs the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"34.4. Background\nIs Required: TRUE Type: STRING Cardinality: 1.1\nBackground interior mixing of momentum coefficient, (schema and value in m2/s - may by none)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"35. Uplow Boundaries --> Free Surface\nProperties of free surface in ocean\n35.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of free surface in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"35.2. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nFree surface scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear implicit\" \n# \"Linear filtered\" \n# \"Linear semi-explicit\" \n# \"Non-linear implicit\" \n# \"Non-linear filtered\" \n# \"Non-linear semi-explicit\" \n# \"Fully explicit\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"35.3. Embeded Seaice\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the sea-ice embeded in the ocean model (instead of levitating) ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"36. Uplow Boundaries --> Bottom Boundary Layer\nProperties of bottom boundary layer in ocean\n36.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of bottom boundary layer in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"36.2. Type Of Bbl\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of bottom boundary layer in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Diffusive\" \n# \"Acvective\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"36.3. Lateral Mixing Coef\nIs Required: FALSE Type: INTEGER Cardinality: 0.1\nIf bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# TODO - please enter value(s)\n",
"36.4. Sill Overflow\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe any specific treatment of sill overflows",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37. Boundary Forcing\nOcean boundary forcing\n37.1. Overview\nIs Required: TRUE Type: STRING Cardinality: 1.1\nOverview of boundary forcing in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.overview') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.2. Surface Pressure\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.3. Momentum Flux Correction\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.4. Tracers Flux Correction\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.5. Wave Effects\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how wave effects are modelled at ocean surface.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.wave_effects') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.6. River Runoff Budget\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe how river runoff from land surface is routed to ocean and any global adjustment done.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"37.7. Geothermal Heating\nIs Required: TRUE Type: STRING Cardinality: 1.1\nDescribe if/how geothermal heating is present at ocean bottom.",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"38. Boundary Forcing --> Momentum --> Bottom Friction\nProperties of momentum bottom friction in ocean\n38.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of momentum bottom friction in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Linear\" \n# \"Non-linear\" \n# \"Non-linear (drag function of speed of tides)\" \n# \"Constant drag coefficient\" \n# \"None\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"39. Boundary Forcing --> Momentum --> Lateral Friction\nProperties of momentum lateral friction in ocean\n39.1. Type\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of momentum lateral friction in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"None\" \n# \"Free-slip\" \n# \"No-slip\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"40. Boundary Forcing --> Tracers --> Sunlight Penetration\nProperties of sunlight penetration scheme in ocean\n40.1. Scheme\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of sunlight penetration scheme in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"1 extinction depth\" \n# \"2 extinction depth\" \n# \"3 extinction depth\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"40.2. Ocean Colour\nIs Required: TRUE Type: BOOLEAN Cardinality: 1.1\nIs the ocean sunlight penetration scheme ocean colour dependent ?",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(value) \n# Valid Choices: \n# True \n# False \n# TODO - please enter value(s)\n",
"40.3. Extinction Depth\nIs Required: FALSE Type: STRING Cardinality: 0.1\nDescribe and list extinctions depths for sunlight penetration scheme (if applicable).",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"41. Boundary Forcing --> Tracers --> Fresh Water Forcing\nProperties of surface fresh water forcing in ocean\n41.1. From Atmopshere\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of surface fresh water forcing from atmos in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"41.2. From Sea Ice\nIs Required: TRUE Type: ENUM Cardinality: 1.1\nType of surface fresh water forcing from sea-ice in ocean",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# Valid Choices: \n# \"Freshwater flux\" \n# \"Virtual salt flux\" \n# \"Real salt flux\" \n# \"Other: [Please specify]\" \n# TODO - please enter value(s)\n",
"41.3. Forced Mode Restoring\nIs Required: TRUE Type: STRING Cardinality: 1.1\nType of surface salinity restoring in forced mode (OMIP)",
"# PROPERTY ID - DO NOT EDIT ! \nDOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring') \n\n# PROPERTY VALUE: \n# Set as follows: DOC.set_value(\"value\") \n# TODO - please enter value(s)\n",
"©2017 ES-DOC"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
kit-cel/wt
|
sigNT/systems/frequency_response.ipynb
|
gpl-2.0
|
[
"Content and Objective\n\nShow that frequency response can be generated by stimulating an LTI system with harmonics.\nIt is shown that Fourier transform of the impulse response yields identical results.\n\nImporting and Plotting Options",
"# importing\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n# showing figures inline\n%matplotlib inline\n\n# plotting options \nfont = {'size' : 20}\nplt.rc('font', **font)\nplt.rc('text', usetex=True)\n\nmatplotlib.rc('figure', figsize=(18, 10) )",
"Parameters",
"# length of impulse response\nN = 10\n\n# switch for choosing different impulse responses --> you may add more options if you like to\nswitch = 2\n\nif switch == 1: \n h = np.ones(N)\n\nelif switch == 2:\n a = 0.5\n h = a**( - np.arange( 0, N ) )\n \n \n# padding zeros \nh = np.hstack( [h, np.zeros_like( h ) ] )",
"Getting Frequency Response by Applying FFT",
"# frequency response by FFT\nH_fft = np.fft.fft( np.hstack( [ h, np.zeros( 9 * len( h ) ) ] ) )\n\n# frequency domain out of FFT parameters\ndelta_Omega = 2 * np.pi / len(H_fft )\nOmega = np.arange( -np.pi, np.pi, delta_Omega )",
"Getting Frequency Response as Response to Harmonics",
"# coarse quantiziation of frequency regime for the filterung in order to reduce computational load\nN_coarse = 100\n\ndelta_Omega_coarse = 2 * np.pi / N_coarse\nOmega_coarse = np.arange( -np.pi, np.pi, delta_Omega_coarse )\n\n# getting values of frequency response by filtering\nH_response = np.zeros_like( Omega_coarse, dtype = 'complex' )\n\nfor ind_Omega, val_Omega in enumerate( Omega_coarse ):\n\n # length of signal, time vector and IN signal\n N_sig = 500\n n = np.arange( 0, N_sig + 1 )\n x = np.exp( 1j * val_Omega * n )\n \n # OUT signal by convolution\n y = np.convolve( x, h ) \n \n # frequency response as factor\n # NOTE: since the factor is the same for all times, an arbitrary sample may be chosen\n H_response[ ind_Omega ] = y[ N_sig // 4 ] * x[ N_sig // 4 ].conjugate()",
"Plotting",
"plt.figure()\n\nplt.plot( Omega, np.abs( np.fft.fftshift( H_fft ) ), label= '$|H_{FFT}(\\\\Omega)|$' )\nplt.plot( Omega_coarse, np.abs( H_response ), label= '$|H_{resp.}(\\\\Omega)|$')\n\nplt.grid( True )\nplt.xlabel('$\\\\Omega$')\nplt.legend( loc='upper right')\n\n\nplt.figure()\n\nplt.plot( Omega, np.angle( np.fft.fftshift( H_fft ) ), label = '$\\\\angle H_{FFT}(\\\\Omega)$' )\nplt.plot( Omega_coarse, np.angle( H_response ), label = '$\\\\angle H_{resp.}(\\\\Omega)$' )\n\nplt.grid( True )\nplt.xlabel('$\\\\Omega$')\nplt.legend( loc='upper right')\n\n\nplt.figure()\n\nplt.plot( Omega[:-1], - np.diff( np.angle( np.fft.fftshift( H_fft ) ) ), label = '$\\\\tau_{g}(\\\\Omega)$' )\n\nplt.grid( True )\nplt.xlabel('$\\\\Omega$')\nplt.legend( loc='upper right')\n"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
elenduuche/deep-learning
|
autoencoder/Simple_Autoencoder_Solution.ipynb
|
mit
|
[
"A Simple Autoencoder\nWe'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data.\n\nIn this notebook, we'll be build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset.",
"%matplotlib inline\n\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', validation_size=0)",
"Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.",
"img = mnist.train.images[2]\nplt.imshow(img.reshape((28, 28)), cmap='Greys_r')",
"We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a single ReLU hidden layer. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a sigmoid activation on the output layer to get values matching the input.\n\n\nExercise: Build the graph for the autoencoder in the cell below. The input images will be flattened into 784 length vectors. The targets are the same as the inputs. And there should be one hidden layer with a ReLU activation and an output layer with a sigmoid activation. Feel free to use TensorFlow's higher level API, tf.layers. For instance, you would use tf.layers.dense(inputs, units, activation=tf.nn.relu) to create a fully connected layer with a ReLU activation. The loss should be calculated with the cross-entropy loss, there is a convenient TensorFlow function for this tf.nn.sigmoid_cross_entropy_with_logits (documentation). You should note that tf.nn.sigmoid_cross_entropy_with_logits takes the logits, but to get the reconstructed images you'll need to pass the logits through the sigmoid function.",
"# Size of the encoding layer (the hidden layer)\nencoding_dim = 32\n\nimage_size = mnist.train.images.shape[1]\n\ninputs_ = tf.placeholder(tf.float32, (None, image_size), name='inputs')\ntargets_ = tf.placeholder(tf.float32, (None, image_size), name='targets')\n\n# Output of hidden layer\nencoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu)\n\n# Output layer logits\nlogits = tf.layers.dense(encoded, image_size, activation=None)\n# Sigmoid output from\ndecoded = tf.nn.sigmoid(logits, name='output')\n\nloss = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_, logits=logits)\ncost = tf.reduce_mean(loss)\nopt = tf.train.AdamOptimizer(0.001).minimize(cost)",
"Training",
"# Create the session\nsess = tf.Session()",
"Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss and the test loss afterwards. \nCalling mnist.train.next_batch(batch_size) will return a tuple of (images, labels). We're not concerned with the labels here, we just need the images. Otherwise this is pretty straightfoward training with TensorFlow. We initialize the variables with sess.run(tf.global_variables_initializer()). Then, run the optimizer and get the loss with batch_cost, _ = sess.run([cost, opt], feed_dict=feed).",
"epochs = 20\nbatch_size = 200\nsess.run(tf.global_variables_initializer())\nfor e in range(epochs):\n for ii in range(mnist.train.num_examples//batch_size):\n batch = mnist.train.next_batch(batch_size)\n feed = {inputs_: batch[0], targets_: batch[0]}\n batch_cost, _ = sess.run([cost, opt], feed_dict=feed)\n\n print(\"Epoch: {}/{}...\".format(e+1, epochs),\n \"Training loss: {:.4f}\".format(batch_cost))",
"Checking out the results\nBelow I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts.",
"fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))\nin_imgs = mnist.test.images[:10]\nreconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})\n\nfor images, row in zip([in_imgs, reconstructed], axes):\n for img, ax in zip(images, row):\n ax.imshow(img.reshape((28, 28)), cmap='Greys_r')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\nfig.tight_layout(pad=0.1)\n\nsess.close()",
"Up Next\nWe're dealing with images here, so we can (usually) get better performance using convolution layers. So, next we'll build a better autoencoder with convolutional layers.\nIn practice, autoencoders aren't actually better at compression compared to typical methods like JPEGs and MP3s. But, they are being used for noise reduction, which you'll also build."
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
vivek8943/soft-boiled
|
notebooks/Explore_GMM.ipynb
|
apache-2.0
|
[
"Explore GMM Results\nThis notebook allows for the exploration of Gaussian Mixture Model based geoinferencing. It uses test messages and a pretrained model to begin to explore why the answer is what it is.",
"from src.algorithms.gmm import GMM\nfrom src.utils.geo import haversine\nimport numpy as np\n%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n# Import GMM Model Code\nfrom src.algorithms.gmm import GMM\nimport math\n# Make our plots default to larger\nplt.rcParams['figure.figsize'] = (12.0, 10.0)",
"Load a pretrained GMM Model and import training data ('data_path')",
"# Load test data\ntemp_table_name = 'tweets'\ntest_data_path = 'hdfs:///path/to/test/data/*'\n\ng = GMM(sc, sqlCtx, {'fields':set(['user.location', 'text']), 'json_path':'/local/path/to/twitter_format.json'})\n# Train Model [This takes a while so make sure to save it]\n#g.train('hdfs:///path/to/training/data')\n#g.save('/local/path/to/pretrained_model.pkl')\n\n# Load pretrained model\ng.load('/local/path/to/pretrained_model.pkl')",
"Evaluate the performance of this model on a set of test data",
"g.test(test_data_path)",
"Pull a set of test data to use for interactive exploration",
"# Create a temporary table in this context which allows us to explore interatively\nall_tweets = sqlCtx.parquetFile(test_data_path)\nall_tweets.cache()\nall_tweets.registerTempTable(temp_table_name)\n\n# NOTE: This where clause filters out US geo coorindates \n#where_clause = \"lang = 'en' and geo.coordinates is not null and user.location is not null\"\nwhere_clause = \"geo.coordinates is not null and user.location is not null\"\nlimit = 10000\nsql_stmt = 'select * from %s where %s limit %d'%(temp_table_name, where_clause, limit)\nprint 'Query Executed:', sql_stmt\n\n# Peform query and pull to local context\n# TODO: Consider switching to sampling\nlocal_tweets = sqlCtx.sql(sql_stmt).collect()\nprint 'Pulled %d tweets into local context'%len(local_tweets)",
"Helper functions",
"def print_tweet(tweet):\n print\n print 'Text:', tweet.text\n print 'User Specified Location:', tweet.user.location\n print 'Location:', tweet.geo.coordinates\n\n# Temporary block of code until the new gmm models are run\n####TODO REMOVE THIS when Re-Run!!!!!!!\nfrom sklearn import mixture\n\ndef combine_gmms(gmms):\n \"\"\" Takes an array of gaussian mixture models and produces a GMM that is the weighted sum of the models\"\"\"\n n_components = sum([g[0].n_components for g in gmms])\n covariance_type = gmms[0][0].covariance_type\n new_gmm = mixture.GMM(n_components=n_components, covariance_type=covariance_type)\n new_gmm.means_ = np.concatenate([g[0].means_ for g in gmms])\n new_gmm.covars_ = np.concatenate([g[0].covars_ for g in gmms])\n weights = np.concatenate([g[0].weights_ * ((1/g[1])**4) for g in gmms])\n new_gmm.weights_ = weights / np.sum(weights) # Normalize weights\n new_gmm.converged_ = True\n return new_gmm\n\ndef get_gmm_info(inputRow):\n (location, tokens) = GMM.tokenize(inputRow, fields=g.options['fields'])\n true_lat, true_lon = location\n models = []\n for token in tokens:\n if token in g.model:\n models.append(g.model[token])\n #min_error = None\n errors = [m[1] for m in models]\n if len(errors) > 0:\n min_error = min(errors)\n else:\n min_error = None\n \n if len(models) > 1:\n combined_gmm = combine_gmms(models)\n elif len(models) == 1:\n combined_gmm = models[0][0]\n else:\n np.nan\n #return (None, np.nan, np.nan, None)\n (best_lat, best_lon) = combined_gmm.means_[np.argmax(combined_gmm.weights_)]\n distance = haversine(best_lon, best_lat, true_lon, true_lat)\n return ((best_lat, best_lon), min_error, distance, combined_gmm)\n\ndef plot_gmm(gmm_model, true_ll=None, percent=None):\n plt.figure()\n y = np.linspace(90,-90, num=180)\n x = np.linspace(-180,180, num=360)\n X, Y = np.meshgrid(y,x)\n XX = np.array([X.ravel(), Y.ravel()]).T\n #Z = np.log(-gmm_model.score_samples(XX)[0]+1)\n Z = -gmm_model.score_samples(XX)[0]\n \n if percent:\n Z = np.exp2(gmm_model.score_samples(XX)[0]) \n target = np.sum(Z)*percent\n z_sorted = sorted(Z, reverse=True)\n i = 0\n sum = 0\n while sum < target:\n sum += z_sorted[i]\n i += 1\n print 'Percent of the world:', float(XX[Z > z_sorted[i]].shape[0])/XX.shape[0]*100\n Z[Z < z_sorted[i]] = 0\n Z = -np.log(Z)\n #Z = Z.reshape(X.shape)\n \n Z = Z.reshape(X.shape)\n\n m = Basemap(projection='mill', lon_0=0)#,lon_0=0.5*(lons[0]+lons[-1]))\n\n m.drawcountries()\n m.drawcoastlines()\n m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0]) # draw parallels\n m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,1]) # draw meridians\n #m.drawcounties()\n\n #cmap = mpl.cm.RdYlBu\n cmap = mpl.cm.pink\n norm = mpl.colors.Normalize(vmin=0, vmax = 5)\n\n X, Y = m(Y,X)\n CS = m.contourf(X, Y, Z, 25,linewidths=1.5, cmap=cmap)#, norm=norm)\n #CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),\n # levels=np.logspace(0, 3, 20))\n CB = plt.colorbar(CS, shrink=0.8, extend='both')\n \n # Plot estimated location\n (best_lat, best_lon) = gmm_model.means_[np.argmax(gmm_model.weights_)]\n best_lat, best_lon = m(best_lon, best_lat) \n plt.plot(best_lat, best_lon, '*g')\n \n if true_ll:\n lat, lon = m(true_ll[1], true_ll[0])\n plt.plot(lat, lon, '*b')\n \n #lon, lat = gmm_model.means_[i]\n for i in range (0, gmm_model.n_components):\n lat, lon = gmm_model.means_[i]\n weight = gmm_model.weights_[i]\n x, y = m(lon, lat)\n #plt.plot(x, y, 'or')#, markersize=6*weight)\n plt.show()\n\ndef plot_gmm_w_percentage(gmm_model, percent):\n y = 
np.linspace(90,-90, num=180)\n x = np.linspace(-180,180, num=360)\n X, Y = np.meshgrid(y,x)\n XX = np.array([X.ravel(), Y.ravel()]).T\n Z = np.exp(gmm_model.score_samples(XX)[0]) \n target = np.sum(Z)*percent\n z_sorted = sorted(Z, reverse=True)\n i = 0\n sum = 0\n while sum < target:\n sum += z_sorted[i]\n i += 1\n print 'Percent of the world:', float(XX[Z > z_sorted[i]].shape[0])/XX.shape[0]*100\n Z[Z < z_sorted[i]] = 0\n Z = -np.log(Z)\n Z = Z.reshape(X.shape)\n m = Basemap(projection='mill', lon_0=0)#,lon_0=0.5*(lons[0]+lons[-1]))\n\n m.drawcountries()\n m.drawcoastlines()\n m.drawparallels(np.arange(-90.,120.,30.),labels=[1,0,0,0]) # draw parallels\n m.drawmeridians(np.arange(0.,420.,60.),labels=[0,0,0,1]) # draw meridians\n #m.drawcounties()\n\n #cmap = mpl.cm.RdYlBu\n cmap = mpl.cm.pink\n norm = mpl.colors.Normalize(vmin=0, vmax = 5)\n\n X, Y = m(Y,X)\n CS = m.contourf(X, Y, Z, 25,linewidths=1.5, cmap=cmap)#, norm=norm)\n #CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),\n # levels=np.logspace(0, 3, 20))\n CB = plt.colorbar(CS, shrink=0.8, extend='both')\n plt.show()\n\n# Created a combined GMM for a tweet, score GMM performance and create a contour plot\ndef plot_row(inputRow): \n (est_location, min_error, error_distance, combined_gmm) = get_gmm_info(inputRow)\n print_tweet(inputRow)\n print 'Estimated Location:', est_location\n print 'Error (km):', error_distance\n plot_gmm(combined_gmm, true_ll=inputRow.geo.coordinates)",
"Look at probability distribution of a few tweets",
"plot_row(local_tweets[0])\n\nplot_row(local_tweets[6])\n\n# Print all the tweets we've pulled into the local context\nfor i, t in enumerate(local_tweets):\n print i, t.text\n\n# Compute local array of actual error and min training error\nmin_errors = []\nactual_errors = []\nskipped = 0\nfor i in range(len(local_tweets)):\n try:\n (est_location, min_error, error_distance, combined_gmm) = get_gmm_info(local_tweets[i])\n if min_error != 0 and error_distance !=0:\n min_errors.append(min_error)\n actual_errors.append(error_distance)\n except:\n skipped += 1\nprint 'Skipped %d of %d'%(skipped, len(local_tweets))",
"Compare prediction error [measured] to error in training data\nX-axis [km]: The error of the word with the minimum error in the training set. Error in the training set is defined as the median distance between the most likely point and every occurrence of that word in the training data\nY-axis [km]: The distance between the most likely point and the actual point\nThe second plot is a log-log plot with the same axes",
"plt.figure()\n#plt.plot(np.log(min_errors), np.log(actual_errors), '.')\nplt.plot(min_errors, actual_errors, '.')\nplt.axis([0,3000,0,3000])\n#plt.axis([0,np.log(3000),0,np.log(3000)])\n\n#print min(actual_errors)\nfrom scipy.stats import pearsonr\nprint pearsonr(min_errors, actual_errors)\nprint pearsonr(np.log(min_errors), np.log(actual_errors))\n\nplt.figure()\nplt.plot(np.log(min_errors), np.log(actual_errors), '.')\nplt.axis([0,np.log(3000),0,np.log(3000)])",
"Same plot as above but this time containing N percent of the probability mass",
"percent_of_mass_to_include = 0.8\ntweet = local_tweets[85]\n(est_location, min_error, error_distance, combined_gmm) = get_gmm_info(tweet)\nprint_tweet(tweet)\nprint 'Estimated Location:', est_location\nprint 'Error (km):', error_distance\nplot_gmm(combined_gmm, true_ll=tweet.geo.coordinates, percent=percent_of_mass_to_include)\n\n(est_location, min_error, error_distance, gmm_model) = get_gmm_info(local_tweets[5])\n\ny = np.linspace(90,-90, num=180*4)\nx = np.linspace(-180,180, num=360*4)\nX, Y = np.meshgrid(y,x)\nXX = np.array([X.ravel(), Y.ravel()]).T\nZ = np.exp(model_the[0].score_samples(XX)[0]) \n\nprint Z.shape\nprint np.sum(Z)*.25*.25\n\nmodel_the = g.model['nyc']\nmodel_the[0].weights_\n\nnp.sum(model_the[0].weights_)\n\nlen(model_the[0].weights_)\n\nb = model_the[0].score_samples(XX)\n\nb[1].shape\n\nb[1][1]\n\nprint max(b[1][1])\nprint sum(b[1][1])",
"Find probability mass for a given bouding box\nFunction using KDE which approximates the area better than a simple mesh grid. Otherwise we found that the mesh-grid could often under-sample the probability for especially 'peaky' distributions (such as nyc).",
"import statsmodels.sandbox.distributions.extras as ext\n#documented at http://statsmodels.sourceforge.net/devel/_modules/statsmodels/sandbox/distributions/extras.html#mvnormcdf\n\ndef prob_mass(gmm_model, upper_bound, lower_bound):\n total_prob = 0\n for i in range(0, len(gmm_model.weights_)):\n val = ext.mvnormcdf(upper_bound, gmm_model.means_[i], gmm_model.covars_[i], lower_bound, maxpts=2000)\n # below is necessary as a very rare occurance causes some guassians to have a result of nan \n #(likely exeedingly low probability)\n if math.isnan(val): \n pass\n else:\n weighted_val = val * gmm_model.weights_[i]\n total_prob += weighted_val\n return total_prob\n\n#good test is to set upper and lower limits to extent of globe and see if it approximates 1\n#coordinates should go \nupper = [90,180]\nlower = [-90,-180]\nprint prob_mass(g.model['nyc'][0], upper, lower)\nprint prob_mass(g.model['the'][0], upper, lower)\n\n#mass of probability within ~10 km of the center point (0.1 degrees is 11.132 KM N/S)\ngmm_model = g.model['nyc'][0]\ndistance = 10\ndistance_deg = distance/111.32\nbest_point = gmm_model.means_[np.argmax(gmm_model.weights_)]\nprint best_point, distance_deg\nupper = [best_point[0]+distance_deg,best_point[1]+distance_deg]\nlower = [best_point[0]-distance_deg,best_point[1]-distance_deg]\nprint gmm_model, upper, lower\nprint prob_mass(gmm_model, upper, lower)\n\n# Compute local array of actual error and probability mass over ~100 km (which is about 62 miles)\nprobs = []\nactual_errors = []\nskipped = 0\ndistance = 100\ndistance_deg = distance/111.32\nact_within = 0\ncnt = 0\nfor i in range(0, len(local_tweets)):\n try:\n (est_location, min_error, error_distance, combined_gmm) = get_gmm_info(local_tweets[i])\n if min_error != 0 and error_distance !=0:\n best_point = combined_gmm.means_[np.argmax(combined_gmm.weights_)]\n upper = [best_point[0]+distance_deg,best_point[1]+distance_deg]\n lower = [best_point[0]-distance_deg,best_point[1]-distance_deg]\n mass = prob_mass(combined_gmm, upper, lower)\n w = np.sum(combined_gmm.weights_)\n if math.isnan(mass):\n skipped += 1\n else:\n actual_loc = local_tweets[i].geo.coordinates\n #print best_point, actual_loc, error_distance\n probs.append(mass)\n actual_errors.append(error_distance)\n cnt += 1\n #check if the actual_location is within the same 100 km box\n if (best_point[0]-distance_deg<actual_loc[0]<best_point[0]+distance_deg and\n best_point[1]-distance_deg<actual_loc[1]<best_point[1]+distance_deg):\n act_within +=1\n except:\n #raise\n #probs.append(0)\n #actual_errors.append(0)\n skipped += 1\ncorrect_ratio = float(act_within)/float(cnt) *100\naverage_prob = np.average(probs)*100\nprint 'Skipped %d of %d'%(skipped, len(local_tweets))\nprint 'There were %d pct of tweets within radius and average probability of %f'%(correct_ratio, average_prob)\n\nradius = [25,50,75,100,125,150,200,250,300,350,400, 450, 500, 600, 700, 800, 900, 1000]\ncorrect = []\naverage = []\nfor r in radius:\n probs = []\n actual_errors = []\n skipped = 0\n distance = r\n distance_deg = distance/111.32\n act_within = 0\n cnt = 0\n for i in range(0, len(local_tweets)):\n try:\n (est_location, min_error, error_distance, combined_gmm) = get_gmm_info(local_tweets[i])\n if min_error != 0 and error_distance !=0:\n best_point = combined_gmm.means_[np.argmax(combined_gmm.weights_)]\n upper = [best_point[0]+distance_deg,best_point[1]+distance_deg]\n lower = [best_point[0]-distance_deg,best_point[1]-distance_deg]\n mass = prob_mass(combined_gmm, upper, lower)\n w = 
np.sum(combined_gmm.weights_)\n if math.isnan(mass):\n skipped += 1\n else:\n actual_loc = local_tweets[i].geo.coordinates\n #print best_point, actual_loc, error_distance\n probs.append(mass)\n actual_errors.append(error_distance)\n cnt += 1\n #check if the actual_location is within the same 100 km box\n if (best_point[0]-distance_deg<actual_loc[0]<best_point[0]+distance_deg and\n best_point[1]-distance_deg<actual_loc[1]<best_point[1]+distance_deg):\n act_within +=1\n except:\n #raise\n #probs.append(0)\n #actual_errors.append(0)\n skipped += 1\n correct_ratio = float(act_within)/float(cnt) *100\n average_prob = np.average(probs)*100\n correct.append(correct_ratio)\n average.append(average_prob)\n #print 'Skipped %d of %d'%(skipped, len(local_tweets))\n print 'There were %f pct of tweets within %d radius and average probability of %f'%(correct_ratio, r, average_prob)\n\nplt.plot(radius, correct)\nplt.plot(radius, average, 'g')\n\ncorrect\n\nprint probs\nprint np.average(probs)\nprint actual_errors\n\nplt.figure()\n#plt.plot(np.log(min_errors), np.log(actual_errors), '.')\nplt.plot(probs, actual_errors, '.')\nplt.axis([0,1,0,3000])\n#plt.axis([0,np.log(3000),0,np.log(3000)])\n\n#print min(actual_errors)\nfrom scipy.stats import pearsonr\nprint pearsonr(probs, actual_errors)\nprint pearsonr(probs, np.log(actual_errors))\n\nplt.figure()\nplt.plot(np.log(probs), np.log(actual_errors), '.')\n#plt.axis([0,np.log(.75),0,np.log(3000)])\n\nprint min(probs), max(probs), np.average(probs)\n\n(est_location, min_error, error_distance, combined_gmm) = get_gmm_info(local_tweets[85])\n\nbest_point = combined_gmm.means_[np.argmax(combined_gmm.weights_)]\nupper = [best_point[0]+distance_deg,best_point[1]+distance_deg]\nlower = [best_point[0]-distance_deg,best_point[1]-distance_deg]\nmass = prob_mass(combined_gmm, upper, lower)\nprint mass\n\ntotal_prob = 0\nfor i in range(0, len(combined_gmm.weights_)):\n val = ext.mvnormcdf(upper, combined_gmm.means_[i], combined_gmm.covars_[i], lower, maxpts=2000)\n weighted_val = val * combined_gmm.weights_[i]\n total_prob += weighted_val\n if math.isnan(val):\n print i, val, total_prob\n\nplt.hist(probs)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
yvesalexandre/bandicoot
|
demo/demo.ipynb
|
mit
|
[
"Bandicoot notebook\nbandicoot is an open-source python toolbox to analyze mobile phone metadata. For more information, see: http://bandicoot.mit.edu/\nThe source code of the notebook is available as demo.ipynb and a plain\nPython version as demo.py. You can download them from our repository on Github at https://github.com/yvesalexandre/bandicoot/tree/master/demo\n<br />\n<div class=\"alert alert-info\" role=\"alert\">\n<p><strong>Try bandicoot on your phone!</strong></p>\n<p>If you want to try bandicoot with your own data, download our Android app at <a href=\"http://bandicoot.mit.edu/android\">bandicoot.mit.edu/android</a></p>\n</div>\n\n<hr>\n\nInput files\n<img src=\"mini-mockups-01.png\" width=\"80%\" style=\"border: 1px solid #aaa\" />",
"# Records for the user 'ego'\n!head -n 5 data/ego.csv\n\n# GPS locations of cell towers\n!head -n 5 data/antennas.csv",
"Loading a user",
"import bandicoot as bc\n\nU = bc.read_csv('ego', 'data/', 'data/antennas.csv')",
"Visualization\nExport and serve an interactive visualization using:\npython\nbc.visualization.run(U)\nor export only using:\npython\nbc.visualization.export(U, 'my-viz-path')",
"import os\nviz_path = os.path.dirname(os.path.realpath(__name__)) + '/viz'\n\nbc.visualization.export(U, viz_path)\n\nfrom IPython.display import IFrame\nIFrame(\"/files/viz/index.html\", \"100%\", 700)",
"Individual and spatial indicators\nUsing bandicoot, compute aggregated indicators from bc.individual and bc.spatial:",
"bc.individual.percent_initiated_conversations(U)\n\nbc.spatial.number_of_antennas(U)\n\nbc.spatial.radius_of_gyration(U)",
"Let's play with indicators\nThe signature of the active_days indicators is:\npython\nbc.individual.active_days(user, groupby='week', interaction='callandtext', summary='default', split_week=False, split_day=False, filter_empty=True, datatype=None)\nWhat does that mean?\n<hr />\n\nThe ‘groupby’ keyword\n<br />\n<div class=\"alert alert-info\" role=\"alert\">\n<p><strong>Weekly aggregation</strong></p>\n<p>By default, _bandicoot_ computes the indicators on a weekly basis and returns the average (mean) over all the weeks available and its standard deviation (std) in a nested dictionary.</p>\n\n<img src=\"mini-mockups-02.png\" width=\"80%\" style=\"border: 1px solid #aaa\" />\n\n\n</div>",
"bc.individual.active_days(U)",
"The groupby keyword controls the aggregation:\n\ngroupby='week' to divide by week (by default),\ngroupby='month' to divide by month,\ngroupby=None to aggregate all values.",
"bc.individual.active_days(U, groupby='week')\n\nbc.individual.active_days(U, groupby='month')\n\nbc.individual.active_days(U, groupby=None)",
"The ‘summary’ keyword\nSome indicators such as active_days returns one number. Others, such as duration_of_calls returns a distribution.\nThe summary keyword can take three values:\n\nsummary='default' to return mean and standard deviation,\nsummary='extended' for the second type of indicators, to return mean, sem, median, skewness and std of the distribution,\nsummary=None to return the full distribution.",
"bc.individual.call_duration(U)\n\nbc.individual.call_duration(U, summary='extended')\n\nbc.individual.call_duration(U, summary=None)",
"Splitting days and weeks\n\nsplit_week divide records by 'all week', 'weekday', and 'weekend'.\nsplit_day divide records by 'all day', 'day', and 'night'.",
"bc.individual.active_days(U, split_week=True, split_day=True)",
"Exporting indicators\nThe function bc.utils.all computes automatically all indicators for a single user.\nYou can use the same keywords to group by week/month/all time range, or return extended statistics.",
"features = bc.utils.all(U, groupby=None)\n\nfeatures",
"Exporting in CSV and JSON\nbandicoot supports exports in CSV and JSON format. Both to_csv and to_json functions require either a single feature dictionnary, or a list of dictionnaries (for multiple users).",
"bc.to_csv(features, 'demo_export_user.csv')\nbc.to_json(features, 'demo_export_user.json')\n\n!head demo_export_user.csv\n\n!head -n 15 demo_export_user.json",
"Extending bandicoot\nYou can easily develop your indicator using the @grouping decorator. You only need to write a function taking as input a list of records and returning an integer or a list of integers (for a distribution). The @grouping decorator wraps the function and call it for each group of weeks.",
"from bandicoot.helper.group import grouping\n\n@grouping(interaction='call')\ndef shortest_call(records):\n in_durations = (r.call_duration for r in records)\n return min(in_durations)\n\nshortest_call(U)\n\nshortest_call(U, split_day=True)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
srcole/qwm
|
sfn/.ipynb_checkpoints/Poster viewer distribution by state-checkpoint.ipynb
|
mit
|
[
"Poster popularity by country\nThis notebook loads data of poster viewership at the SfN 2016 annual meeting, organized by the countries that were affiliated with each poster.\nWe find that the poster popularity across countries is not significant compare to what is expected by chance.\nImport libraries and load data",
"%config InlineBackend.figure_format = 'retina'\n%matplotlib inline\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nsns.set_style('white')\n\nimport pandas as pd\n\n# Load data\ndf = pd.DataFrame.from_csv('./posterviewers_by_country.csv')\nkey_N = 'Number of people'",
"1. Summarize data by country",
"# 0. Count number of posters from each state\n# Calculate mean poster popularity\nstates = df['Country'].unique()\ndict_state_counts = {'Country':states,'count':np.zeros(len(states)),'popularity':np.zeros(len(states))}\nfor i, s in enumerate(states):\n dict_state_counts['count'][i] = int(sum(df['Country']==s))\n dict_state_counts['popularity'][i] = np.round(np.mean(df[df['Country']==s][key_N]),3)\ndf_counts = pd.DataFrame.from_dict(dict_state_counts)\n\n# Visualize dataframe\n# count = total number of posters counted affiliated with that country\n# popularity = average number of viewers at a poster affiliated with that country\ndf_counts.head()",
"2. Poster popularity vs. prevalence\nAcross states in the United States, we found a positive correlation between the number of posters from a state and the popularity of those posters. We debatably see this again across countries to a trending level of significance (1-tailed p-value = 0.06)",
"print sp.stats.spearmanr(np.log10(df_counts['count']),df_counts['popularity'])\nplt.figure(figsize=(3,3))\nplt.semilogx(df_counts['count'],df_counts['popularity'],'k.')\nplt.xlabel('Number of posters\\nin the state')\nplt.ylabel('Average number of viewers per poster')\nplt.ylim((-.1,3.6))\nplt.xlim((.9,1000))",
"3. Permutation tests: difference in popularity across countries\nIn this code, we test if the relative popularity / unpopularity observed for any country is outside what is expected by chance\nHere, the most popular and least popular countries are defined by a nonparametric statiscal test between the number of viewers at posters from their country, compared to posters from all other countries.",
"# Simulate randomized data\nNperm = 100\nN_posters = len(df)\nrand_statepop = np.zeros((Nperm,len(states)),dtype=np.ndarray)\nrand_statepopmean = np.zeros((Nperm,len(states)))\nfor i in range(Nperm):\n # Random permutation of posters, organized by state\n randperm_viewers = np.random.permutation(df[key_N].values)\n for j, s in enumerate(states):\n rand_statepop[i,j] = randperm_viewers[np.where(df['Country']==s)[0]]\n rand_statepopmean[i,j] = np.mean(randperm_viewers[np.where(df['Country']==s)[0]])\n\n# True data: Calculate all p-values for the difference between 1 state's popularity and the rest\nmin_N_posters = 10\nstates_big = states[np.where(df_counts['count']>=min_N_posters)[0]]\nN_big = len(states_big)\nt_true_all = np.zeros(N_big)\np_true_all = np.zeros(N_big)\nfor i, state in enumerate(states_big):\n t_true_all[i], _ = sp.stats.ttest_ind(df[df['Country']==state][key_N],df[df['Country']!=state][key_N])\n _, p_true_all[i] = sp.stats.mannwhitneyu(df[df['Country']==state][key_N],df[df['Country']!=state][key_N])\n \npmin_pop = np.min(p_true_all[np.where(t_true_all>0)[0]])\npmin_unpop = np.min(p_true_all[np.where(t_true_all<0)[0]])\n\nprint 'Most popular country: ', states_big[np.argmax(t_true_all)], '. p=', str(pmin_pop)\nprint 'Least popular country: ', states_big[np.argmin(t_true_all)], '. p=', str(pmin_unpop)\n\n# Calculate minimum p-values for each permutation\n\n# Calculate all p and t values\nt_rand_all = np.zeros((Nperm,N_big))\np_rand_all = np.zeros((Nperm,N_big))\npmin_pop_rand = np.zeros(Nperm)\npmin_unpop_rand = np.zeros(Nperm)\nfor i in range(Nperm):\n for j, state in enumerate(states_big):\n idx_use = range(len(states_big))\n idx_use.pop(j)\n t_rand_all[i,j], _ = sp.stats.ttest_ind(rand_statepop[i,j],np.hstack(rand_statepop[i,idx_use]))\n _, p_rand_all[i,j] = sp.stats.mannwhitneyu(rand_statepop[i,j],np.hstack(rand_statepop[i,idx_use]))\n \n # Identify the greatest significance of a state being more popular than the rest\n pmin_pop_rand[i] = np.min(p_rand_all[i][np.where(t_rand_all[i]>0)[0]])\n # Identify the greatest significance of a state being less popular than the rest\n pmin_unpop_rand[i] = np.min(p_rand_all[i][np.where(t_rand_all[i]<0)[0]])\n\n# Test if most popular and least popular countries are outside of expectation\nprint 'Chance of a state being more distinctly popular than Canada: '\nprint sum(i < pmin_pop for i in pmin_pop_rand) / float(len(pmin_pop_rand))\nprint 'Chance of a state being less distinctly popular than US: '\nprint sum(i < pmin_unpop for i in pmin_unpop_rand) / float(len(pmin_unpop_rand))"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
statsmodels/statsmodels.github.io
|
v0.12.2/examples/notebooks/generated/discrete_choice_example.ipynb
|
bsd-3-clause
|
[
"Discrete Choice Models\nFair's Affair data\nA survey of women only was conducted in 1974 by Redbook asking about extramarital affairs.",
"%matplotlib inline\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import logit\n\nprint(sm.datasets.fair.SOURCE)\n\nprint( sm.datasets.fair.NOTE)\n\ndta = sm.datasets.fair.load_pandas().data\n\ndta['affair'] = (dta['affairs'] > 0).astype(float)\nprint(dta.head(10))\n\nprint(dta.describe())\n\naffair_mod = logit(\"affair ~ occupation + educ + occupation_husb\"\n \"+ rate_marriage + age + yrs_married + children\"\n \" + religious\", dta).fit()\n\nprint(affair_mod.summary())",
"How well are we predicting?",
"affair_mod.pred_table()",
"The coefficients of the discrete choice model do not tell us much. What we're after is marginal effects.",
"mfx = affair_mod.get_margeff()\nprint(mfx.summary())\n\nrespondent1000 = dta.iloc[1000]\nprint(respondent1000)\n\nresp = dict(zip(range(1,9), respondent1000[[\"occupation\", \"educ\",\n \"occupation_husb\", \"rate_marriage\",\n \"age\", \"yrs_married\", \"children\",\n \"religious\"]].tolist()))\nresp.update({0 : 1})\nprint(resp)\n\nmfx = affair_mod.get_margeff(atexog=resp)\nprint(mfx.summary())",
"predict expects a DataFrame since patsy is used to select columns.",
"respondent1000 = dta.iloc[[1000]]\naffair_mod.predict(respondent1000)\n\naffair_mod.fittedvalues[1000]\n\naffair_mod.model.cdf(affair_mod.fittedvalues[1000])",
"The \"correct\" model here is likely the Tobit model. We have an work in progress branch \"tobit-model\" on github, if anyone is interested in censored regression models.\nExercise: Logit vs Probit",
"fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nsupport = np.linspace(-6, 6, 1000)\nax.plot(support, stats.logistic.cdf(support), 'r-', label='Logistic')\nax.plot(support, stats.norm.cdf(support), label='Probit')\nax.legend();\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nsupport = np.linspace(-6, 6, 1000)\nax.plot(support, stats.logistic.pdf(support), 'r-', label='Logistic')\nax.plot(support, stats.norm.pdf(support), label='Probit')\nax.legend();",
"Compare the estimates of the Logit Fair model above to a Probit model. Does the prediction table look better? Much difference in marginal effects?\nGeneralized Linear Model Example",
"print(sm.datasets.star98.SOURCE)\n\nprint(sm.datasets.star98.DESCRLONG)\n\nprint(sm.datasets.star98.NOTE)\n\ndta = sm.datasets.star98.load_pandas().data\nprint(dta.columns)\n\nprint(dta[['NABOVE', 'NBELOW', 'LOWINC', 'PERASIAN', 'PERBLACK', 'PERHISP', 'PERMINTE']].head(10))\n\nprint(dta[['AVYRSEXP', 'AVSALK', 'PERSPENK', 'PTRATIO', 'PCTAF', 'PCTCHRT', 'PCTYRRND']].head(10))\n\nformula = 'NABOVE + NBELOW ~ LOWINC + PERASIAN + PERBLACK + PERHISP + PCTCHRT '\nformula += '+ PCTYRRND + PERMINTE*AVYRSEXP*AVSALK + PERSPENK*PTRATIO*PCTAF'",
"Aside: Binomial distribution\nToss a six-sided die 5 times, what's the probability of exactly 2 fours?",
"stats.binom(5, 1./6).pmf(2)\n\nfrom scipy.special import comb\ncomb(5,2) * (1/6.)**2 * (5/6.)**3\n\nfrom statsmodels.formula.api import glm\nglm_mod = glm(formula, dta, family=sm.families.Binomial()).fit()\n\nprint(glm_mod.summary())",
"The number of trials",
"glm_mod.model.data.orig_endog.sum(1)\n\nglm_mod.fittedvalues * glm_mod.model.data.orig_endog.sum(1)",
"First differences: We hold all explanatory variables constant at their means and manipulate the percentage of low income households to assess its impact\non the response variables:",
"exog = glm_mod.model.data.orig_exog # get the dataframe\n\nmeans25 = exog.mean()\nprint(means25)\n\nmeans25['LOWINC'] = exog['LOWINC'].quantile(.25)\nprint(means25)\n\nmeans75 = exog.mean()\nmeans75['LOWINC'] = exog['LOWINC'].quantile(.75)\nprint(means75)",
"Again, predict expects a DataFrame since patsy is used to select columns.",
"resp25 = glm_mod.predict(pd.DataFrame(means25).T)\nresp75 = glm_mod.predict(pd.DataFrame(means75).T)\ndiff = resp75 - resp25",
"The interquartile first difference for the percentage of low income households in a school district is:",
"print(\"%2.4f%%\" % (diff[0]*100))\n\nnobs = glm_mod.nobs\ny = glm_mod.model.endog\nyhat = glm_mod.mu\n\nfrom statsmodels.graphics.api import abline_plot\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, ylabel='Observed Values', xlabel='Fitted Values')\nax.scatter(yhat, y)\ny_vs_yhat = sm.OLS(y, sm.add_constant(yhat, prepend=True)).fit()\nfig = abline_plot(model_results=y_vs_yhat, ax=ax)",
"Plot fitted values vs Pearson residuals\nPearson residuals are defined to be\n$$\\frac{(y - \\mu)}{\\sqrt{(var(\\mu))}}$$\nwhere var is typically determined by the family. E.g., binomial variance is $np(1 - p)$",
"fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, title='Residual Dependence Plot', xlabel='Fitted Values',\n ylabel='Pearson Residuals')\nax.scatter(yhat, stats.zscore(glm_mod.resid_pearson))\nax.axis('tight')\nax.plot([0.0, 1.0],[0.0, 0.0], 'k-');",
"Histogram of standardized deviance residuals with Kernel Density Estimate overlaid\nThe definition of the deviance residuals depends on the family. For the Binomial distribution this is\n$$r_{dev} = sign\\left(Y-\\mu\\right)*\\sqrt{2n(Y\\log\\frac{Y}{\\mu}+(1-Y)\\log\\frac{(1-Y)}{(1-\\mu)}}$$\nThey can be used to detect ill-fitting covariates",
"resid = glm_mod.resid_deviance\nresid_std = stats.zscore(resid)\nkde_resid = sm.nonparametric.KDEUnivariate(resid_std)\nkde_resid.fit()\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111, title=\"Standardized Deviance Residuals\")\nax.hist(resid_std, bins=25, density=True);\nax.plot(kde_resid.support, kde_resid.density, 'r');",
"QQ-plot of deviance residuals",
"fig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nfig = sm.graphics.qqplot(resid, line='r', ax=ax)"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
poppy-project/pypot
|
samples/notebooks/Record, Save, and Play Moves on a Poppy Creature.ipynb
|
gpl-3.0
|
[
"Record, Save, and Play Moves on a Poppy Creature\nThis notebook is still work in progress! Feedbacks are welcomed!\nIn this tutorial we will show how to:\n* record moves by direct demonstration on a Poppy Creature\n* save them to the disk - and re-load them\n* play, and re-play the best moves\nTo follow this notebook, you should already have installed everything needed to control a Poppy Creature. The examples below used a Poppy Ergo but then can be easily transposed to a Poppy Humanoid or to any other creatures.\nConnect to your Poppy Creature\nFirst, connect to your Poppy Creature and put it in its \"base\" position so you can easily record motions.\nHere we use a Poppy Ergo but you can replace it by a Poppy Humanoid.",
"from pypot.creatures import PoppyErgo\n\npoppy = PoppyErgo()\n\nfor m in poppy.motors:\n m.compliant = False\n m.goal_position = 0.0",
"Import the Move, Recorder and Player",
"# Import everything you need for recording, playing, saving, and loading Moves\n\n# Move: object used to represent a movement\n# MoveRecorder: object used to record a Move\n# MovePlayer: object used to play (and re-play) a Move\n\nfrom pypot.primitive.move import Move, MoveRecorder, MovePlayer",
"Create a Recorder for the robot Poppy",
"record_frequency = 50.0 # This means that a new position will be recorded 50 times per second.\nrecorded_motors = [poppy.m4, poppy.m5, poppy.m6] # We will record the position of the 3 last motors of the Ergo\n\n# You can also use alias for the recorded_motors\n# e.g. recorder = MoveRecorder(poppy, record_frequency, poppy.tip)\n# or even to record all motors position\n# recorder = MoveRecorder(poppy, record_frequency, poppy.motors)\n\nrecorder = MoveRecorder(poppy, record_frequency, recorded_motors)",
"Start the recording\nFirst, turn the recorded motors compliant, so you can freely move them:",
"for m in recorded_motors:\n m.compliant = True",
"Starts the recording when you are ready!",
"recorder.start()",
"Stop the recording\nStop it when you are done demonstrating the movement.",
"recorder.stop()",
"Turn back off the compliance.",
"for m in recorded_motors:\n m.compliant = False",
"Get the recorder Move and store it on the disk\nSave the recorded move on the text file named 'mymove.json'.",
"recorded_move = recorder.move\n\nwith open('mymove.json', 'w') as f:\n recorded_move.save(f)",
"Load a saved Move\nRe-load it from the file jsut as an example purpose.",
"with open('mymove.json') as f:\n loaded_move = Move.load(f)",
"Create a Move Player and Play Back a Recorded Move\nFirst, create the object used to re-play a recorded Move.",
"player = MovePlayer(poppy, loaded_move)",
"You can start the play back whenever you want:",
"player.start()",
"You can play your move as many times as you want. Note, that we use the wait_to_stop method to wait for the first play abck to end before running it again.",
"for _ in range(3):\n player.start()\n player.wait_to_stop()"
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |