repo_name: string (6-100) | path: string (4-191) | copies: string (1-3) | size: string (4-6) | content: string (935-727k) | license: 15 classes
eig-2017/the-magical-csv-merge-machine | merge_machine/test_es.py | 1 | 8679 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 18 16:42:41 2017
@author: m75380
# Ideas: Learn analysers and weights for blocking on ES directly
# Put all fields to learn blocking by exact match on other fields
https://www.elastic.co/guide/en/elasticsearch/reference/current/multi-fields.html
$ ./bin/elasticsearch
queries = self.current_queries
new_res = dict()
source_idxs = self.current_queries[0].history_pairs.keys()
for idx in source_idxs:
count = defaultdict(int)
for query in queries:
if query.history_pairs[idx]:
count[query.history_pairs[idx][0]] += 1
else:
count['nores'] += 1
new_res[idx] = sorted(list(count.items()), key=lambda x: x[1], reverse=True)[0][0]
"""
import pandas as pd
from es_connection import es, ic
from merge_machine import es_insert
from merge_machine.es_labeller import ConsoleLabeller
from merge_machine.es_match import es_linker
dir_path = 'data/sirene'
chunksize = 3000
file_len = 10*10**6
force_re_index = False
sirene_index_name = '123vivalalgerie2'
test_num = 0
if test_num == 0:
source_file_path = 'local_test_data/source.csv'
ref_file_path = ''
match_cols = [{'source': 'commune', 'ref': 'LIBCOM'},
{'source': 'lycees_sources', 'ref': 'NOMEN_LONG'}]
source_sep = ','
source_encoding = 'utf-8'
ref_table_name = sirene_index_name
# must_not_filters / NOMEN_LONG / ["ASSOCIATION", "ASSOC", 'ASS', 'PARENTS', 'CONSEIL', 'FCPE', 'FSE', 'FOYER', 'LYCEENS', 'MAISON']
elif test_num == 1:
source_file_path = 'local_test_data/integration_5/data_ugly.csv'
match_cols = [{'source': 'VILLE', 'ref': 'L6_NORMALISEE'},
{'source': 'ETABLISSEMENT', 'ref': 'NOMEN_LONG'}]
source_sep = ';'
source_encoding = 'windows-1252'
ref_table_name = sirene_index_name
elif test_num == 2:
# ALIM to SIRENE
source_file_path = 'local_test_data/integration_3/export_alimconfiance.csv'
match_cols = [{'source': 'Libelle_commune', 'ref': 'LIBCOM'},
#{'source': 'Libelle_commune', 'ref': 'L6_NORMALISEE'},
{'source': 'ods_adresse', 'ref': 'L4_NORMALISEE'},
{'source': 'APP_Libelle_etablissement', 'ref': ('L1_NORMALISEE',
'ENSEIGNE', 'NOMEN_LONG')}]
source_sep = ';'
source_encoding = 'utf-8'
ref_table_name = sirene_index_name
elif test_num == 3:
# HAL to GRID
source_file_path = 'local_test_data/integration_4/hal.csv'
match_cols = [{
"source": ("parentName_s", "label_s"),
"ref": ("Name", "City")
}]
source_sep = '\t'
source_encoding = 'utf-8'
ref_table_name = '01c670508e478300b9ab7c639a76c871'
elif test_num == 4:
source_file_path = 'local_test_data/integration_6_hal_2/2017_09_15_HAL_09_08_2015_Avec_RecageEchantillon.csv'
match_cols = [{
"source": ("parentName_s", "label_s"),
"ref": ("Name", "City")
}]
source_sep = ';'
source_encoding = 'ISO-8859-1'
ref_table_name = '01c670508e478300b9ab7c639a76c871'
elif test_num == 5:
# Test on very short file
source_file_path = 'local_test_data/source_5_lines.csv'
match_cols = [{'source': 'commune', 'ref': 'LIBCOM'},
{'source': 'lycees_sources', 'ref': 'NOMEN_LONG'}]
source_sep = ','
source_encoding = 'utf-8'
ref_table_name = sirene_index_name
else:
raise Exception('Not a valid test number')
source = pd.read_csv(source_file_path,
sep=source_sep, encoding=source_encoding,
dtype=str, nrows=chunksize)
source = source.where(source.notnull(), '')
# =============================================================================
# Define the columns to index
# =============================================================================
if test_num in [0,1,2,5]:
columns_to_index = {
'SIRET': {},
'SIREN': {},
'NIC': {},
'L1_NORMALISEE': {
'french', 'integers', 'n_grams', 'city'
},
'L4_NORMALISEE': {
'french', 'integers', 'n_grams', 'city'
},
'L6_NORMALISEE': {
'french', 'integers', 'n_grams', 'city'
},
'L1_DECLAREE': {
'french', 'integers', 'n_grams', 'city'
},
'L4_DECLAREE': {
'french', 'integers', 'n_grams', 'city'
},
'L6_DECLAREE': {
'french', 'integers', 'n_grams', 'city'
},
'LIBCOM': {
'french', 'n_grams', 'city'
},
'CEDEX': {},
'ENSEIGNE': {
'french', 'integers', 'n_grams', 'city'
},
'NOMEN_LONG': {
'french', 'integers', 'n_grams', 'city'
},
#Keyword only 'LIBNATETAB': {},
'LIBAPET': {},
'PRODEN': {},
'PRODET': {}
}
elif test_num in [3, 4]:
columns_to_index = {
"Name": {
'french', 'whitespace', 'integers', 'end_n_grams', 'n_grams', 'city'
},
"City": {
'french', 'whitespace', 'integers', 'end_n_grams', 'n_grams', 'city'
}
}
# =============================================================================
# Index the referential
# =============================================================================
import json
testing = True
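# (Re)build the Elasticsearch index only when forced or missing: create it with
# the analyzer settings generated from columns_to_index, then bulk-index the
# referential file in chunks.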
if force_re_index or (not ic.exists(ref_table_name)):
if ic.exists(ref_table_name):
ic.delete(ref_table_name)
ref_gen = pd.read_csv(ref_file_path,
usecols=columns_to_index.keys(),
dtype=str, chunksize=40000)
index_settings = es_insert.gen_index_settings(columns_to_index)
ic.create(ref_table_name, body=json.dumps(index_settings))
es_insert.index(ref_gen, ref_table_name, testing)
# =============================================================================
# Initiate the labellers
# =============================================================================
if test_num == 2:
columns_certain_match = {'source': ['SIRET'], 'ref': ['SIRET']}
labellers = dict()
for i in range(3):
labellers[i] = ConsoleLabeller(es, source, ref_table_name, match_cols, columns_to_index)
labellers[i].auto_label(columns_certain_match)
# import cProfile
# cProfile.run("labeller.auto_label(columns_certain_match)", "restats")
#
# import pstats
# p = pstats.Stats('restats')
# p.strip_dirs().sort_stats(-1).print_stats()
elif test_num == 4:
columns_certain_match = {'source': ['grid'], 'ref': ['ID']}
labeller = ConsoleLabeller(es, source, ref_table_name, match_cols, columns_to_index)
else:
labeller = ConsoleLabeller(es, source, ref_table_name, match_cols, columns_to_index)
labeller.console_labeller()
(new_source, _) = es_linker(es, source, labeller.export_best_params())
for (i, row) in new_source.iloc[:20].iterrows():
print('*'*50)
for match in match_cols:
print(match['source'], '->', row[match['source']])
print(match['ref'], '->', row[match['ref'] + '__REF'])
print('\n')
# if i == 15:
# print('Updating musts')
# if test_num == 0:
# labeller.update_musts({'NOMEN_LONG': ['lycee']},
# {'NOMEN_LONG': ['ass', 'association', 'sportive',
# 'foyer', 'maison', 'amicale']})
best_query = labeller.current_queries[0]
print(best_query._as_tuple())
print('Precision:', best_query.precision)
print('Recall:', best_query.recall)
print('Score:', best_query.score)
assert False
from collections import defaultdict
# Majority vote on labellers
pairs_count = dict()
for labeller in labellers.values():
best_query = labeller.current_queries[0]
for source_id, pairs in best_query.history_pairs.items():
if source_id not in pairs_count:
pairs_count[source_id] = defaultdict(int)
if pairs:
pair = pairs[0]
pairs_count[source_id][pair] += 1
res = dict()
for source_id, pair_count in pairs_count.items():
if pair_count:
res[source_id] = sorted(list(pair_count.items()), key=lambda x: x[1])[-1][0]
else:
res[source_id] = None
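# Minimal, self-contained sketch (illustrative toy data, not from the project)
# of the majority vote implemented above: for each source row, keep the pair
# most often ranked first across labellers.
toy_history_pairs = [
    {0: [('a', 'x')], 1: [('b', 'y')]},   # labeller 1: ranked pairs per source row
    {0: [('a', 'x')], 1: [('b', 'z')]},   # labeller 2
    {0: [('a', 'w')], 1: [('b', 'y')]},   # labeller 3
]
toy_counts = dict()
for history in toy_history_pairs:
    for source_id, pairs in history.items():
        if source_id not in toy_counts:
            toy_counts[source_id] = defaultdict(int)
        if pairs:
            toy_counts[source_id][pairs[0]] += 1
toy_res = {source_id: max(count.items(), key=lambda x: x[1])[0] if count else None
           for source_id, count in toy_counts.items()}
# toy_res == {0: ('a', 'x'), 1: ('b', 'y')}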
| mit
elkingtonmcb/scikit-learn | sklearn/neighbors/regression.py | 100 | 11017 |
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances
but different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
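# _get_weights returns None for 'uniform' weighting; otherwise it returns an
# array of per-neighbor weights with the same shape as neigh_dist.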
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=float)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
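# Illustrative sketch (not part of the original module): `weights` may also be a
# callable that maps the array of neighbor distances to an equally-shaped array
# of weights, e.g.:
#
#     def gaussian_weights(dist):
#         return np.exp(-dist ** 2)
#
#     reg = KNeighborsRegressor(n_neighbors=3, weights=gaussian_weights)
#     reg.fit([[0], [1], [2], [3]], [0, 0, 1, 1])
#     reg.predict([[1.5]])  # prediction weighted toward the nearest targets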
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
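# radius_neighbors returns a variable-length array of neighbor indices for each
# query point, so the aggregation below is done sample by sample rather than
# with fancy indexing.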
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause
shangwuhencc/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 |
"""
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`
(a minimal sketch of this is appended at the end of this example).
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) will not necessarily be classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images,
# turning the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
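# A minimal sketch (not part of the original example) of the speed-up mentioned
# in the docstring above: swapping the LinearSVC in the approximate-kernel
# pipeline for SGDClassifier, which optimizes the same linear SVM objective with
# stochastic gradient descent. The hyper-parameters are illustrative assumptions.
from sklearn.linear_model import SGDClassifier

sgd_fourier_svm = pipeline.Pipeline([
    ("feature_map", RBFSampler(gamma=.2, n_components=100, random_state=1)),
    ("svm", SGDClassifier(random_state=1))])
start = time()
sgd_fourier_svm.fit(data_train, targets_train)
print("SGD on Fourier features: score=%.3f, fit time=%.3fs"
      % (sgd_fourier_svm.score(data_test, targets_test), time() - start))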
| bsd-3-clause
deepesch/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to the line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
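# The perpendicular distance from the separating hyperplane to each margin
# hyperplane is 1 / ||w||, which is what `margin` holds; the support vectors
# plotted below lie exactly on those margin hyperplanes.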
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause
hjanime/VisTrails | vistrails/packages/matplotlib/artists.py | 3 | 230248 |
from __future__ import division
from vistrails.core.modules.vistrails_module import Module
from bases import MplProperties
import matplotlib.artist
import matplotlib.cbook
def translate_color(c):
return c.tuple
def translate_MplLine2DProperties_marker(val):
translate_dict = {'caretright': 5, 'star': '*', 'point': '.', 'mathtext': '$...$', 'triangle_right': '>', 'tickup': 2, 'hexagon1': 'h', 'plus': '+', 'hline': '_', 'vline': '|', 'tickdown': 3, 'nothing': ' ', 'caretup': 6, 'caretleft': 4, 'pentagon': 'p', 'tri_left': '3', 'tickleft': 0, 'tickright': 1, 'tri_down': '1', 'thin_diamond': 'd', 'diamond': 'D', 'caretdown': 7, 'hexagon2': 'H', 'tri_up': '2', 'square': 's', 'x': 'x', 'triangle_down': 'v', 'triangle_up': '^', 'octagon': '8', 'tri_right': '4', 'circle': 'o', 'pixel': ',', 'triangle_left': '<'}
return translate_dict[val]
def translate_MplLine2DProperties_linestyle(val):
translate_dict = {'solid': '-', 'dashed': '--', 'dash_dot': '-.', 'dotted': ':', 'draw nothing': ''}
return translate_dict[val]
def translate_MplAxesProperties_anchor(val):
translate_dict = {'right': 'E', 'Center': 'C', 'bottom right': 'SE', 'top right': 'NE', 'bottom': 'S', 'top left': 'NW', 'top': 'N', 'bottom left': 'SW', 'left': 'W'}
return translate_dict[val]
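# For example (illustrative), the translators above map the human-readable names
# used on VisTrails ports to matplotlib's own codes:
#   translate_MplLine2DProperties_marker('circle')    -> 'o'
#   translate_MplLine2DProperties_linestyle('dashed') -> '--'
#   translate_MplAxesProperties_anchor('top left')    -> 'NW'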
class MplArtistProperties(MplProperties):
"""
Abstract base class for someone who renders into a
:class:`FigureCanvas`.
"""
_input_ports = [
("picker", "basic:String",
{'optional': True, 'docstring': "Set the epsilon for picking used by this artist\n\npicker can be one of the following:\n\nNone: picking is disabled for this artist (default)\n\nA boolean: if True then picking will be enabled and the artist will fire a pick event if the mouse event is over the artist\n\nA float: if picker is a number it is interpreted as an epsilon tolerance in points and the artist will fire off an event if it's data is within epsilon of the mouse event. For some artists like lines and patch collections, the artist may provide additional data to the pick event that is generated, e.g. the indices of the data within epsilon of the pick event\n\nA function: if picker is callable, it is a user supplied function which determines whether the artist is hit by the mouse event:\n\nhit, props = picker(artist, mouseevent)\n\nto determine the hit test. if the mouse event is over the artist, return hit=True and props is a dictionary of properties you want added to the PickEvent attributes."}),
("contains", "basic:String",
{'optional': True, 'docstring': 'Replace the contains test used by this artist. The new picker should be a callable function which determines whether the artist is hit by the mouse event:\n\nhit, props = picker(artist, mouseevent)\n\nIf the mouse event is over the artist, return hit = True and props is a dictionary of properties you want returned with the contains test.'}),
("clip_on", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether artist uses clipping.'}),
("agg_filter", "basic:String",
{'optional': True, 'docstring': 'set agg_filter function.'}),
("visible", "basic:Boolean",
{'optional': True, 'docstring': "Set the artist's visiblity."}),
("url", "basic:String",
{'optional': True, 'docstring': 'Sets the url for the artist'}),
("transform", "basic:String",
{'optional': True, 'docstring': 'Set the :class:`~matplotlib.transforms.Transform` instance used by this artist.'}),
("axes", "basic:String",
{'optional': True, 'docstring': 'Set the :class:`~matplotlib.axes.Axes` instance in which the artist resides, if any.'}),
("clip_box", "basic:String",
{'optional': True, 'docstring': "Set the artist's clip :class:`~matplotlib.transforms.Bbox`."}),
("clip_path", "basic:String",
{'optional': True, 'docstring': "Set the artist's clip path, which may be:\n\na :class:`~matplotlib.patches.Patch` (or subclass) instance\n\n\n\nNone, to remove the clipping path\n\nFor efficiency, if the path happens to be an axis-aligned rectangle, this method will set the clipping box to the corresponding rectangle and set the clipping path to None."}),
("lod", "basic:Boolean",
{'optional': True, 'docstring': 'Set Level of Detail on or off. If on, the artists may examine things like the pixel width of the axes and draw a subset of their contents accordingly'}),
("label", "basic:String",
{'optional': True, 'docstring': 'Set the label to s for auto legend.'}),
("rasterized", "basic:Boolean",
{'optional': True, 'docstring': "Force rasterized (bitmap) drawing in vector backend output.\n\nDefaults to None, which implies the backend's default behavior"}),
("gid", "basic:String",
{'optional': True, 'docstring': 'Sets the (group) id for the artist'}),
("zorder", "basic:String",
{'optional': True, 'docstring': 'Set the zorder for the artist. Artists with lower zorder values are drawn first.'}),
("snap", "basic:String",
{'optional': True, 'docstring': 'Sets the snap setting which may be:\n\nTrue: snap vertices to the nearest pixel center\n\nFalse: leave vertices as-is\n\nNone: (auto) If the path contains only rectilinear line segments, round to the nearest pixel center\n\nOnly supported by the Agg and MacOSX backends.'}),
("alpha", "basic:Float",
{'optional': True, 'docstring': 'Set the alpha value used for blending - not supported on all backends.'}),
("animated", "basic:Boolean",
{'optional': True, 'docstring': "Set the artist's animation state."}),
("figure", "basic:String",
{'optional': True, 'docstring': 'Set the :class:`~matplotlib.figure.Figure` instance the artist belongs to.'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplArtistProperties)")]
class Artist(MplProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
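# Role of the four dictionaries (summary of how they are used below):
#   props             -- properties applied in one call via matplotlib.artist.setp()
#   constructor_props -- keyword arguments merged into the constructor call (update_kwargs)
#   not_setp_props    -- attributes setp() cannot handle, set directly with setattr()
#   sub_props         -- nested property objects handled by update_sub_props()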
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplArtistProperties.Artist()
self.set_output("value", artist)
MplProperties.compute(self, artist)
if self.has_input('picker'):
artist.props['picker'] = self.get_input('picker')
if self.has_input('contains'):
artist.props['contains'] = self.get_input('contains')
if self.has_input('clip_on'):
artist.props['clip_on'] = self.get_input('clip_on')
if self.has_input('agg_filter'):
artist.props['agg_filter'] = self.get_input('agg_filter')
if self.has_input('visible'):
artist.props['visible'] = self.get_input('visible')
if self.has_input('url'):
artist.props['url'] = self.get_input('url')
if self.has_input('transform'):
artist.props['transform'] = self.get_input('transform')
if self.has_input('axes'):
artist.props['axes'] = self.get_input('axes')
if self.has_input('clip_box'):
artist.props['clip_box'] = self.get_input('clip_box')
if self.has_input('clip_path'):
artist.props['clip_path'] = self.get_input('clip_path')
if self.has_input('lod'):
artist.props['lod'] = self.get_input('lod')
if self.has_input('label'):
artist.props['label'] = self.get_input('label')
if self.has_input('rasterized'):
artist.props['rasterized'] = self.get_input('rasterized')
if self.has_input('gid'):
artist.props['gid'] = self.get_input('gid')
if self.has_input('zorder'):
artist.props['zorder'] = self.get_input('zorder')
if self.has_input('snap'):
artist.props['snap'] = self.get_input('snap')
if self.has_input('alpha'):
artist.props['alpha'] = self.get_input('alpha')
if self.has_input('animated'):
artist.props['animated'] = self.get_input('animated')
if self.has_input('figure'):
artist.props['figure'] = self.get_input('figure')
class Mpl_AxesImageBaseProperties(MplArtistProperties):
"""None
"""
_input_ports = [
("origin", "basic:String",
{'optional': True}),
("resample", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether or not image resampling is used'}),
("norm", "basic:String",
{'optional': True}),
("cmap", "basic:String",
{'optional': True}),
("filternorm", "basic:String",
{'optional': True, 'docstring': 'Set whether the resize filter norms the weights -- see help for imshow'}),
("ax", "basic:String",
{'optional': True}),
("alpha", "basic:Float",
{'optional': True, 'docstring': 'Set the alpha value used for blending - not supported on all backends'}),
("array", "basic:String",
{'optional': True, 'docstring': 'Retained for backwards compatibility - use set_data instead'}),
("data", "basic:String",
{'optional': True, 'docstring': 'Set the image array'}),
("filterrad", "basic:Float",
{'optional': True, 'docstring': 'Set the resize filter radius only applicable to some interpolation schemes -- see help for imshow'}),
("interpolation", "basic:String",
{'entry_types': "['enum']", 'docstring': "Set the interpolation method the image uses when resizing.\n\nif None, use a value from rc setting. If 'none', the image is shown as is without interpolating. 'none' is only supported in agg, ps and pdf backends and will fall back to 'nearest' mode for other backends.", 'values': "[['nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', 'none', '']]", 'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(Mpl_AxesImageBaseProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = Mpl_AxesImageBaseProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('origin'):
artist.constructor_props['origin'] = self.get_input('origin')
if self.has_input('resample'):
artist.props['resample'] = self.get_input('resample')
if self.has_input('norm'):
artist.constructor_props['norm'] = self.get_input('norm')
if self.has_input('cmap'):
artist.constructor_props['cmap'] = self.get_input('cmap')
if self.has_input('filternorm'):
artist.props['filternorm'] = self.get_input('filternorm')
if self.has_input('ax'):
artist.constructor_props['ax'] = self.get_input('ax')
if self.has_input('alpha'):
artist.props['alpha'] = self.get_input('alpha')
if self.has_input('array'):
artist.props['array'] = self.get_input('array')
if self.has_input('data'):
artist.props['data'] = self.get_input('data')
if self.has_input('filterrad'):
artist.props['filterrad'] = self.get_input('filterrad')
if self.has_input('interpolation'):
artist.props['interpolation'] = self.get_input('interpolation')
class MplAxesImageProperties(Mpl_AxesImageBaseProperties):
"""None
"""
_input_ports = [
("origin", "basic:String",
{'optional': True}),
("resample", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
("norm", "basic:String",
{'optional': True}),
("cmap", "basic:String",
{'optional': True}),
("filterrad", "basic:Float",
{'optional': True, 'defaults': "['4.0']"}),
("extent", "basic:String",
{'optional': True, 'docstring': 'extent is data axes (left, right, bottom, top) for making image plots\n\nThis updates ax.dataLim, and, if autoscaling, sets viewLim to tightly fit the image, regardless of dataLim. Autoscaling state is not changed, so following this with ax.autoscale_view will redo the autoscaling in accord with dataLim.'}),
("ax", "basic:String",
{'optional': True}),
("filternorm", "basic:Integer",
{'optional': True, 'defaults': "['1']"}),
("interpolation", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplAxesImageProperties)")]
class Artist(Mpl_AxesImageBaseProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
Mpl_AxesImageBaseProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplAxesImageProperties.Artist()
self.set_output("value", artist)
Mpl_AxesImageBaseProperties.compute(self, artist)
if self.has_input('origin'):
artist.constructor_props['origin'] = self.get_input('origin')
if self.has_input('resample'):
artist.constructor_props['resample'] = self.get_input('resample')
if self.has_input('norm'):
artist.constructor_props['norm'] = self.get_input('norm')
if self.has_input('cmap'):
artist.constructor_props['cmap'] = self.get_input('cmap')
if self.has_input('filterrad'):
artist.constructor_props['filterrad'] = self.get_input('filterrad')
if self.has_input('extent'):
artist.props['extent'] = self.get_input('extent')
if self.has_input('ax'):
artist.constructor_props['ax'] = self.get_input('ax')
if self.has_input('filternorm'):
artist.constructor_props['filternorm'] = self.get_input('filternorm')
if self.has_input('interpolation'):
artist.constructor_props['interpolation'] = self.get_input('interpolation')
class MplNonUniformImageProperties(MplAxesImageProperties):
"""None
"""
_input_ports = [
("norm", "basic:String",
{'optional': True}),
("cmap", "basic:String",
{'optional': True}),
("filternorm", "basic:String",
{'optional': True}),
("ax", "basic:String",
{'optional': True}),
("array", "basic:String",
{'optional': True}),
("data", "basic:String",
{'optional': True, 'docstring': 'Set the grid for the pixel centers, and the pixel values.'}),
("filterrad", "basic:String",
{'optional': True}),
("interpolation", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplNonUniformImageProperties)")]
class Artist(MplAxesImageProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplAxesImageProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplNonUniformImageProperties.Artist()
self.set_output("value", artist)
MplAxesImageProperties.compute(self, artist)
if self.has_input('norm'):
artist.props['norm'] = self.get_input('norm')
if self.has_input('cmap'):
artist.props['cmap'] = self.get_input('cmap')
if self.has_input('filternorm'):
artist.props['filternorm'] = self.get_input('filternorm')
if self.has_input('ax'):
artist.constructor_props['ax'] = self.get_input('ax')
if self.has_input('array'):
artist.props['array'] = self.get_input('array')
if self.has_input('data'):
artist.props['data'] = self.get_input('data')
if self.has_input('filterrad'):
artist.props['filterrad'] = self.get_input('filterrad')
if self.has_input('interpolation'):
artist.props['interpolation'] = self.get_input('interpolation')
class MplBboxImageProperties(Mpl_AxesImageBaseProperties):
"""The Image class whose size is determined by the given bbox.
"""
_input_ports = [
("origin", "basic:String",
{'optional': True}),
("interp_at_native", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("resample", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
("cmap", "basic:String",
{'optional': True}),
("filternorm", "basic:Integer",
{'optional': True, 'defaults': "['1']"}),
("norm", "basic:String",
{'optional': True}),
("interpolation", "basic:String",
{'optional': True}),
("filterrad", "basic:Float",
{'optional': True, 'defaults': "['4.0']"}),
("bbox", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplBboxImageProperties)")]
class Artist(Mpl_AxesImageBaseProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
Mpl_AxesImageBaseProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplBboxImageProperties.Artist()
self.set_output("value", artist)
Mpl_AxesImageBaseProperties.compute(self, artist)
if self.has_input('origin'):
artist.constructor_props['origin'] = self.get_input('origin')
if self.has_input('interp_at_native'):
artist.constructor_props['interp_at_native'] = self.get_input('interp_at_native')
if self.has_input('resample'):
artist.constructor_props['resample'] = self.get_input('resample')
if self.has_input('cmap'):
artist.constructor_props['cmap'] = self.get_input('cmap')
if self.has_input('filternorm'):
artist.constructor_props['filternorm'] = self.get_input('filternorm')
if self.has_input('norm'):
artist.constructor_props['norm'] = self.get_input('norm')
if self.has_input('interpolation'):
artist.constructor_props['interpolation'] = self.get_input('interpolation')
if self.has_input('filterrad'):
artist.constructor_props['filterrad'] = self.get_input('filterrad')
if self.has_input('bbox'):
artist.constructor_props['bbox'] = self.get_input('bbox')
class MplPcolorImageProperties(MplArtistProperties):
"""
Make a pcolor-style plot with an irregular rectangular grid.
This uses a variation of the original irregular image code,
and it is used by pcolorfast for the corresponding grid type.
"""
_input_ports = [
("A", "basic:String",
{'optional': True}),
("ax", "basic:String",
{'optional': True}),
("cmap", "basic:String",
{'optional': True}),
("x", "basic:String",
{'optional': True}),
("y", "basic:String",
{'optional': True}),
("alpha", "basic:Float",
{'optional': True, 'docstring': 'Set the alpha value used for blending - not supported on all backends'}),
("array", "basic:String",
{'optional': True}),
("data", "basic:String",
{'optional': True}),
("norm", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplPcolorImageProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplPcolorImageProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('A'):
artist.constructor_props['A'] = self.get_input('A')
if self.has_input('ax'):
artist.constructor_props['ax'] = self.get_input('ax')
if self.has_input('cmap'):
artist.constructor_props['cmap'] = self.get_input('cmap')
if self.has_input('x'):
artist.constructor_props['x'] = self.get_input('x')
if self.has_input('y'):
artist.constructor_props['y'] = self.get_input('y')
if self.has_input('alpha'):
artist.props['alpha'] = self.get_input('alpha')
if self.has_input('array'):
artist.props['array'] = self.get_input('array')
if self.has_input('data'):
artist.props['data'] = self.get_input('data')
if self.has_input('norm'):
artist.constructor_props['norm'] = self.get_input('norm')
class MplFigureImageProperties(MplArtistProperties):
"""None
"""
_input_ports = [
("origin", "basic:String",
{'optional': True}),
("offsetx", "basic:Integer",
{'optional': True, 'defaults': "['0']"}),
("offsety", "basic:Integer",
{'optional': True, 'defaults': "['0']"}),
("cmap", "basic:String",
{'optional': True}),
("fig", "basic:String",
{'optional': True}),
("array", "basic:String",
{'optional': True, 'docstring': 'Deprecated; use set_data for consistency with other image types.'}),
("data", "basic:String",
{'optional': True, 'docstring': 'Set the image array.'}),
("norm", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplFigureImageProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplFigureImageProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('origin'):
artist.constructor_props['origin'] = self.get_input('origin')
if self.has_input('offsetx'):
artist.constructor_props['offsetx'] = self.get_input('offsetx')
if self.has_input('offsety'):
artist.constructor_props['offsety'] = self.get_input('offsety')
if self.has_input('cmap'):
artist.constructor_props['cmap'] = self.get_input('cmap')
if self.has_input('fig'):
artist.constructor_props['fig'] = self.get_input('fig')
if self.has_input('array'):
artist.props['array'] = self.get_input('array')
if self.has_input('data'):
artist.props['data'] = self.get_input('data')
if self.has_input('norm'):
artist.constructor_props['norm'] = self.get_input('norm')
class MplCollectionProperties(MplArtistProperties):
"""
Base class for Collections. Must be subclassed to be usable.
All properties in a collection must be sequences or scalars;
if scalars, they will be converted to sequences. The
property of the ith element of the collection is::
prop[i % len(props)]
Keyword arguments and default values:
* *edgecolors*: None
* *facecolors*: None
* *linewidths*: None
* *antialiaseds*: None
* *offsets*: None
* *transOffset*: transforms.IdentityTransform()
* *offset_position*: 'screen' (default) or 'data'
* *norm*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *cmap*: None (optional for
:class:`matplotlib.cm.ScalarMappable`)
* *hatch*: None
*offsets* and *transOffset* are used to translate the patch after
rendering (default no offsets). If offset_position is 'screen'
(default) the offset is applied after the master transform has
been applied, that is, the offsets are in screen coordinates. If
offset_position is 'data', the offset is applied before the master
transform, i.e., the offsets are in data coordinates.
If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds*
are None, they default to their :data:`matplotlib.rcParams` patch
setting, in sequence form.
The use of :class:`~matplotlib.cm.ScalarMappable` is optional. If
the :class:`~matplotlib.cm.ScalarMappable` matrix _A is not None
(ie a call to set_array has been made), at draw time a call to
scalar mappable will be made to set the face colors.
"""
_input_ports = [
("transOffset", "basic:String",
{'optional': True}),
("edgecolor", "basic:List",
{'optional': True, 'docstring': "Set the edgecolor(s) of the collection. c can be a matplotlib color arg (all patches have same color), or a sequence of rgba tuples; if it is a sequence the patches will cycle through the sequence.\n\nIf c is 'face', the edge color will always be the same as the face color. If it is 'none', the patch boundary will not be drawn."}),
("offset_position", "basic:String",
{'optional': True, 'docstring': "Set how offsets are applied. If offset_position is 'screen' (default) the offset is applied after the master transform has been applied, that is, the offsets are in screen coordinates. If offset_position is 'data', the offset is applied before the master transform, i.e., the offsets are in data coordinates."}),
("edgecolors", "basic:String",
{'optional': True}),
("facecolor", "basic:List",
{'optional': True, 'docstring': "Set the facecolor(s) of the collection. c can be a matplotlib color arg (all patches have same color), or a sequence of rgba tuples; if it is a sequence the patches will cycle through the sequence.\n\nIf c is 'none', the patch will not be filled."}),
("linestyles", "basic:String",
{'optional': True, 'defaults': "['solid']"}),
("offsetsSequence", "basic:List",
{'optional': True, 'docstring': 'Set the offsets for the collection. offsets can be a scalar or a sequence.'}),
("offsetsScalar", "basic:Float",
{'docstring': 'Set the offsets for the collection. offsets can be a scalar or a sequence.', 'optional': True}),
("color", "basic:List",
{'optional': True, 'docstring': 'Set both the edgecolor and the facecolor. .. seealso:\n\n:meth:`set_facecolor`, :meth:`set_edgecolor` For setting the edge or face color individually.'}),
("pickradius", "basic:String",
{'optional': True}),
("antialiaseds", "basic:String",
{'optional': True}),
("linewidths", "basic:String",
{'optional': True}),
("cmap", "basic:String",
{'optional': True}),
("antialiasedSequence", "basic:List",
{'optional': True, 'docstring': 'Set the antialiasing state for rendering.'}),
("antialiasedScalar", "basic:Boolean",
{'docstring': 'Set the antialiasing state for rendering.', 'optional': True}),
("urls", "basic:String",
{'optional': True}),
("hatch", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the hatching pattern\n\nhatch can be one of:\n\n/ - diagonal hatching \\ - back diagonal | - vertical - - horizontal + - crossed x - crossed diagonal o - small circle O - large circle . - dots * - stars\n\nLetters can be combined, in which case all the specified hatchings are done. If same letter repeats, it increases the density of hatching of that pattern.\n\nHatching is supported in the PostScript, PDF, SVG and Agg backends only.\n\nUnlike other properties such as linewidth and colors, hatching can only be specified for the collection as a whole, not separately for each member.', 'values': '[[\'/\', "\'\\\\\'", "\'", "\'", \'-\', \'+\', \'x\', \'o\', \'O\', \'.\', \'*\']]', 'optional': True}),
("alpha", "basic:Float",
{'optional': True, 'docstring': 'Set the alpha transparencies of the collection. alpha must be a float or None.'}),
("paths", "basic:String",
{'optional': True}),
("linewidthSequence", "basic:List",
{'optional': True, 'docstring': 'Set the linewidth(s) for the collection. lw can be a scalar or a sequence; if it is a sequence the patches will cycle through the sequence'}),
("linewidthScalar", "basic:Float",
{'docstring': 'Set the linewidth(s) for the collection. lw can be a scalar or a sequence; if it is a sequence the patches will cycle through the sequence', 'optional': True}),
("linestyle", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the linestyle(s) for the collection.', 'values': "[['solid', ('dashed', 'dashdot', 'dotted'), '(offset, on-off-dash-seq)']]", 'optional': True}),
("facecolors", "basic:String",
{'optional': True}),
("norm", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplCollectionProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplCollectionProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('transOffset'):
artist.constructor_props['transOffset'] = self.get_input('transOffset')
if self.has_input('edgecolor'):
artist.props['edgecolor'] = self.get_input('edgecolor')
if self.has_input('offset_position'):
artist.props['offset_position'] = self.get_input('offset_position')
if self.has_input('edgecolors'):
artist.constructor_props['edgecolors'] = self.get_input('edgecolors')
if self.has_input('facecolor'):
artist.props['facecolor'] = self.get_input('facecolor')
if self.has_input('linestyles'):
artist.constructor_props['linestyles'] = self.get_input('linestyles')
if self.has_input('offsetsSequence'):
artist.props['offsets'] = self.get_input('offsetsSequence')
elif self.has_input('offsetsScalar'):
artist.props['offsets'] = self.get_input('offsetsScalar')
if self.has_input('color'):
artist.props['color'] = self.get_input('color')
if self.has_input('pickradius'):
artist.props['pickradius'] = self.get_input('pickradius')
if self.has_input('antialiaseds'):
artist.constructor_props['antialiaseds'] = self.get_input('antialiaseds')
if self.has_input('linewidths'):
artist.constructor_props['linewidths'] = self.get_input('linewidths')
if self.has_input('cmap'):
artist.constructor_props['cmap'] = self.get_input('cmap')
if self.has_input('antialiasedSequence'):
artist.props['antialiased'] = self.get_input('antialiasedSequence')
elif self.has_input('antialiasedScalar'):
artist.props['antialiased'] = self.get_input('antialiasedScalar')
if self.has_input('urls'):
artist.props['urls'] = self.get_input('urls')
if self.has_input('hatch'):
artist.props['hatch'] = self.get_input('hatch')
if self.has_input('alpha'):
artist.props['alpha'] = self.get_input('alpha')
if self.has_input('paths'):
artist.props['paths'] = self.get_input('paths')
if self.has_input('linewidthSequence'):
artist.props['linewidth'] = self.get_input('linewidthSequence')
elif self.has_input('linewidthScalar'):
artist.props['linewidth'] = self.get_input('linewidthScalar')
if self.has_input('linestyle'):
artist.props['linestyle'] = self.get_input('linestyle')
if self.has_input('facecolors'):
artist.constructor_props['facecolors'] = self.get_input('facecolors')
if self.has_input('norm'):
artist.constructor_props['norm'] = self.get_input('norm')
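# Illustrative note (not part of the generated wrappers): downstream plot
# modules are assumed to consume these Artist helpers roughly as follows --
# constructor_props are merged into the keyword arguments used to build the
# matplotlib object via update_kwargs(), and props are applied afterwards
# with matplotlib.artist.setp() via update_props(). A minimal sketch:
#
#     helper = MplCollectionProperties.Artist()
#     helper.constructor_props['cmap'] = matplotlib.cm.jet
#     helper.props['alpha'] = 0.5
#     kwargs = {}
#     helper.update_kwargs(kwargs)   # kwargs now holds both cmap and alpha
#     coll = matplotlib.collections.PathCollection([], **kwargs)
#     helper.update_props(coll)      # calls setp(coll, alpha=0.5)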
class MplPathCollectionProperties(MplCollectionProperties):
"""
This is the most basic :class:`Collection` subclass.
"""
_input_ports = [
("paths", "basic:String",
{'optional': True}),
("sizes", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplPathCollectionProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplPathCollectionProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('paths'):
artist.props['paths'] = self.get_input('paths')
if self.has_input('sizes'):
artist.constructor_props['sizes'] = self.get_input('sizes')
class MplPolyCollectionProperties(MplCollectionProperties):
"""None
"""
_input_ports = [
("paths", "basic:String",
{'optional': True, 'docstring': 'This allows one to delay initialization of the vertices.'}),
("verts", "basic:String",
{'optional': True, 'docstring': 'This allows one to delay initialization of the vertices.'}),
("closed", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("sizes", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplPolyCollectionProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplPolyCollectionProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('paths'):
artist.props['paths'] = self.get_input('paths')
if self.has_input('verts'):
artist.props['verts'] = self.get_input('verts')
if self.has_input('closed'):
artist.constructor_props['closed'] = self.get_input('closed')
if self.has_input('sizes'):
artist.constructor_props['sizes'] = self.get_input('sizes')
class MplBrokenBarHCollectionProperties(MplPolyCollectionProperties):
"""
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
"""
_input_ports = [
("xranges", "basic:String",
{'optional': True}),
("yrange", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplBrokenBarHCollectionProperties)")]
class Artist(MplPolyCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPolyCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplBrokenBarHCollectionProperties.Artist()
self.set_output("value", artist)
MplPolyCollectionProperties.compute(self, artist)
if self.has_input('xranges'):
artist.constructor_props['xranges'] = self.get_input('xranges')
if self.has_input('yrange'):
artist.constructor_props['yrange'] = self.get_input('yrange')
class MplRegularPolyCollectionProperties(MplCollectionProperties):
"""Draw a collection of regular polygons with *numsides*.
"""
_input_ports = [
("numsides", "basic:String",
{'optional': True}),
("rotation", "basic:Integer",
{'optional': True, 'defaults': "['0']"}),
("sizes", "basic:String",
{'optional': True, 'defaults': "['(1,)']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplRegularPolyCollectionProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplRegularPolyCollectionProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('numsides'):
artist.constructor_props['numsides'] = self.get_input('numsides')
if self.has_input('rotation'):
artist.constructor_props['rotation'] = self.get_input('rotation')
if self.has_input('sizes'):
artist.constructor_props['sizes'] = self.get_input('sizes')
class MplStarPolygonCollectionProperties(MplRegularPolyCollectionProperties):
"""
Draw a collection of regular stars with *numsides* points.
"""
_input_ports = [
("numsides", "basic:String",
{'optional': True}),
("rotation", "basic:Integer",
{'optional': True, 'defaults': "['0']"}),
("sizes", "basic:String",
{'optional': True, 'defaults': "['(1,)']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplStarPolygonCollectionProperties)")]
class Artist(MplRegularPolyCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplRegularPolyCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplStarPolygonCollectionProperties.Artist()
self.set_output("value", artist)
MplRegularPolyCollectionProperties.compute(self, artist)
if self.has_input('numsides'):
artist.constructor_props['numsides'] = self.get_input('numsides')
if self.has_input('rotation'):
artist.constructor_props['rotation'] = self.get_input('rotation')
if self.has_input('sizes'):
artist.constructor_props['sizes'] = self.get_input('sizes')
class MplAsteriskPolygonCollectionProperties(MplRegularPolyCollectionProperties):
"""
Draw a collection of regular asterisks with *numsides* points.
"""
_input_ports = [
("numsides", "basic:String",
{'optional': True}),
("rotation", "basic:Integer",
{'optional': True, 'defaults': "['0']"}),
("sizes", "basic:String",
{'optional': True, 'defaults': "['(1,)']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplAsteriskPolygonCollectionProperties)")]
class Artist(MplRegularPolyCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplRegularPolyCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplAsteriskPolygonCollectionProperties.Artist()
self.set_output("value", artist)
MplRegularPolyCollectionProperties.compute(self, artist)
if self.has_input('numsides'):
artist.constructor_props['numsides'] = self.get_input('numsides')
if self.has_input('rotation'):
artist.constructor_props['rotation'] = self.get_input('rotation')
if self.has_input('sizes'):
artist.constructor_props['sizes'] = self.get_input('sizes')
class MplLineCollectionProperties(MplCollectionProperties):
"""
All parameters must be sequences or scalars; if scalars, they will
be converted to sequences. The property of the ith line
segment is::
prop[i % len(props)]
i.e., the properties cycle if the ``len`` of props is less than the
number of segments.
"""
_input_ports = [
("paths", "basic:String",
{'optional': True}),
("antialiaseds", "basic:String",
{'optional': True}),
("linestyles", "basic:String",
{'optional': True, 'defaults': "['solid']"}),
("offsets", "basic:String",
{'optional': True}),
("color", "basic:List",
         {'optional': True, 'docstring': 'Set the color(s) of the line collection. c can be a matplotlib color arg (all patches have same color), or a sequence of rgba tuples; if it is a sequence the patches will cycle through the sequence.'}),
("segments", "basic:String",
{'optional': True}),
("linewidths", "basic:String",
{'optional': True}),
("colors", "basic:String",
{'optional': True}),
("cmap", "basic:String",
{'optional': True}),
("transOffset", "basic:String",
{'optional': True}),
("verts", "basic:String",
{'optional': True}),
("pickradius", "basic:Integer",
{'optional': True, 'defaults': "['5']"}),
("norm", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplLineCollectionProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplLineCollectionProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('paths'):
artist.props['paths'] = self.get_input('paths')
if self.has_input('antialiaseds'):
artist.constructor_props['antialiaseds'] = self.get_input('antialiaseds')
if self.has_input('linestyles'):
artist.constructor_props['linestyles'] = self.get_input('linestyles')
if self.has_input('offsets'):
artist.constructor_props['offsets'] = self.get_input('offsets')
if self.has_input('color'):
artist.props['color'] = self.get_input('color')
if self.has_input('segments'):
artist.props['segments'] = self.get_input('segments')
if self.has_input('linewidths'):
artist.constructor_props['linewidths'] = self.get_input('linewidths')
if self.has_input('colors'):
artist.constructor_props['colors'] = self.get_input('colors')
if self.has_input('cmap'):
artist.constructor_props['cmap'] = self.get_input('cmap')
if self.has_input('transOffset'):
artist.constructor_props['transOffset'] = self.get_input('transOffset')
if self.has_input('verts'):
artist.props['verts'] = self.get_input('verts')
if self.has_input('pickradius'):
artist.constructor_props['pickradius'] = self.get_input('pickradius')
if self.has_input('norm'):
artist.constructor_props['norm'] = self.get_input('norm')
class MplCircleCollectionProperties(MplCollectionProperties):
"""
A collection of circles, drawn using splines.
"""
_input_ports = [
("sizes", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplCircleCollectionProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplCircleCollectionProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('sizes'):
artist.constructor_props['sizes'] = self.get_input('sizes')
class MplEllipseCollectionProperties(MplCollectionProperties):
"""
A collection of ellipses, drawn using splines.
"""
_input_ports = [
("units", "basic:String",
{'optional': True, 'defaults': "['points']"}),
("widths", "basic:String",
{'optional': True}),
("angles", "basic:String",
{'optional': True}),
("heights", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplEllipseCollectionProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplEllipseCollectionProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('units'):
artist.constructor_props['units'] = self.get_input('units')
if self.has_input('widths'):
artist.constructor_props['widths'] = self.get_input('widths')
if self.has_input('angles'):
artist.constructor_props['angles'] = self.get_input('angles')
if self.has_input('heights'):
artist.constructor_props['heights'] = self.get_input('heights')
class MplPatchCollectionProperties(MplCollectionProperties):
"""
A generic collection of patches.
This makes it easier to assign a color map to a heterogeneous
collection of patches.
This also may improve plotting speed, since PatchCollection will
draw faster than a large number of patches.
"""
_input_ports = [
("paths", "basic:String",
{'optional': True}),
("patches", "basic:String",
{'optional': True}),
("match_original", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplPatchCollectionProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplPatchCollectionProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('paths'):
artist.props['paths'] = self.get_input('paths')
if self.has_input('patches'):
artist.constructor_props['patches'] = self.get_input('patches')
if self.has_input('match_original'):
artist.constructor_props['match_original'] = self.get_input('match_original')
class MplTriMeshProperties(MplCollectionProperties):
"""
Class for the efficient drawing of a triangular mesh using
Gouraud shading.
A triangular mesh is a :class:`~matplotlib.tri.Triangulation`
object.
"""
_input_ports = [
("triangulation", "basic:String",
{'optional': True}),
("paths", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplTriMeshProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplTriMeshProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('triangulation'):
artist.constructor_props['triangulation'] = self.get_input('triangulation')
if self.has_input('paths'):
artist.props['paths'] = self.get_input('paths')
class MplQuadMeshProperties(MplCollectionProperties):
"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh consists of a grid of vertices. The
dimensions of this array are (*meshWidth* + 1, *meshHeight* +
1). Each vertex in the mesh has a different set of "mesh
coordinates" representing its position in the topology of the
mesh. For any values (*m*, *n*) such that 0 <= *m* <= *meshWidth*
and 0 <= *n* <= *meshHeight*, the vertices at mesh coordinates
(*m*, *n*), (*m*, *n* + 1), (*m* + 1, *n* + 1), and (*m* + 1, *n*)
form one of the quadrilaterals in the mesh. There are thus
(*meshWidth* * *meshHeight*) quadrilaterals in the mesh. The mesh
need not be regular and the polygons need not be convex.
A quadrilateral mesh is represented by a (2 x ((*meshWidth* + 1) *
(*meshHeight* + 1))) numpy array *coordinates*, where each row is
the *x* and *y* coordinates of one of the vertices. To define the
function that maps from a data point to its corresponding color,
use the :meth:`set_cmap` method. Each of these arrays is indexed in
row-major order by the mesh coordinates of the vertex (or the mesh
coordinates of the lower left vertex, in the case of the
colors).
For example, the first entry in *coordinates* is the
coordinates of the vertex at mesh coordinates (0, 0), then the one
at (0, 1), then at (0, 2) .. (0, meshWidth), (1, 0), (1, 1), and
so on.
*shading* may be 'flat', or 'gouraud'
"""
_input_ports = [
("paths", "basic:String",
{'optional': True}),
("meshWidth", "basic:String",
{'optional': True}),
("coordinates", "basic:String",
{'optional': True}),
("antialiased", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("shading", "basic:String",
{'optional': True, 'defaults': "['flat']"}),
("meshHeight", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplQuadMeshProperties)")]
class Artist(MplCollectionProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplCollectionProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplQuadMeshProperties.Artist()
self.set_output("value", artist)
MplCollectionProperties.compute(self, artist)
if self.has_input('paths'):
artist.props['paths'] = self.get_input('paths')
if self.has_input('meshWidth'):
artist.constructor_props['meshWidth'] = self.get_input('meshWidth')
if self.has_input('coordinates'):
artist.constructor_props['coordinates'] = self.get_input('coordinates')
if self.has_input('antialiased'):
artist.constructor_props['antialiased'] = self.get_input('antialiased')
if self.has_input('shading'):
artist.constructor_props['shading'] = self.get_input('shading')
if self.has_input('meshHeight'):
artist.constructor_props['meshHeight'] = self.get_input('meshHeight')
class MplPatchProperties(MplArtistProperties):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
_input_ports = [
("edgecolor", "basic:Color",
{'optional': True, 'docstring': 'Set the patch edge color'}),
("facecolor", "basic:Color",
{'optional': True, 'docstring': 'Set the patch face color'}),
("path_effects", "basic:String",
{'optional': True, 'docstring': 'set path_effects, which should be a list of instances of matplotlib.patheffect._Base class or its derivatives.'}),
("color", "basic:Color",
         {'optional': True, 'docstring': 'Set both the edgecolor and the facecolor. See also :meth:`set_facecolor` and :meth:`set_edgecolor` for setting the edge or face color individually.'}),
("antialiased", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether to use antialiased rendering'}),
("hatch", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the hatching pattern\n\nhatch can be one of:\n\n/ - diagonal hatching \\ - back diagonal | - vertical - - horizontal + - crossed x - crossed diagonal o - small circle O - large circle . - dots * - stars\n\nLetters can be combined, in which case all the specified hatchings are done. If same letter repeats, it increases the density of hatching of that pattern.\n\nHatching is supported in the PostScript, PDF, SVG and Agg backends only.', 'values': '[[\'/\', "\'\\\\\'", "\'", "\'", \'-\', \'+\', \'x\', \'o\', \'O\', \'.\', \'*\']]', 'optional': True}),
("alpha", "basic:Float",
         {'optional': True, 'docstring': 'Set the alpha transparency of the patch.'}),
("linewidth", "basic:Float",
{'optional': True, 'docstring': 'Set the patch linewidth in points'}),
("linestyle", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the patch linestyle', 'values': "[['solid', 'dashed', 'dashdot', 'dotted']]", 'optional': True}),
("fill", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether to fill the patch'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplPatchProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplPatchProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('edgecolor'):
artist.props['edgecolor'] = self.get_input('edgecolor')
artist.props['edgecolor'] = translate_color(artist.props['edgecolor'])
if self.has_input('facecolor'):
artist.props['facecolor'] = self.get_input('facecolor')
artist.props['facecolor'] = translate_color(artist.props['facecolor'])
if self.has_input('path_effects'):
artist.props['path_effects'] = self.get_input('path_effects')
if self.has_input('color'):
artist.props['color'] = self.get_input('color')
artist.props['color'] = translate_color(artist.props['color'])
if self.has_input('antialiased'):
artist.props['antialiased'] = self.get_input('antialiased')
if self.has_input('hatch'):
artist.props['hatch'] = self.get_input('hatch')
if self.has_input('alpha'):
artist.props['alpha'] = self.get_input('alpha')
if self.has_input('linewidth'):
artist.props['linewidth'] = self.get_input('linewidth')
if self.has_input('linestyle'):
artist.props['linestyle'] = self.get_input('linestyle')
if self.has_input('fill'):
artist.props['fill'] = self.get_input('fill')
class MplShadowProperties(MplPatchProperties):
"""None
"""
_input_ports = [
("patch", "basic:String",
{'optional': True}),
("props", "basic:String",
{'optional': True}),
("oy", "basic:String",
{'optional': True}),
("ox", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplShadowProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplShadowProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('patch'):
artist.constructor_props['patch'] = self.get_input('patch')
if self.has_input('props'):
artist.constructor_props['props'] = self.get_input('props')
if self.has_input('oy'):
artist.constructor_props['oy'] = self.get_input('oy')
if self.has_input('ox'):
artist.constructor_props['ox'] = self.get_input('ox')
class MplRectangleProperties(MplPatchProperties):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
_input_ports = [
("bounds", "basic:String",
{'optional': True, 'docstring': 'Set the bounds of the rectangle: l,b,w,h'}),
("height", "basic:Float",
         {'optional': True, 'docstring': 'Set the height of the rectangle'}),
        ("width", "basic:Float",
         {'optional': True, 'docstring': 'Set the width of the rectangle'}),
("xy", "basic:List",
{'optional': True, 'docstring': 'Set the left and bottom coords of the rectangle'}),
("y", "basic:Float",
{'optional': True, 'docstring': 'Set the bottom coord of the rectangle'}),
("x", "basic:Float",
{'optional': True, 'docstring': 'Set the left coord of the rectangle'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplRectangleProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplRectangleProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('bounds'):
artist.props['bounds'] = self.get_input('bounds')
if self.has_input('height'):
artist.props['height'] = self.get_input('height')
if self.has_input('width'):
artist.props['width'] = self.get_input('width')
if self.has_input('xy'):
artist.props['xy'] = self.get_input('xy')
if self.has_input('y'):
artist.props['y'] = self.get_input('y')
if self.has_input('x'):
artist.props['x'] = self.get_input('x')
class MplRegularPolygonProperties(MplPatchProperties):
"""
A regular polygon patch.
"""
_input_ports = [
("xy", "basic:String",
{'optional': True, 'docstring': 'A length 2 tuple (x, y) of the center.'}),
("radius", "basic:Integer",
{'optional': True, 'docstring': 'The distance from the center to each of the vertices.', 'defaults': "['5']"}),
("orientation", "basic:Integer",
{'optional': True, 'docstring': 'rotates the polygon (in radians).', 'defaults': "['0']"}),
("numVertices", "basic:String",
{'optional': True, 'docstring': 'the number of vertices.'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplRegularPolygonProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplRegularPolygonProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('xy'):
artist.constructor_props['xy'] = self.get_input('xy')
if self.has_input('radius'):
artist.constructor_props['radius'] = self.get_input('radius')
if self.has_input('orientation'):
artist.constructor_props['orientation'] = self.get_input('orientation')
if self.has_input('numVertices'):
artist.constructor_props['numVertices'] = self.get_input('numVertices')
class MplCirclePolygonProperties(MplRegularPolygonProperties):
"""
A polygon-approximation of a circle patch.
"""
_input_ports = [
("radius", "basic:Integer",
{'optional': True, 'defaults': "['5']"}),
("xy", "basic:String",
{'optional': True}),
("resolution", "basic:Integer",
{'optional': True, 'defaults': "['20']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplCirclePolygonProperties)")]
class Artist(MplRegularPolygonProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplRegularPolygonProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplCirclePolygonProperties.Artist()
self.set_output("value", artist)
MplRegularPolygonProperties.compute(self, artist)
if self.has_input('radius'):
artist.constructor_props['radius'] = self.get_input('radius')
if self.has_input('xy'):
artist.constructor_props['xy'] = self.get_input('xy')
if self.has_input('resolution'):
artist.constructor_props['resolution'] = self.get_input('resolution')
class MplPathPatchProperties(MplPatchProperties):
"""
A general polycurve path patch.
"""
_input_ports = [
("path", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplPathPatchProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplPathPatchProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('path'):
artist.constructor_props['path'] = self.get_input('path')
class MplPolygonProperties(MplPatchProperties):
"""
A general polygon patch.
"""
_input_ports = [
("xy", "basic:String",
{'optional': True}),
("closed", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplPolygonProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplPolygonProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('xy'):
artist.props['xy'] = self.get_input('xy')
if self.has_input('closed'):
artist.props['closed'] = self.get_input('closed')
class MplFancyArrowProperties(MplPolygonProperties):
"""
Like Arrow, but lets you set head width and head height independently.
"""
_input_ports = [
("length_includes_head", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
("head_length", "basic:String",
{'optional': True}),
("head_width", "basic:String",
{'optional': True}),
("width", "basic:Float",
{'optional': True, 'defaults': "['0.001']"}),
("shape", "basic:String",
{'optional': True, 'defaults': "['full']"}),
("dx", "basic:String",
{'optional': True}),
("dy", "basic:String",
{'optional': True}),
("y", "basic:String",
{'optional': True}),
("x", "basic:String",
{'optional': True}),
("head_starts_at_zero", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
("overhang", "basic:Integer",
{'optional': True, 'defaults': "['0']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplFancyArrowProperties)")]
class Artist(MplPolygonProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPolygonProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplFancyArrowProperties.Artist()
self.set_output("value", artist)
MplPolygonProperties.compute(self, artist)
if self.has_input('length_includes_head'):
artist.constructor_props['length_includes_head'] = self.get_input('length_includes_head')
if self.has_input('head_length'):
artist.constructor_props['head_length'] = self.get_input('head_length')
if self.has_input('head_width'):
artist.constructor_props['head_width'] = self.get_input('head_width')
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
if self.has_input('shape'):
artist.constructor_props['shape'] = self.get_input('shape')
if self.has_input('dx'):
artist.constructor_props['dx'] = self.get_input('dx')
if self.has_input('dy'):
artist.constructor_props['dy'] = self.get_input('dy')
if self.has_input('y'):
artist.constructor_props['y'] = self.get_input('y')
if self.has_input('x'):
artist.constructor_props['x'] = self.get_input('x')
if self.has_input('head_starts_at_zero'):
artist.constructor_props['head_starts_at_zero'] = self.get_input('head_starts_at_zero')
if self.has_input('overhang'):
artist.constructor_props['overhang'] = self.get_input('overhang')
class MplWedgeProperties(MplPatchProperties):
"""
Wedge shaped patch.
"""
_input_ports = [
("theta2", "basic:String",
{'optional': True}),
("width", "basic:String",
{'optional': True}),
("r", "basic:String",
{'optional': True}),
("theta1", "basic:String",
{'optional': True}),
("center", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplWedgeProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplWedgeProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('theta2'):
artist.constructor_props['theta2'] = self.get_input('theta2')
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
if self.has_input('r'):
artist.constructor_props['r'] = self.get_input('r')
if self.has_input('theta1'):
artist.constructor_props['theta1'] = self.get_input('theta1')
if self.has_input('center'):
artist.constructor_props['center'] = self.get_input('center')
class MplArrowProperties(MplPatchProperties):
"""
An arrow patch.
"""
_input_ports = [
("y", "basic:String",
{'optional': True}),
("x", "basic:String",
{'optional': True}),
("dy", "basic:String",
{'optional': True}),
("dx", "basic:String",
{'optional': True}),
("width", "basic:Float",
{'optional': True, 'defaults': "['1.0']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplArrowProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplArrowProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('y'):
artist.constructor_props['y'] = self.get_input('y')
if self.has_input('x'):
artist.constructor_props['x'] = self.get_input('x')
if self.has_input('dy'):
artist.constructor_props['dy'] = self.get_input('dy')
if self.has_input('dx'):
artist.constructor_props['dx'] = self.get_input('dx')
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
class MplYAArrowProperties(MplPatchProperties):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
_input_ports = [
("xytip", "basic:String",
{'optional': True, 'docstring': '(x, y) location of arrow tip'}),
("headwidth", "basic:Integer",
{'optional': True, 'docstring': 'The width of the base of the arrow head in points', 'defaults': "['12']"}),
("frac", "basic:Float",
{'optional': True, 'docstring': 'The fraction of the arrow length occupied by the head', 'defaults': "['0.1']"}),
("figure", "basic:String",
{'optional': True, 'docstring': 'The :class:`~matplotlib.figure.Figure` instance (fig.dpi)'}),
("xybase", "basic:String",
         {'optional': True, 'docstring': '(x, y) location of the arrow base mid point'}),
("width", "basic:Integer",
{'optional': True, 'docstring': 'The width of the arrow in points', 'defaults': "['4']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplYAArrowProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplYAArrowProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('xytip'):
artist.constructor_props['xytip'] = self.get_input('xytip')
if self.has_input('headwidth'):
artist.constructor_props['headwidth'] = self.get_input('headwidth')
if self.has_input('frac'):
artist.constructor_props['frac'] = self.get_input('frac')
if self.has_input('figure'):
artist.constructor_props['figure'] = self.get_input('figure')
if self.has_input('xybase'):
artist.constructor_props['xybase'] = self.get_input('xybase')
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
class MplEllipseProperties(MplPatchProperties):
"""
A scale-free ellipse.
"""
_input_ports = [
("width", "basic:String",
{'optional': True}),
("xy", "basic:String",
{'optional': True}),
("angle", "basic:Float",
{'optional': True, 'defaults': "['0.0']"}),
("height", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplEllipseProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplEllipseProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
if self.has_input('xy'):
artist.constructor_props['xy'] = self.get_input('xy')
if self.has_input('angle'):
artist.constructor_props['angle'] = self.get_input('angle')
if self.has_input('height'):
artist.constructor_props['height'] = self.get_input('height')
class MplCircleProperties(MplEllipseProperties):
"""
A circle patch.
"""
_input_ports = [
("xy", "basic:String",
{'optional': True}),
("radius", "basic:Float",
{'optional': True, 'docstring': 'Set the radius of the circle'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplCircleProperties)")]
class Artist(MplEllipseProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplEllipseProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplCircleProperties.Artist()
self.set_output("value", artist)
MplEllipseProperties.compute(self, artist)
if self.has_input('xy'):
artist.constructor_props['xy'] = self.get_input('xy')
if self.has_input('radius'):
artist.props['radius'] = self.get_input('radius')
class MplArcProperties(MplEllipseProperties):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
_input_ports = [
("theta2", "basic:Float",
{'optional': True, 'defaults': "['360.0']"}),
("theta1", "basic:Float",
{'optional': True, 'defaults': "['0.0']"}),
("angle", "basic:Float",
{'optional': True, 'defaults': "['0.0']"}),
("height", "basic:String",
{'optional': True}),
("width", "basic:String",
{'optional': True}),
("xy", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplArcProperties)")]
class Artist(MplEllipseProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplEllipseProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplArcProperties.Artist()
self.set_output("value", artist)
MplEllipseProperties.compute(self, artist)
if self.has_input('theta2'):
artist.constructor_props['theta2'] = self.get_input('theta2')
if self.has_input('theta1'):
artist.constructor_props['theta1'] = self.get_input('theta1')
if self.has_input('angle'):
artist.constructor_props['angle'] = self.get_input('angle')
if self.has_input('height'):
artist.constructor_props['height'] = self.get_input('height')
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
if self.has_input('xy'):
artist.constructor_props['xy'] = self.get_input('xy')
class MplFancyBboxPatchProperties(MplPatchProperties):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
:class:`FancyBboxPatch` class is similar to :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
_input_ports = [
("mutation_scale", "basic:Float",
{'optional': True, 'docstring': 'Set the mutation scale.'}),
("bbox_transmuter", "basic:String",
{'optional': True}),
("bounds", "basic:String",
{'optional': True, 'docstring': 'Set the bounds of the rectangle: l,b,w,h'}),
("height", "basic:Float",
         {'optional': True, 'docstring': 'Set the height of the rectangle'}),
        ("width", "basic:Float",
         {'optional': True, 'docstring': 'Set the width of the rectangle'}),
("xy", "basic:String",
{'optional': True}),
("boxstyle", "basic:String",
{'optional': True, 'docstring': 'Set the box style.\n\nboxstyle can be a string with boxstyle name with optional comma-separated attributes. Alternatively, the attrs can be provided as keywords:\n\nset_boxstyle("round,pad=0.2") set_boxstyle("round", pad=0.2)\n\nOld attrs simply are forgotten.\n\nWithout argument (or with boxstyle = None), it returns available box styles.'}),
("mutation_aspect", "basic:Float",
{'optional': True, 'docstring': 'Set the aspect ratio of the bbox mutation.'}),
("y", "basic:Float",
{'optional': True, 'docstring': 'Set the bottom coord of the rectangle'}),
("x", "basic:Float",
{'optional': True, 'docstring': 'Set the left coord of the rectangle'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplFancyBboxPatchProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplFancyBboxPatchProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('mutation_scale'):
artist.props['mutation_scale'] = self.get_input('mutation_scale')
if self.has_input('bbox_transmuter'):
artist.constructor_props['bbox_transmuter'] = self.get_input('bbox_transmuter')
if self.has_input('bounds'):
artist.props['bounds'] = self.get_input('bounds')
if self.has_input('height'):
artist.props['height'] = self.get_input('height')
if self.has_input('width'):
artist.props['width'] = self.get_input('width')
if self.has_input('xy'):
artist.constructor_props['xy'] = self.get_input('xy')
if self.has_input('boxstyle'):
artist.props['boxstyle'] = self.get_input('boxstyle')
if self.has_input('mutation_aspect'):
artist.props['mutation_aspect'] = self.get_input('mutation_aspect')
if self.has_input('y'):
artist.props['y'] = self.get_input('y')
if self.has_input('x'):
artist.props['x'] = self.get_input('x')
class MplFancyArrowPatchProperties(MplPatchProperties):
"""
    A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
"""
_input_ports = [
("connectionstyle", "basic:String",
{'optional': True, 'docstring': 'Set the connection style.\n\nOld attrs simply are forgotten.\n\nWithout argument (or with connectionstyle=None), return available styles as a list of strings.'}),
("mutation_scale", "basic:Float",
{'optional': True, 'docstring': 'Set the mutation scale.'}),
("arrowstyle", "basic:String",
         {'optional': True, 'docstring': 'Set the arrow style.\n\nOld attrs simply are forgotten.\n\nWithout argument (or with arrowstyle=None), return available arrow styles as a list of strings.'}),
("arrow_transmuter", "basic:String",
{'optional': True}),
("positions", "basic:String",
{'optional': True}),
("shrinkA", "basic:Float",
{'optional': True, 'defaults': "['2.0']"}),
("posB", "basic:String",
{'optional': True}),
("dpi_cor", "basic:String",
         {'optional': True, 'docstring': 'dpi_cor is currently used for linewidth-related things and shrink factor. Mutation scale is not affected by this.'}),
("connector", "basic:String",
{'optional': True}),
("path", "basic:String",
{'optional': True}),
("shrinkB", "basic:Float",
{'optional': True, 'defaults': "['2.0']"}),
("mutation_aspect", "basic:Float",
{'optional': True, 'docstring': 'Set the aspect ratio of the bbox mutation.'}),
("patchA", "basic:String",
         {'optional': True, 'docstring': 'Set the begin patch.'}),
        ("patchB", "basic:String",
         {'optional': True, 'docstring': 'Set the end patch.'}),
("posA", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplFancyArrowPatchProperties)")]
class Artist(MplPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplFancyArrowPatchProperties.Artist()
self.set_output("value", artist)
MplPatchProperties.compute(self, artist)
if self.has_input('connectionstyle'):
artist.props['connectionstyle'] = self.get_input('connectionstyle')
if self.has_input('mutation_scale'):
artist.props['mutation_scale'] = self.get_input('mutation_scale')
if self.has_input('arrowstyle'):
artist.props['arrowstyle'] = self.get_input('arrowstyle')
if self.has_input('arrow_transmuter'):
artist.constructor_props['arrow_transmuter'] = self.get_input('arrow_transmuter')
if self.has_input('positions'):
artist.props['positions'] = self.get_input('positions')
if self.has_input('shrinkA'):
artist.constructor_props['shrinkA'] = self.get_input('shrinkA')
if self.has_input('posB'):
artist.constructor_props['posB'] = self.get_input('posB')
if self.has_input('dpi_cor'):
artist.props['dpi_cor'] = self.get_input('dpi_cor')
if self.has_input('connector'):
artist.constructor_props['connector'] = self.get_input('connector')
if self.has_input('path'):
artist.constructor_props['path'] = self.get_input('path')
if self.has_input('shrinkB'):
artist.constructor_props['shrinkB'] = self.get_input('shrinkB')
if self.has_input('mutation_aspect'):
artist.props['mutation_aspect'] = self.get_input('mutation_aspect')
if self.has_input('patchA'):
artist.props['patchA'] = self.get_input('patchA')
if self.has_input('patchB'):
artist.props['patchB'] = self.get_input('patchB')
if self.has_input('posA'):
artist.constructor_props['posA'] = self.get_input('posA')
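# Illustrative sketch only, not part of the generated API: values routed to
# constructor_props are folded into the keyword arguments of the eventual
# matplotlib constructor by update_kwargs, while entries in props are applied
# later through matplotlib.artist.setp. All values below are hypothetical.
def _sketch_fancy_arrow_kwargs():
    """Hedged example: assemble constructor kwargs for a FancyArrowPatch."""
    helper = MplFancyArrowPatchProperties.Artist()
    helper.constructor_props['posA'] = (0.0, 0.0)
    helper.constructor_props['posB'] = (1.0, 1.0)
    helper.props['mutation_scale'] = 20.0
    kwargs = {}
    helper.update_kwargs(kwargs)
    # kwargs now holds posA, posB and mutation_scale; a caller could pass it to
    # matplotlib.patches.FancyArrowPatch(**kwargs).
    return kwargs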
class MplConnectionPatchProperties(MplFancyArrowPatchProperties):
"""
A :class:`~matplotlib.patches.ConnectionPatch` class is to make
connecting lines between two points (possibly in different axes).
"""
_input_ports = [
("connectionstyle", "basic:String",
{'optional': True, 'docstring': 'the connection style', 'defaults': "['arc3']"}),
("coordsA", "basic:String",
{'entry_types': "['enum']", 'values': "[['figure points', 'figure pixels', 'figure fraction', 'axes points', 'axes pixels', 'axes fraction', 'data', 'offset points', 'polar']]", 'optional': True}),
("arrowstyle", "basic:String",
{'optional': True, 'docstring': 'the arrow style', 'defaults': "['-']"}),
("clip_on", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
("arrow_transmuter", "basic:String",
{'optional': True}),
("axesA", "basic:String",
{'optional': True}),
("axesB", "basic:String",
{'optional': True}),
("annotation_clip", "basic:String",
{'optional': True, 'docstring': 'set annotation_clip attribute.\n\nNone: the self.xy will be checked only if xycoords is "data"'}),
("dpi_cor", "basic:Float",
{'optional': True, 'defaults': "['1.0']"}),
("connector", "basic:String",
{'optional': True}),
("xyA", "basic:String",
{'optional': True}),
("xyB", "basic:String",
{'optional': True}),
("relpos", "basic:String",
               {'optional': True, 'docstring': 'default is (0.5, 0.5)', 'defaults': "['(0.5, 0.5)']"}),
("shrinkB", "basic:Float",
{'optional': True, 'docstring': 'default is 2 points', 'defaults': "['2']"}),
("shrinkA", "basic:Float",
{'optional': True, 'docstring': 'default is 2 points', 'defaults': "['2']"}),
("mutation_aspect", "basic:Integer",
{'optional': True, 'docstring': 'default is 1.', 'defaults': "['1']"}),
("mutation_scale", "basic:String",
               {'optional': True, 'docstring': 'default is text size (in points)'}),
("patchA", "basic:String",
               {'optional': True, 'docstring': 'default is bounding box of the text'}),
("patchB", "basic:String",
{'optional': True, 'docstring': 'default is None'}),
("coordsB", "basic:String",
{'entry_types': "['enum']", 'values': "[['figure points', 'figure pixels', 'figure fraction', 'axes points', 'axes pixels', 'axes fraction', 'data', 'offset points', 'polar']]", 'optional': True}),
("?", "basic:String",
{'optional': True, 'docstring': 'any key for :class:`matplotlib.patches.PathPatch`'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplConnectionPatchProperties)")]
class Artist(MplFancyArrowPatchProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplFancyArrowPatchProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplConnectionPatchProperties.Artist()
self.set_output("value", artist)
MplFancyArrowPatchProperties.compute(self, artist)
if self.has_input('connectionstyle'):
artist.constructor_props['connectionstyle'] = self.get_input('connectionstyle')
if self.has_input('coordsA'):
artist.constructor_props['coordsA'] = self.get_input('coordsA')
if self.has_input('arrowstyle'):
artist.constructor_props['arrowstyle'] = self.get_input('arrowstyle')
if self.has_input('clip_on'):
artist.constructor_props['clip_on'] = self.get_input('clip_on')
if self.has_input('arrow_transmuter'):
artist.constructor_props['arrow_transmuter'] = self.get_input('arrow_transmuter')
if self.has_input('axesA'):
artist.constructor_props['axesA'] = self.get_input('axesA')
if self.has_input('axesB'):
artist.constructor_props['axesB'] = self.get_input('axesB')
if self.has_input('annotation_clip'):
artist.props['annotation_clip'] = self.get_input('annotation_clip')
if self.has_input('dpi_cor'):
artist.constructor_props['dpi_cor'] = self.get_input('dpi_cor')
if self.has_input('connector'):
artist.constructor_props['connector'] = self.get_input('connector')
if self.has_input('xyA'):
artist.constructor_props['xyA'] = self.get_input('xyA')
if self.has_input('xyB'):
artist.constructor_props['xyB'] = self.get_input('xyB')
if self.has_input('relpos'):
artist.constructor_props['relpos'] = self.get_input('relpos')
if self.has_input('shrinkB'):
artist.constructor_props['shrinkB'] = self.get_input('shrinkB')
if self.has_input('shrinkA'):
artist.constructor_props['shrinkA'] = self.get_input('shrinkA')
if self.has_input('mutation_aspect'):
artist.constructor_props['mutation_aspect'] = self.get_input('mutation_aspect')
if self.has_input('mutation_scale'):
artist.constructor_props['mutation_scale'] = self.get_input('mutation_scale')
if self.has_input('patchA'):
artist.constructor_props['patchA'] = self.get_input('patchA')
if self.has_input('patchB'):
artist.constructor_props['patchB'] = self.get_input('patchB')
if self.has_input('coordsB'):
artist.constructor_props['coordsB'] = self.get_input('coordsB')
if self.has_input('?'):
artist.constructor_props['?'] = self.get_input('?')
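# Illustrative sketch only, not part of the generated API: unlike its parent
# class, MplConnectionPatchProperties routes almost every input into
# constructor_props, since ConnectionPatch takes its geometry and coordinate
# systems at construction time. All values below are hypothetical.
def _sketch_connection_patch_kwargs():
    """Hedged example: assemble constructor kwargs for a ConnectionPatch."""
    helper = MplConnectionPatchProperties.Artist()
    helper.constructor_props['xyA'] = (0.2, 0.2)
    helper.constructor_props['xyB'] = (0.8, 0.8)
    helper.constructor_props['coordsA'] = 'data'
    helper.constructor_props['coordsB'] = 'data'
    kwargs = {}
    helper.update_kwargs(kwargs)
    # A caller could now do matplotlib.patches.ConnectionPatch(**kwargs).
    return kwargs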
class MplLine2DProperties(MplArtistProperties):
"""
A line - the line can have both a solid linestyle connecting all
the vertices, and a marker at each vertex. Additionally, the
    drawing of the solid line is influenced by the drawstyle, e.g., one
can create "stepped" lines in various styles.
"""
_input_ports = [
("picker", "basic:Float",
{'optional': True, 'docstring': 'Sets the event picker details for the line.'}),
("dash_capstyle", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the cap style for dashed linestyles', 'values': "[['butt', 'round', 'projecting']]", 'optional': True}),
("color", "basic:Color",
{'optional': True, 'docstring': 'Set the color of the line'}),
("markevery", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the markevery property to subsample the plot when using markers. Eg if markevery=5, every 5-th marker will be plotted. every can be', 'values': "[['(startind, stride)']]", 'optional': True}),
("markeredgecolor", "basic:Color",
{'optional': True, 'docstring': 'Set the marker edge color'}),
("marker", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the line marker\n\nThe marker can also be a tuple (numsides, style, angle), which will create a custom, regular symbol.\n\n\n\nFor backward compatibility, the form (verts, 0) is also accepted, but it is equivalent to just verts for giving a raw set of vertices that define the shape.', 'values': "[['caretdown', 'caretleft', 'caretright', 'caretup', 'circle', 'diamond', 'hexagon1', 'hexagon2', 'hline', 'nothing', 'octagon', 'pentagon', 'pixel', 'plus', 'point', 'square', 'star', 'thin_diamond', 'tickdown', 'tickleft', 'tickright', 'tickup', 'tri_down', 'tri_left', 'tri_right', 'tri_up', 'triangle_down', 'triangle_left', 'triangle_right', 'triangle_up', 'vline', 'x', 'mathtext']]", 'optional': True}),
("markerfacecoloralt", "basic:Color",
{'optional': True, 'docstring': 'Set the alternate marker face color.'}),
("linewidth", "basic:Float",
{'optional': True, 'docstring': 'Set the line width in points'}),
("linestyle", "basic:String",
{'entry_types': "['enum']", 'docstring': "Set the linestyle of the line (also accepts drawstyles)\n\n'steps' is equivalent to 'steps-pre' and is maintained for backward-compatibility.", 'values': "[['solid', 'dashed', 'dash_dot', 'dotted', 'draw nothing', 'draw nothing', 'draw nothing']]", 'optional': True}),
("solid_joinstyle", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the join style for solid linestyles', 'values': "[['miter', 'round', 'bevel']]", 'optional': True}),
("markerfacecolor", "basic:Color",
{'optional': True, 'docstring': 'Set the marker face color.'}),
("axes", "basic:String",
{'optional': True, 'docstring': 'Set the :class:`~matplotlib.axes.Axes` instance in which the artist resides, if any.'}),
("transform", "basic:String",
{'optional': True, 'docstring': 'set the Transformation instance used by this artist'}),
("fillstyle", "basic:String",
{'entry_types': "['enum']", 'docstring': "Set the marker fill style; 'full' means fill the whole marker. 'none' means no filling; other options are for half-filled markers.", 'values': "[['full', 'left', 'right', 'bottom', 'top', 'none']]", 'optional': True}),
("markeredgewidth", "basic:Float",
{'optional': True, 'docstring': 'Set the marker edge width in points'}),
("solid_capstyle", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the cap style for solid linestyles', 'values': "[['butt', 'round', 'projecting']]", 'optional': True}),
("dashes", "basic:List",
{'optional': True, 'docstring': 'Set the dash sequence, sequence of dashes with on off ink in points. If seq is empty or if seq = (None, None), the linestyle will be set to solid.'}),
("markersize", "basic:Float",
{'optional': True, 'docstring': 'Set the marker size in points'}),
("antialiased", "basic:Boolean",
               {'optional': True, 'docstring': 'True if line should be drawn with antialiased rendering'}),
("xdata", "basic:String",
{'optional': True, 'docstring': 'Set the data np.array for x'}),
("drawstyle", "basic:String",
{'entry_types': "['enum']", 'docstring': "Set the drawstyle of the plot\n\n'default' connects the points with lines. The steps variants produce step-plots. 'steps' is equivalent to 'steps-pre' and is maintained for backward-compatibility.", 'values': "[['default', 'steps', 'steps-pre', 'steps-mid', 'steps-post']]", 'optional': True}),
("data", "basic:String",
{'optional': True, 'docstring': 'Set the x and y data'}),
("dash_joinstyle", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the join style for dashed linestyles', 'values': "[['miter', 'round', 'bevel']]", 'optional': True}),
("pickradius", "basic:Float",
{'optional': True, 'docstring': 'Sets the pick radius used for containment tests'}),
("ydata", "basic:String",
{'optional': True, 'docstring': 'Set the data np.array for y'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplLine2DProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplLine2DProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('picker'):
artist.props['picker'] = self.get_input('picker')
if self.has_input('dash_capstyle'):
artist.props['dash_capstyle'] = self.get_input('dash_capstyle')
if self.has_input('color'):
artist.props['color'] = self.get_input('color')
artist.props['color'] = translate_color(artist.props['color'])
if self.has_input('markevery'):
artist.props['markevery'] = self.get_input('markevery')
if self.has_input('markeredgecolor'):
artist.props['markeredgecolor'] = self.get_input('markeredgecolor')
artist.props['markeredgecolor'] = translate_color(artist.props['markeredgecolor'])
if self.has_input('marker'):
artist.props['marker'] = self.get_input('marker')
artist.props['marker'] = translate_MplLine2DProperties_marker(artist.props['marker'])
if self.has_input('markerfacecoloralt'):
artist.props['markerfacecoloralt'] = self.get_input('markerfacecoloralt')
artist.props['markerfacecoloralt'] = translate_color(artist.props['markerfacecoloralt'])
if self.has_input('linewidth'):
artist.props['linewidth'] = self.get_input('linewidth')
if self.has_input('linestyle'):
artist.props['linestyle'] = self.get_input('linestyle')
artist.props['linestyle'] = translate_MplLine2DProperties_linestyle(artist.props['linestyle'])
if self.has_input('solid_joinstyle'):
artist.props['solid_joinstyle'] = self.get_input('solid_joinstyle')
if self.has_input('markerfacecolor'):
artist.props['markerfacecolor'] = self.get_input('markerfacecolor')
artist.props['markerfacecolor'] = translate_color(artist.props['markerfacecolor'])
if self.has_input('axes'):
artist.props['axes'] = self.get_input('axes')
if self.has_input('transform'):
artist.props['transform'] = self.get_input('transform')
if self.has_input('fillstyle'):
artist.props['fillstyle'] = self.get_input('fillstyle')
if self.has_input('markeredgewidth'):
artist.props['markeredgewidth'] = self.get_input('markeredgewidth')
if self.has_input('solid_capstyle'):
artist.props['solid_capstyle'] = self.get_input('solid_capstyle')
if self.has_input('dashes'):
artist.props['dashes'] = self.get_input('dashes')
if self.has_input('markersize'):
artist.props['markersize'] = self.get_input('markersize')
if self.has_input('antialiased'):
artist.props['antialiased'] = self.get_input('antialiased')
if self.has_input('xdata'):
artist.props['xdata'] = self.get_input('xdata')
if self.has_input('drawstyle'):
artist.props['drawstyle'] = self.get_input('drawstyle')
if self.has_input('data'):
artist.props['data'] = self.get_input('data')
if self.has_input('dash_joinstyle'):
artist.props['dash_joinstyle'] = self.get_input('dash_joinstyle')
if self.has_input('pickradius'):
artist.props['pickradius'] = self.get_input('pickradius')
if self.has_input('ydata'):
artist.props['ydata'] = self.get_input('ydata')
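# Illustrative sketch only, not part of the generated API: entries in props are
# applied through matplotlib.artist.setp (i.e. the Line2D set_* methods), while
# not_setp_props entries are written with plain setattr. Color, marker and
# linestyle inputs additionally pass through the translate_* helpers defined
# earlier in this module before reaching setp.
def _sketch_style_line(line):
    """Hedged example; `line` is assumed to be a matplotlib.lines.Line2D
    supplied by a hypothetical caller (e.g. the result of ax.plot(...))."""
    helper = MplLine2DProperties.Artist()
    helper.props['color'] = (0.0, 0.0, 1.0)  # already-translated RGB tuple
    helper.props['linewidth'] = 2.0
    helper.props['linestyle'] = '--'
    helper.update_props(line)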
class MplTextProperties(MplArtistProperties):
"""
Handle storing and drawing of text in window or data coordinates.
"""
_input_ports = [
("rotation_mode", "basic:String",
               {'optional': True, 'docstring': 'Set the text rotation mode. If "anchor", the un-rotated text will first be aligned according to its ha and va, and then rotated with the alignment reference point as the origin. If None (default), the text will be rotated first and then aligned.'}),
("style", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the font style.', 'values': "[['normal', 'italic', 'oblique']]", 'optional': True}),
("linespacing", "basic:Float",
{'optional': True, 'docstring': 'Set the line spacing as a multiple of the font size. Default is 1.2.'}),
("family", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the font family. May be either a single string, or a list of strings in decreasing priority. Each string may be either a real font name or a generic font class name. If the latter, the specific font names will be looked up in the :file:`matplotlibrc` file.', 'values': "[['FONTNAME', 'serif', 'sans-serif', 'cursive', 'fantasy', 'monospace']]", 'optional': True}),
("x", "basic:Float",
{'optional': True, 'docstring': 'Set the x position of the text'}),
("color", "basic:Color",
{'optional': True, 'docstring': 'Set the foreground color of the text'}),
("text", "basic:String",
{'optional': True, 'docstring': 'Set the text string s\n\nIt may contain newlines (\\n) or math in LaTeX syntax.'}),
("verticalalignment", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the vertical alignment', 'values': "[['center', 'top', 'bottom', 'baseline']]", 'optional': True}),
("variant", "basic:String",
{'entry_types': "['enum']", 'docstring': "Set the font variant, either 'normal' or 'small-caps'.", 'values': "[['normal', 'small-caps']]", 'optional': True}),
("path_effects", "basic:String",
{'optional': True}),
("weight", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the font weight.', 'values': "[['a numeric value in range 0-1000', 'ultralight', 'light', 'normal', 'regular', 'book', 'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy', 'extra bold', 'black']]", 'optional': True}),
("stretch", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the font stretch (horizontal condensation or expansion).', 'values': "[['a numeric value in range 0-1000', 'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded']]", 'optional': True}),
("fontproperties", "basic:String",
{'optional': True, 'docstring': 'Set the font properties that control the text. fp must be a :class:`matplotlib.font_manager.FontProperties` object.'}),
("horizontalalignment", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the horizontal alignment to one of', 'values': "[['center', 'right', 'left']]", 'optional': True}),
("bbox", "basic:String",
               {'optional': True, 'docstring': 'Draw a bounding box around self. rectprops are any settable properties for a rectangle, e.g. facecolor=\'red\', alpha=0.5.\n\nt.set_bbox(dict(facecolor=\'red\', alpha=0.5))\n\nIf rectprops has a "boxstyle" key, a FancyBboxPatch is initialized with rectprops and will be drawn. The mutation scale of the FancyBboxPatch is set to the fontsize.'}),
("backgroundcolor", "basic:Color",
{'optional': True, 'docstring': 'Set the background color of the text by updating the bbox.'}),
("position", "basic:String",
{'optional': True, 'docstring': 'Set the (x, y) position of the text'}),
("y", "basic:Float",
{'optional': True, 'docstring': 'Set the y position of the text'}),
("multialignment", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the alignment for multiple lines layout. The layout of the bounding box of all the lines is determined bu the horizontalalignment and verticalalignment properties, but the multiline text within that box can be', 'values': "[['left', 'right', 'center']]", 'optional': True}),
("rotation", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the rotation of the text', 'values': "[['angle in degrees', 'vertical', 'horizontal']]", 'optional': True}),
("size", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the font size. May be either a size string, relative to the default font size, or an absolute font size in points.', 'values': "[['size in points', 'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large']]", 'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplTextProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplTextProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('rotation_mode'):
artist.props['rotation_mode'] = self.get_input('rotation_mode')
if self.has_input('style'):
artist.props['style'] = self.get_input('style')
if self.has_input('linespacing'):
artist.props['linespacing'] = self.get_input('linespacing')
if self.has_input('family'):
artist.props['family'] = self.get_input('family')
if self.has_input('x'):
artist.props['x'] = self.get_input('x')
if self.has_input('color'):
artist.props['color'] = self.get_input('color')
artist.props['color'] = translate_color(artist.props['color'])
if self.has_input('text'):
artist.props['text'] = self.get_input('text')
if self.has_input('verticalalignment'):
artist.props['verticalalignment'] = self.get_input('verticalalignment')
if self.has_input('variant'):
artist.props['variant'] = self.get_input('variant')
if self.has_input('path_effects'):
artist.props['path_effects'] = self.get_input('path_effects')
if self.has_input('weight'):
artist.props['weight'] = self.get_input('weight')
if self.has_input('stretch'):
artist.props['stretch'] = self.get_input('stretch')
if self.has_input('fontproperties'):
artist.props['fontproperties'] = self.get_input('fontproperties')
if self.has_input('horizontalalignment'):
artist.props['horizontalalignment'] = self.get_input('horizontalalignment')
if self.has_input('bbox'):
artist.props['bbox'] = self.get_input('bbox')
if self.has_input('backgroundcolor'):
artist.props['backgroundcolor'] = self.get_input('backgroundcolor')
artist.props['backgroundcolor'] = translate_color(artist.props['backgroundcolor'])
if self.has_input('position'):
artist.props['position'] = self.get_input('position')
if self.has_input('y'):
artist.props['y'] = self.get_input('y')
if self.has_input('multialignment'):
artist.props['multialignment'] = self.get_input('multialignment')
if self.has_input('rotation'):
artist.props['rotation'] = self.get_input('rotation')
if self.has_input('size'):
artist.props['size'] = self.get_input('size')
class MplTextWithDashProperties(MplTextProperties):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (I.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
"""
_input_ports = [
("dashpush", "basic:Float",
{'optional': True, 'docstring': 'Set the "push" of the TextWithDash, which is the extra spacing between the beginning of the dash and the specified position.'}),
("dashdirection", "basic:String",
{'optional': True, 'docstring': "Set the direction of the dash following the text. 1 is before the text and 0 is after. The default is 0, which is what you'd want for the typical case of ticks below and on the left of the figure."}),
("linespacing", "basic:String",
{'optional': True}),
("figure", "basic:String",
{'optional': True, 'docstring': 'Set the figure instance the artist belong to.'}),
("color", "basic:String",
{'optional': True}),
("text", "basic:String",
{'optional': True, 'defaults': "['']"}),
("verticalalignment", "basic:String",
{'optional': True, 'defaults': "['center']"}),
("dashpad", "basic:Float",
{'optional': True, 'docstring': 'Set the "pad" of the TextWithDash, which is the extra spacing between the dash and the text, in canvas units.'}),
("dashrotation", "basic:Float",
{'optional': True, 'docstring': 'Set the rotation of the dash, in degrees'}),
("transform", "basic:String",
{'optional': True, 'docstring': 'Set the :class:`matplotlib.transforms.Transform` instance used by this artist.'}),
("fontproperties", "basic:String",
{'optional': True}),
("multialignment", "basic:String",
{'optional': True}),
("x", "basic:Float",
{'optional': True, 'docstring': 'Set the x position of the :class:`TextWithDash`.'}),
("y", "basic:Float",
{'optional': True, 'docstring': 'Set the y position of the :class:`TextWithDash`.'}),
("position", "basic:String",
{'optional': True, 'docstring': 'Set the (x, y) position of the :class:`TextWithDash`.'}),
("dashlength", "basic:Float",
{'optional': True, 'docstring': 'Set the length of the dash.'}),
("rotation", "basic:String",
{'optional': True}),
("horizontalalignment", "basic:String",
{'optional': True, 'defaults': "['center']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplTextWithDashProperties)")]
class Artist(MplTextProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplTextProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplTextWithDashProperties.Artist()
self.set_output("value", artist)
MplTextProperties.compute(self, artist)
if self.has_input('dashpush'):
artist.props['dashpush'] = self.get_input('dashpush')
if self.has_input('dashdirection'):
artist.props['dashdirection'] = self.get_input('dashdirection')
if self.has_input('linespacing'):
artist.constructor_props['linespacing'] = self.get_input('linespacing')
if self.has_input('figure'):
artist.props['figure'] = self.get_input('figure')
if self.has_input('color'):
artist.constructor_props['color'] = self.get_input('color')
if self.has_input('text'):
artist.constructor_props['text'] = self.get_input('text')
if self.has_input('verticalalignment'):
artist.constructor_props['verticalalignment'] = self.get_input('verticalalignment')
if self.has_input('dashpad'):
artist.props['dashpad'] = self.get_input('dashpad')
if self.has_input('dashrotation'):
artist.props['dashrotation'] = self.get_input('dashrotation')
if self.has_input('transform'):
artist.props['transform'] = self.get_input('transform')
if self.has_input('fontproperties'):
artist.constructor_props['fontproperties'] = self.get_input('fontproperties')
if self.has_input('multialignment'):
artist.constructor_props['multialignment'] = self.get_input('multialignment')
if self.has_input('x'):
artist.props['x'] = self.get_input('x')
if self.has_input('y'):
artist.props['y'] = self.get_input('y')
if self.has_input('position'):
artist.props['position'] = self.get_input('position')
if self.has_input('dashlength'):
artist.props['dashlength'] = self.get_input('dashlength')
if self.has_input('rotation'):
artist.constructor_props['rotation'] = self.get_input('rotation')
if self.has_input('horizontalalignment'):
artist.constructor_props['horizontalalignment'] = self.get_input('horizontalalignment')
class MplTickProperties(MplArtistProperties):
"""
Abstract base class for the axis ticks, grid lines and labels
1 refers to the bottom of the plot for xticks and the left for yticks
2 refers to the top of the plot for xticks and the right for yticks
Publicly accessible attributes:
:attr:`tick1line`
a Line2D instance
:attr:`tick2line`
a Line2D instance
:attr:`gridline`
a Line2D instance
:attr:`label1`
a Text instance
:attr:`label2`
a Text instance
:attr:`gridOn`
a boolean which determines whether to draw the tickline
:attr:`tick1On`
a boolean which determines whether to draw the 1st tickline
:attr:`tick2On`
a boolean which determines whether to draw the 2nd tickline
:attr:`label1On`
a boolean which determines whether to draw tick label
:attr:`label2On`
a boolean which determines whether to draw tick label
"""
_input_ports = [
("label1On", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("loc", "basic:String",
{'optional': True}),
("major", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("label2On", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
("color", "basic:Color",
{'optional': True}),
("axes", "basic:String",
{'optional': True}),
("clip_path", "basic:String",
{'entry_types': "['enum']", 'docstring': "Set the artist's clip path, which may be:\n\na :class:`~matplotlib.patches.Patch` (or subclass) instance\n\n\n\nNone, to remove the clipping path\n\nFor efficiency, if the path happens to be an axis-aligned rectangle, this method will set the clipping box to the corresponding rectangle and set the clipping path to None.", 'values': "[['(:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`)', ':class:`~matplotlib.patches.Patch`']]", 'optional': True}),
("label", "basic:String",
{'optional': True, 'docstring': 'Set the text of ticklabel'}),
("labelcolor", "basic:String",
{'optional': True}),
("tickdir", "basic:String",
{'optional': True}),
("pad", "basic:Float",
{'optional': True, 'docstring': 'Set the tick label pad in points'}),
("gridOn", "basic:Boolean",
{'optional': True, 'docstring': 'a boolean which determines whether to draw the tickline'}),
("zorder", "basic:String",
{'optional': True}),
("tick2On", "basic:Boolean",
{'optional': True, 'docstring': 'a boolean which determines whether to draw the 2nd tickline', 'defaults': "['True']"}),
("labelsize", "basic:String",
{'optional': True}),
("width", "basic:String",
{'optional': True}),
("tick1On", "basic:Boolean",
{'optional': True, 'docstring': 'a boolean which determines whether to draw the 1st tickline', 'defaults': "['True']"}),
("size", "basic:String",
{'optional': True}),
("label1Properties", "MplTextProperties",
               {'docstring': 'Set the properties of the first tick label'}),
("label2Properties", "MplTextProperties",
               {'docstring': 'Set the properties of the second tick label'}),
("tick1lineProperties", "MplLine2DProperties",
{}),
("tick2lineProperties", "MplLine2DProperties",
{}),
("gridlineProperties", "MplLine2DProperties",
{}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplTickProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
if 'label1' in self.sub_props:
self.sub_props['label1'].update_props(obj.label1)
if 'label2' in self.sub_props:
self.sub_props['label2'].update_props(obj.label2)
if 'tick1line' in self.sub_props:
self.sub_props['tick1line'].update_props(obj.tick1line)
if 'tick2line' in self.sub_props:
self.sub_props['tick2line'].update_props(obj.tick2line)
if 'gridline' in self.sub_props:
self.sub_props['gridline'].update_props(obj.gridline)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplTickProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('label1On'):
artist.not_setp_props['label1On'] = self.get_input('label1On')
if self.has_input('loc'):
artist.constructor_props['loc'] = self.get_input('loc')
if self.has_input('major'):
artist.constructor_props['major'] = self.get_input('major')
if self.has_input('label2On'):
artist.not_setp_props['label2On'] = self.get_input('label2On')
if self.has_input('color'):
artist.constructor_props['color'] = self.get_input('color')
artist.constructor_props['color'] = translate_color(artist.constructor_props['color'])
if self.has_input('axes'):
artist.constructor_props['axes'] = self.get_input('axes')
if self.has_input('clip_path'):
artist.props['clip_path'] = self.get_input('clip_path')
if self.has_input('label'):
artist.props['label'] = self.get_input('label')
if self.has_input('labelcolor'):
artist.constructor_props['labelcolor'] = self.get_input('labelcolor')
if self.has_input('tickdir'):
artist.constructor_props['tickdir'] = self.get_input('tickdir')
if self.has_input('pad'):
artist.props['pad'] = self.get_input('pad')
if self.has_input('gridOn'):
artist.not_setp_props['gridOn'] = self.get_input('gridOn')
if self.has_input('zorder'):
artist.constructor_props['zorder'] = self.get_input('zorder')
if self.has_input('tick2On'):
artist.not_setp_props['tick2On'] = self.get_input('tick2On')
if self.has_input('labelsize'):
artist.constructor_props['labelsize'] = self.get_input('labelsize')
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
if self.has_input('tick1On'):
artist.not_setp_props['tick1On'] = self.get_input('tick1On')
if self.has_input('size'):
artist.constructor_props['size'] = self.get_input('size')
if self.has_input('label1Properties'):
artist.sub_props['label1'] = self.get_input('label1Properties')
if self.has_input('label2Properties'):
artist.sub_props['label2'] = self.get_input('label2Properties')
if self.has_input('tick1lineProperties'):
artist.sub_props['tick1line'] = self.get_input('tick1lineProperties')
if self.has_input('tick2lineProperties'):
artist.sub_props['tick2line'] = self.get_input('tick2lineProperties')
if self.has_input('gridlineProperties'):
artist.sub_props['gridline'] = self.get_input('gridlineProperties')
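# Illustrative sketch only, not part of the generated API: MplTickProperties
# composes nested property bundles through sub_props. update_props applies the
# tick's own setp-able props, writes the *On flags with setattr
# (not_setp_props), and update_sub_props forwards the label1/label2/
# tick1line/tick2line/gridline bundles to the matching child artists of each
# tick. All values below are hypothetical.
def _sketch_style_tick(tick):
    """Hedged example; `tick` is assumed to be a matplotlib axis Tick (e.g. one
    element of ax.xaxis.get_major_ticks()); the label2On attribute assumes the
    matplotlib version this module was generated against."""
    label_helper = MplTextProperties.Artist()
    label_helper.props['size'] = 'small'
    grid_helper = MplLine2DProperties.Artist()
    grid_helper.props['linestyle'] = ':'
    helper = MplTickProperties.Artist()
    helper.props['pad'] = 6.0                  # applied via Tick.set_pad
    helper.not_setp_props['label2On'] = False  # plain attribute, not a set_* method
    helper.sub_props['label1'] = label_helper
    helper.sub_props['gridline'] = grid_helper
    helper.update_props(tick)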
class MplXTickProperties(MplTickProperties):
"""
Contains all the Artists needed to make an x tick - the tick line,
the label text and the grid line
"""
_input_ports = [
("label1On", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("loc", "basic:String",
{'optional': True}),
("major", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("label2On", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
("color", "basic:String",
{'optional': True}),
("axes", "basic:String",
{'optional': True}),
("label", "basic:String",
{'optional': True}),
("labelcolor", "basic:String",
{'optional': True}),
("tickdir", "basic:String",
{'optional': True}),
("pad", "basic:String",
{'optional': True}),
("gridOn", "basic:String",
{'optional': True}),
("zorder", "basic:String",
{'optional': True}),
("tick2On", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("labelsize", "basic:String",
{'optional': True}),
("width", "basic:String",
{'optional': True}),
("tick1On", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("size", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplXTickProperties)")]
class Artist(MplTickProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplTickProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplXTickProperties.Artist()
self.set_output("value", artist)
MplTickProperties.compute(self, artist)
if self.has_input('label1On'):
artist.constructor_props['label1On'] = self.get_input('label1On')
if self.has_input('loc'):
artist.constructor_props['loc'] = self.get_input('loc')
if self.has_input('major'):
artist.constructor_props['major'] = self.get_input('major')
if self.has_input('label2On'):
artist.constructor_props['label2On'] = self.get_input('label2On')
if self.has_input('color'):
artist.constructor_props['color'] = self.get_input('color')
if self.has_input('axes'):
artist.constructor_props['axes'] = self.get_input('axes')
if self.has_input('label'):
artist.constructor_props['label'] = self.get_input('label')
if self.has_input('labelcolor'):
artist.constructor_props['labelcolor'] = self.get_input('labelcolor')
if self.has_input('tickdir'):
artist.constructor_props['tickdir'] = self.get_input('tickdir')
if self.has_input('pad'):
artist.constructor_props['pad'] = self.get_input('pad')
if self.has_input('gridOn'):
artist.constructor_props['gridOn'] = self.get_input('gridOn')
if self.has_input('zorder'):
artist.constructor_props['zorder'] = self.get_input('zorder')
if self.has_input('tick2On'):
artist.constructor_props['tick2On'] = self.get_input('tick2On')
if self.has_input('labelsize'):
artist.constructor_props['labelsize'] = self.get_input('labelsize')
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
if self.has_input('tick1On'):
artist.constructor_props['tick1On'] = self.get_input('tick1On')
if self.has_input('size'):
artist.constructor_props['size'] = self.get_input('size')
class MplYTickProperties(MplTickProperties):
"""
Contains all the Artists needed to make a Y tick - the tick line,
the label text and the grid line
"""
_input_ports = [
("label1On", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("loc", "basic:String",
{'optional': True}),
("major", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("label2On", "basic:Boolean",
{'optional': True, 'defaults': "['False']"}),
("color", "basic:String",
{'optional': True}),
("axes", "basic:String",
{'optional': True}),
("label", "basic:String",
{'optional': True}),
("labelcolor", "basic:String",
{'optional': True}),
("tickdir", "basic:String",
{'optional': True}),
("pad", "basic:String",
{'optional': True}),
("gridOn", "basic:String",
{'optional': True}),
("zorder", "basic:String",
{'optional': True}),
("tick2On", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("labelsize", "basic:String",
{'optional': True}),
("width", "basic:String",
{'optional': True}),
("tick1On", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("size", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplYTickProperties)")]
class Artist(MplTickProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplTickProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplYTickProperties.Artist()
self.set_output("value", artist)
MplTickProperties.compute(self, artist)
if self.has_input('label1On'):
artist.constructor_props['label1On'] = self.get_input('label1On')
if self.has_input('loc'):
artist.constructor_props['loc'] = self.get_input('loc')
if self.has_input('major'):
artist.constructor_props['major'] = self.get_input('major')
if self.has_input('label2On'):
artist.constructor_props['label2On'] = self.get_input('label2On')
if self.has_input('color'):
artist.constructor_props['color'] = self.get_input('color')
if self.has_input('axes'):
artist.constructor_props['axes'] = self.get_input('axes')
if self.has_input('label'):
artist.constructor_props['label'] = self.get_input('label')
if self.has_input('labelcolor'):
artist.constructor_props['labelcolor'] = self.get_input('labelcolor')
if self.has_input('tickdir'):
artist.constructor_props['tickdir'] = self.get_input('tickdir')
if self.has_input('pad'):
artist.constructor_props['pad'] = self.get_input('pad')
if self.has_input('gridOn'):
artist.constructor_props['gridOn'] = self.get_input('gridOn')
if self.has_input('zorder'):
artist.constructor_props['zorder'] = self.get_input('zorder')
if self.has_input('tick2On'):
artist.constructor_props['tick2On'] = self.get_input('tick2On')
if self.has_input('labelsize'):
artist.constructor_props['labelsize'] = self.get_input('labelsize')
if self.has_input('width'):
artist.constructor_props['width'] = self.get_input('width')
if self.has_input('tick1On'):
artist.constructor_props['tick1On'] = self.get_input('tick1On')
if self.has_input('size'):
artist.constructor_props['size'] = self.get_input('size')
class MplAxisProperties(MplArtistProperties):
"""
Public attributes
* :attr:`axes.transData` - transform data coords to display coords
* :attr:`axes.transAxes` - transform axis coords to display coords
* :attr:`labelpad` - number of points between the axis and its label
"""
_input_ports = [
("pickradius", "basic:String",
{'optional': True, 'docstring': 'Set the depth of the axis used by the picker'}),
("minor_formatter", "basic:String",
{'optional': True, 'docstring': 'Set the formatter of the minor ticker'}),
("smart_bounds", "basic:String",
{'optional': True, 'docstring': 'set the axis to have smart bounds'}),
("ticksSequence", "basic:List",
{'optional': True, 'docstring': 'Set the locations of the tick marks from sequence ticks'}),
("ticksScalar", "basic:Float",
{'docstring': 'Set the locations of the tick marks from sequence ticks', 'optional': True}),
("axes", "basic:String",
{'optional': True}),
("view_interval", "basic:String",
{'optional': True}),
("major_locator", "basic:String",
{'optional': True, 'docstring': 'Set the locator of the major ticker'}),
("major_formatter", "basic:String",
{'optional': True, 'docstring': 'Set the formatter of the major ticker'}),
("ticklabelsSequence", "basic:List",
{'optional': True, 'docstring': 'Set the text values of the tick labels. Return a list of Text instances. Use kwarg minor=True to select minor ticks. All other kwargs are used to update the text object properties. As for get_ticklabels, label1 (left or bottom) is affected for a given tick only if its label1On attribute is True, and similarly for label2. The list of returned label text objects consists of all such label1 objects followed by all such label2 objects.\n\nThe input ticklabels is assumed to match the set of tick locations, regardless of the state of label1On and label2On.'}),
("ticklabelsScalar", "basic:String",
{'docstring': 'Set the text values of the tick labels. Return a list of Text instances. Use kwarg minor=True to select minor ticks. All other kwargs are used to update the text object properties. As for get_ticklabels, label1 (left or bottom) is affected for a given tick only if its label1On attribute is True, and similarly for label2. The list of returned label text objects consists of all such label1 objects followed by all such label2 objects.\n\nThe input ticklabels is assumed to match the set of tick locations, regardless of the state of label1On and label2On.', 'optional': True}),
("clip_path", "basic:String",
{'optional': True}),
("minor_locator", "basic:String",
{'optional': True, 'docstring': 'Set the locator of the minor ticker'}),
("default_intervals", "basic:String",
{'optional': True}),
("scale", "basic:String",
{'optional': True}),
("data_interval", "basic:String",
{'optional': True, 'docstring': 'set the axis data limits'}),
("label_text", "basic:String",
{'optional': True, 'docstring': 'Sets the text value of the axis label'}),
("label_coords", "basic:String",
               {'optional': True, 'docstring': 'Set the coordinates of the label. By default, the x coordinate of the y label is determined by the tick label bounding boxes, but this can lead to poor alignment of multiple ylabels if there are multiple axes. Ditto for the y coordinate of the x label.\n\nYou can also specify the coordinate system of the label with the transform. If None, the default coordinate system will be the axes coordinate system (0,0) is (left,bottom), (0.5, 0.5) is middle, etc'}),
("units", "basic:String",
{'optional': True, 'docstring': 'set the units for axis'}),
("tick_params", "basic:String",
{'optional': True, 'docstring': 'Set appearance parameters for ticks and ticklabels.\n\nFor documentation of keyword arguments, see :meth:`matplotlib.axes.Axes.tick_params`.'}),
("majorTickProperties", "MplTickProperties",
{}),
("minorTickProperties", "MplTickProperties",
{}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplAxisProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
if 'major_ticks' in self.sub_props:
self.sub_props['major_ticks'].update_props(obj.get_major_ticks())
if 'minor_ticks' in self.sub_props:
self.sub_props['minor_ticks'].update_props(obj.get_minor_ticks())
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplAxisProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('pickradius'):
artist.props['pickradius'] = self.get_input('pickradius')
if self.has_input('minor_formatter'):
artist.props['minor_formatter'] = self.get_input('minor_formatter')
if self.has_input('smart_bounds'):
artist.props['smart_bounds'] = self.get_input('smart_bounds')
if self.has_input('ticksSequence'):
artist.props['ticks'] = self.get_input('ticksSequence')
elif self.has_input('ticksScalar'):
artist.props['ticks'] = self.get_input('ticksScalar')
if self.has_input('axes'):
artist.constructor_props['axes'] = self.get_input('axes')
if self.has_input('view_interval'):
artist.props['view_interval'] = self.get_input('view_interval')
if self.has_input('major_locator'):
artist.props['major_locator'] = self.get_input('major_locator')
if self.has_input('major_formatter'):
artist.props['major_formatter'] = self.get_input('major_formatter')
if self.has_input('ticklabelsSequence'):
artist.props['ticklabels'] = self.get_input('ticklabelsSequence')
elif self.has_input('ticklabelsScalar'):
artist.props['ticklabels'] = self.get_input('ticklabelsScalar')
if self.has_input('clip_path'):
artist.props['clip_path'] = self.get_input('clip_path')
if self.has_input('minor_locator'):
artist.props['minor_locator'] = self.get_input('minor_locator')
if self.has_input('default_intervals'):
artist.props['default_intervals'] = self.get_input('default_intervals')
if self.has_input('scale'):
artist.props['scale'] = self.get_input('scale')
if self.has_input('data_interval'):
artist.props['data_interval'] = self.get_input('data_interval')
if self.has_input('label_text'):
artist.props['label_text'] = self.get_input('label_text')
if self.has_input('label_coords'):
artist.props['label_coords'] = self.get_input('label_coords')
if self.has_input('units'):
artist.props['units'] = self.get_input('units')
if self.has_input('tick_params'):
artist.props['tick_params'] = self.get_input('tick_params')
if self.has_input('majorTickProperties'):
artist.sub_props['major_ticks'] = self.get_input('majorTickProperties')
if self.has_input('minorTickProperties'):
artist.sub_props['minor_ticks'] = self.get_input('minorTickProperties')
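# Illustrative sketch only, not part of the generated API: at the axis level,
# sub_props holds whole MplTickProperties bundles. update_sub_props fetches
# obj.get_major_ticks() / obj.get_minor_ticks() and hands the resulting tick
# lists to the tick bundle's update_props, which styles every tick through
# matplotlib.artist.setp. All values below are hypothetical.
def _sketch_style_axis(axis):
    """Hedged example; `axis` is assumed to be a matplotlib Axis instance
    (e.g. ax.xaxis) supplied by a hypothetical caller."""
    tick_helper = MplTickProperties.Artist()
    tick_helper.props['pad'] = 8.0
    helper = MplAxisProperties.Artist()
    helper.props['label_text'] = 'time (s)'  # applied via Axis.set_label_text
    helper.sub_props['major_ticks'] = tick_helper
    helper.update_props(axis)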
class MplXAxisProperties(MplAxisProperties):
"""None
"""
_input_ports = [
("view_interval", "basic:String",
{'optional': True, 'docstring': 'If ignore is False, the order of vmin, vmax does not matter; the original axis orientation will be preserved. In addition, the view limits can be expanded, but will not be reduced. This method is for mpl internal use; for normal use, see :meth:`~matplotlib.axes.Axes.set_xlim`.'}),
("ticks_position", "basic:String",
{'entry_types': "['enum']", 'docstring': "Set the ticks position (top, bottom, both, default or none) both sets the ticks to appear on both positions, but does not change the tick labels. 'default' resets the tick positions to the default: ticks on both positions, labels at bottom. 'none' can be used if you don't want any ticks. 'none' and 'both' affect only the ticks, not the labels.", 'values': "[['top', 'bottom', 'both', 'default', 'none']]", 'optional': True}),
("axes", "basic:String",
{'optional': True}),
("label_position", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the label position (top or bottom)', 'values': "[['top', 'bottom']]", 'optional': True}),
("default_intervals", "basic:String",
{'optional': True, 'docstring': 'set the default limits for the axis interval if they are not mutated'}),
("data_interval", "basic:String",
{'optional': True, 'docstring': 'set the axis data limits'}),
("pickradius", "basic:Integer",
{'optional': True, 'defaults': "['15']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplXAxisProperties)")]
class Artist(MplAxisProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplAxisProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplXAxisProperties.Artist()
self.set_output("value", artist)
MplAxisProperties.compute(self, artist)
if self.has_input('view_interval'):
artist.props['view_interval'] = self.get_input('view_interval')
if self.has_input('ticks_position'):
artist.props['ticks_position'] = self.get_input('ticks_position')
if self.has_input('axes'):
artist.constructor_props['axes'] = self.get_input('axes')
if self.has_input('label_position'):
artist.props['label_position'] = self.get_input('label_position')
if self.has_input('default_intervals'):
artist.props['default_intervals'] = self.get_input('default_intervals')
if self.has_input('data_interval'):
artist.props['data_interval'] = self.get_input('data_interval')
if self.has_input('pickradius'):
artist.constructor_props['pickradius'] = self.get_input('pickradius')
class MplYAxisProperties(MplAxisProperties):
"""None
"""
_input_ports = [
("offset_position", "basic:String",
{'optional': True}),
("view_interval", "basic:String",
{'optional': True, 'docstring': 'If ignore is False, the order of vmin, vmax does not matter; the original axis orientation will be preserved. In addition, the view limits can be expanded, but will not be reduced. This method is for mpl internal use; for normal use, see :meth:`~matplotlib.axes.Axes.set_ylim`.'}),
("ticks_position", "basic:String",
{'entry_types': "['enum']", 'docstring': "Set the ticks position (left, right, both, default or none) 'both' sets the ticks to appear on both positions, but does not change the tick labels. 'default' resets the tick positions to the default: ticks on both positions, labels at left. 'none' can be used if you don't want any ticks. 'none' and 'both' affect only the ticks, not the labels.", 'values': "[['left', 'right', 'both', 'default', 'none']]", 'optional': True}),
("axes", "basic:String",
{'optional': True}),
("label_position", "basic:String",
{'entry_types': "['enum']", 'docstring': 'Set the label position (left or right)', 'values': "[['left', 'right']]", 'optional': True}),
("default_intervals", "basic:String",
{'optional': True, 'docstring': 'set the default limits for the axis interval if they are not mutated'}),
("data_interval", "basic:String",
{'optional': True, 'docstring': 'set the axis data limits'}),
("pickradius", "basic:Integer",
{'optional': True, 'defaults': "['15']"}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplYAxisProperties)")]
class Artist(MplAxisProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
                for attr_name, attr_val in self.not_setp_props.items():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplAxisProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplYAxisProperties.Artist()
self.set_output("value", artist)
MplAxisProperties.compute(self, artist)
if self.has_input('offset_position'):
artist.props['offset_position'] = self.get_input('offset_position')
if self.has_input('view_interval'):
artist.props['view_interval'] = self.get_input('view_interval')
if self.has_input('ticks_position'):
artist.props['ticks_position'] = self.get_input('ticks_position')
if self.has_input('axes'):
artist.constructor_props['axes'] = self.get_input('axes')
if self.has_input('label_position'):
artist.props['label_position'] = self.get_input('label_position')
if self.has_input('default_intervals'):
artist.props['default_intervals'] = self.get_input('default_intervals')
if self.has_input('data_interval'):
artist.props['data_interval'] = self.get_input('data_interval')
if self.has_input('pickradius'):
artist.constructor_props['pickradius'] = self.get_input('pickradius')
class MplLegendProperties(MplArtistProperties):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of the normalized coordinate values with
    respect to its parent.
"""
_input_ports = [
("fancybox", "basic:String",
{'optional': True}),
("handlelength", "basic:String",
{'optional': True}),
("labels", "basic:String",
{'optional': True}),
("labelspacing", "basic:String",
{'optional': True}),
("columnspacing", "basic:String",
{'optional': True}),
("handletextpad", "basic:String",
{'optional': True}),
("ncol", "basic:Integer",
{'optional': True, 'defaults': "['1']"}),
("borderaxespad", "basic:String",
{'optional': True}),
("loc", "basic:String",
{'optional': True}),
("bbox_to_anchor", "basic:String",
{'optional': True, 'docstring': 'set the bbox that the legend will be anchored.\n\nbbox can be a BboxBase instance, a tuple of [left, bottom, width, height] in the given transform (normalized axes coordinate if None), or a tuple of [left, bottom] where the width and height will be assumed to be zero.'}),
("title", "basic:String",
{'optional': True, 'docstring': 'set the legend title. Fontproperties can be optionally set with prop parameter.'}),
("handletextsep", "basic:String",
{'optional': True}),
("numpoints", "basic:String",
{'optional': True}),
("prop", "basic:String",
{'optional': True}),
("handles", "basic:String",
{'optional': True}),
("pad", "basic:String",
{'optional': True}),
("borderpad", "basic:String",
{'optional': True}),
("parent", "basic:String",
{'optional': True}),
("axespad", "basic:String",
{'optional': True}),
("labelsep", "basic:String",
{'optional': True}),
("frame_on", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether the legend box patch is drawn'}),
("scatterpoints", "basic:Integer",
{'optional': True, 'defaults': "['3']"}),
("fontsize", "basic:String",
{'optional': True}),
("shadow", "basic:String",
{'optional': True}),
("handler_map", "basic:String",
{'optional': True}),
("handleheight", "basic:String",
{'optional': True}),
("scatteryoffsets", "basic:String",
{'optional': True}),
("markerscale", "basic:String",
{'optional': True}),
("frameon", "basic:String",
{'optional': True}),
("mode", "basic:String",
{'optional': True}),
("handlelen", "basic:String",
{'optional': True}),
("default_handler_map", "basic:String",
{'optional': True, 'docstring': 'A class method to set the default handler map.'}),
("bbox_transform", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplLegendProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplLegendProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('fancybox'):
artist.constructor_props['fancybox'] = self.get_input('fancybox')
if self.has_input('handlelength'):
artist.constructor_props['handlelength'] = self.get_input('handlelength')
if self.has_input('labels'):
artist.constructor_props['labels'] = self.get_input('labels')
if self.has_input('labelspacing'):
artist.constructor_props['labelspacing'] = self.get_input('labelspacing')
if self.has_input('columnspacing'):
artist.constructor_props['columnspacing'] = self.get_input('columnspacing')
if self.has_input('handletextpad'):
artist.constructor_props['handletextpad'] = self.get_input('handletextpad')
if self.has_input('ncol'):
artist.constructor_props['ncol'] = self.get_input('ncol')
if self.has_input('borderaxespad'):
artist.constructor_props['borderaxespad'] = self.get_input('borderaxespad')
if self.has_input('loc'):
artist.constructor_props['loc'] = self.get_input('loc')
if self.has_input('bbox_to_anchor'):
artist.props['bbox_to_anchor'] = self.get_input('bbox_to_anchor')
if self.has_input('title'):
artist.props['title'] = self.get_input('title')
if self.has_input('handletextsep'):
artist.constructor_props['handletextsep'] = self.get_input('handletextsep')
if self.has_input('numpoints'):
artist.constructor_props['numpoints'] = self.get_input('numpoints')
if self.has_input('prop'):
artist.constructor_props['prop'] = self.get_input('prop')
if self.has_input('handles'):
artist.constructor_props['handles'] = self.get_input('handles')
if self.has_input('pad'):
artist.constructor_props['pad'] = self.get_input('pad')
if self.has_input('borderpad'):
artist.constructor_props['borderpad'] = self.get_input('borderpad')
if self.has_input('parent'):
artist.constructor_props['parent'] = self.get_input('parent')
if self.has_input('axespad'):
artist.constructor_props['axespad'] = self.get_input('axespad')
if self.has_input('labelsep'):
artist.constructor_props['labelsep'] = self.get_input('labelsep')
if self.has_input('frame_on'):
artist.props['frame_on'] = self.get_input('frame_on')
if self.has_input('scatterpoints'):
artist.constructor_props['scatterpoints'] = self.get_input('scatterpoints')
if self.has_input('fontsize'):
artist.constructor_props['fontsize'] = self.get_input('fontsize')
if self.has_input('shadow'):
artist.constructor_props['shadow'] = self.get_input('shadow')
if self.has_input('handler_map'):
artist.constructor_props['handler_map'] = self.get_input('handler_map')
if self.has_input('handleheight'):
artist.constructor_props['handleheight'] = self.get_input('handleheight')
if self.has_input('scatteryoffsets'):
artist.constructor_props['scatteryoffsets'] = self.get_input('scatteryoffsets')
if self.has_input('markerscale'):
artist.constructor_props['markerscale'] = self.get_input('markerscale')
if self.has_input('frameon'):
artist.constructor_props['frameon'] = self.get_input('frameon')
if self.has_input('mode'):
artist.constructor_props['mode'] = self.get_input('mode')
if self.has_input('handlelen'):
artist.constructor_props['handlelen'] = self.get_input('handlelen')
if self.has_input('default_handler_map'):
artist.props['default_handler_map'] = self.get_input('default_handler_map')
if self.has_input('bbox_transform'):
artist.constructor_props['bbox_transform'] = self.get_input('bbox_transform')
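# The MplLegendProperties docstring above lists matplotlib's legend location
# codes. The sketch below is illustrative only (it is not used by this module)
# and shows how those codes map onto plain matplotlib calls; the plotted line
# and the coordinates are made-up examples.
def _example_legend_locations():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1], label='line')
    ax.legend(loc='upper right')   # equivalent to the integer code 1
    ax.legend(loc=(0.05, 0.85))    # tuple of normalized axes coordinates
    return fig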
class MplAxesProperties(MplArtistProperties):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
_input_ports = [
("adjustable", "basic:String",
{'entry_types': "['enum']", 'values': "[['box', 'datalim', 'box-forced']]", 'optional': True}),
("cursor_props", "basic:Float",
{'optional': True, 'docstring': 'Set the cursor property as:\n\nax.set_cursor_props(linewidth, color)\n\nor:\n\nax.set_cursor_props((linewidth, color))'}),
("figure", "basic:String",
{'optional': True, 'docstring': 'Set the class:~matplotlib.axes.Axes figure\n\naccepts a class:~matplotlib.figure.Figure instance'}),
("yscale", "basic:String",
{'optional': True, 'docstring': "Call signature:\n\nset_yscale(value)\n\nSet the scaling of the y-axis: 'linear' | 'log' | 'symlog' Different kwargs are accepted, depending on the scale: 'linear'\n\n'log'\n\n\n\n'symlog'"}),
("navigate", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether the axes responds to navigation toolbar commands'}),
("aspect", "basic:String",
{'entry_types': "['enum']", 'docstring': "aspect\n\n\n\nadjustable\n\n\n\n'box' does not allow axes sharing, as this can cause unintended side effect. For cases when sharing axes is fine, use 'box-forced'.\n\nanchor", 'values': "[['auto', 'normal', 'equal', 'num']]", 'optional': True}),
("axis_bgcolor", "basic:Color",
{'optional': True, 'docstring': 'set the axes background color'}),
("ylimSequence", "basic:List",
{'optional': True, 'docstring': 'Call signature:\n\nset_ylim(self, *args, **kwargs):\n\nSet the data limits for the yaxis\n\nExamples:\n\nset_ylim((bottom, top)) set_ylim(bottom, top) set_ylim(bottom=1) # top unchanged set_ylim(top=1) # bottom unchanged\n\nKeyword arguments:\n\n\n\nNote, the bottom (formerly ymin) value may be greater than the top (formerly ymax). For example, suppose y is depth in the ocean. Then one might use:\n\nset_ylim(5000, 0)\n\nso 5000 m depth is at the bottom of the plot and the surface, 0 m, is at the top.\n\nReturns the current ylimits as a length 2 tuple'}),
("ylimScalar", "basic:Float",
{'docstring': 'Call signature:\n\nset_ylim(self, *args, **kwargs):\n\nSet the data limits for the yaxis\n\nExamples:\n\nset_ylim((bottom, top)) set_ylim(bottom, top) set_ylim(bottom=1) # top unchanged set_ylim(top=1) # bottom unchanged\n\nKeyword arguments:\n\n\n\nNote, the bottom (formerly ymin) value may be greater than the top (formerly ymax). For example, suppose y is depth in the ocean. Then one might use:\n\nset_ylim(5000, 0)\n\nso 5000 m depth is at the bottom of the plot and the surface, 0 m, is at the top.\n\nReturns the current ylimits as a length 2 tuple', 'optional': True}),
("sharey", "basic:String",
{'optional': True}),
("xlimSequence", "basic:List",
{'optional': True, 'docstring': 'Call signature:\n\nset_xlim(self, *args, **kwargs):\n\nSet the data limits for the xaxis\n\nExamples:\n\nset_xlim((left, right)) set_xlim(left, right) set_xlim(left=1) # right unchanged set_xlim(right=1) # left unchanged\n\nKeyword arguments:\n\n\n\nNote, the left (formerly xmin) value may be greater than the right (formerly xmax). For example, suppose x is years before present. Then one might use:\n\nset_ylim(5000, 0)\n\nso 5000 years ago is on the left of the plot and the present is on the right.\n\nReturns the current xlimits as a length 2 tuple'}),
("xlimScalar", "basic:Float",
{'docstring': 'Call signature:\n\nset_xlim(self, *args, **kwargs):\n\nSet the data limits for the xaxis\n\nExamples:\n\nset_xlim((left, right)) set_xlim(left, right) set_xlim(left=1) # right unchanged set_xlim(right=1) # left unchanged\n\nKeyword arguments:\n\n\n\nNote, the left (formerly xmin) value may be greater than the right (formerly xmax). For example, suppose x is years before present. Then one might use:\n\nset_ylim(5000, 0)\n\nso 5000 years ago is on the left of the plot and the present is on the right.\n\nReturns the current xlimits as a length 2 tuple', 'optional': True}),
("axis_on", "basic:String",
{'optional': True, 'docstring': 'turn on the axis'}),
("title", "basic:String",
{'optional': True, 'docstring': 'Call signature:\n\nset_title(label, fontdict=None, **kwargs):\n\nSet the title for the axes.'}),
("axisbg", "basic:String",
{'optional': True}),
("label", "basic:String",
{'optional': True, 'defaults': "['']"}),
("xticks", "basic:List",
{'optional': True, 'docstring': 'Set the x ticks with list of ticks'}),
("fig", "basic:String",
{'optional': True}),
("ylabel", "basic:String",
{'optional': True, 'docstring': 'Call signature:\n\nset_ylabel(ylabel, fontdict=None, labelpad=None, **kwargs)\n\nSet the label for the yaxis\n\nlabelpad is the spacing in points between the label and the y-axis'}),
("autoscalex_on", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether autoscaling for the x-axis is applied on plot commands'}),
("rasterization_zorder", "basic:String",
{'optional': True, 'docstring': 'Set zorder value below which artists will be rasterized. Set to None to disable rasterizing of artists below a particular zorder.'}),
("axes_locator", "basic:String",
{'optional': True, 'docstring': 'set axes_locator'}),
("axisbelow", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether the axis ticks and gridlines are above or below most artists'}),
("frame_on", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether the axes rectangle patch is drawn'}),
("navigate_mode", "basic:String",
{'optional': True, 'docstring': 'Set the navigation toolbar button status;\n\nthis is not a user-API function.'}),
("xscale", "basic:String",
{'optional': True, 'docstring': "Call signature:\n\nset_xscale(value)\n\nSet the scaling of the x-axis: 'linear' | 'log' | 'symlog' Different kwargs are accepted, depending on the scale: 'linear'\n\n'log'\n\n\n\n'symlog'"}),
("axis_off", "basic:String",
{'optional': True, 'docstring': 'turn off the axis'}),
("autoscale_on", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether autoscaling is applied on plot commands'}),
("ybound", "basic:String",
{'optional': True, 'docstring': 'Set the lower and upper numerical bounds of the y-axis. This method will honor axes inversion regardless of parameter order. It will not change the _autoscaleYon attribute.'}),
("rect", "basic:String",
{'optional': True}),
("sharex", "basic:String",
{'optional': True}),
("yticklabelsSequence", "basic:List",
{'optional': True, 'docstring': "Call signature:\n\nset_yticklabels(labels, fontdict=None, minor=False, **kwargs)\n\nSet the y tick labels with list of strings labels. Return a list of :class:`~matplotlib.text.Text` instances.\n\nkwargs set :class:`~matplotlib.text.Text` properties for the labels. Valid properties are\n\nagg_filter: unknown alpha: float (0.0 transparent through 1.0 opaque) animated: [True | False] axes: an :class:`~matplotlib.axes.Axes` instance backgroundcolor: any matplotlib color bbox: rectangle prop dict clip_box: a :class:`matplotlib.transforms.Bbox` instance clip_on: [True | False] clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] color: any matplotlib color contains: a callable function family or fontfamily or fontname or name: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ] figure: a :class:`matplotlib.figure.Figure` instance fontproperties or font_properties: a :class:`matplotlib.font_manager.FontProperties` instance gid: an id string horizontalalignment or ha: [ 'center' | 'right' | 'left' ] label: string or anything printable with '%s' conversion. linespacing: float (multiple of font size) lod: [True | False] multialignment: ['left' | 'right' | 'center' ] path_effects: unknown picker: [None|float|boolean|callable] position: (x,y) rasterized: [True | False | None] rotation: [ angle in degrees | 'vertical' | 'horizontal' ] rotation_mode: unknown size or fontsize: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ] snap: unknown stretch or fontstretch: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ] style or fontstyle: [ 'normal' | 'italic' | 'oblique'] text: string or anything printable with '%s' conversion. transform: :class:`~matplotlib.transforms.Transform` instance url: a url string variant or fontvariant: [ 'normal' | 'small-caps' ] verticalalignment or va or ma: [ 'center' | 'top' | 'bottom' | 'baseline' ] visible: [True | False] weight or fontweight: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ] x: float y: float zorder: any number"}),
("yticklabelsScalar", "basic:String",
{'docstring': "Call signature:\n\nset_yticklabels(labels, fontdict=None, minor=False, **kwargs)\n\nSet the y tick labels with list of strings labels. Return a list of :class:`~matplotlib.text.Text` instances.\n\nkwargs set :class:`~matplotlib.text.Text` properties for the labels. Valid properties are\n\nagg_filter: unknown alpha: float (0.0 transparent through 1.0 opaque) animated: [True | False] axes: an :class:`~matplotlib.axes.Axes` instance backgroundcolor: any matplotlib color bbox: rectangle prop dict clip_box: a :class:`matplotlib.transforms.Bbox` instance clip_on: [True | False] clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] color: any matplotlib color contains: a callable function family or fontfamily or fontname or name: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ] figure: a :class:`matplotlib.figure.Figure` instance fontproperties or font_properties: a :class:`matplotlib.font_manager.FontProperties` instance gid: an id string horizontalalignment or ha: [ 'center' | 'right' | 'left' ] label: string or anything printable with '%s' conversion. linespacing: float (multiple of font size) lod: [True | False] multialignment: ['left' | 'right' | 'center' ] path_effects: unknown picker: [None|float|boolean|callable] position: (x,y) rasterized: [True | False | None] rotation: [ angle in degrees | 'vertical' | 'horizontal' ] rotation_mode: unknown size or fontsize: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ] snap: unknown stretch or fontstretch: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ] style or fontstyle: [ 'normal' | 'italic' | 'oblique'] text: string or anything printable with '%s' conversion. transform: :class:`~matplotlib.transforms.Transform` instance url: a url string variant or fontvariant: [ 'normal' | 'small-caps' ] verticalalignment or va or ma: [ 'center' | 'top' | 'bottom' | 'baseline' ] visible: [True | False] weight or fontweight: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ] x: float y: float zorder: any number", 'optional': True}),
("autoscaley_on", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether autoscaling for the y-axis is applied on plot commands'}),
("xmargin", "basic:Float",
{'optional': True, 'docstring': 'Set padding of X data limits prior to autoscaling.\n\nm times the data interval will be added to each end of that interval before it is used in autoscaling.'}),
("color_cycle", "basic:Color",
{'optional': True, 'docstring': 'Set the color cycle for any future plot commands on this Axes.\n\nclist is a list of mpl color specifiers.'}),
("frameon", "basic:Boolean",
{'optional': True, 'defaults': "['True']"}),
("xlabel", "basic:String",
{'optional': True, 'docstring': 'Call signature:\n\nset_xlabel(xlabel, fontdict=None, labelpad=None, **kwargs)\n\nSet the label for the xaxis.\n\nlabelpad is the spacing in points between the label and the x-axis'}),
("xbound", "basic:String",
{'optional': True, 'docstring': 'Set the lower and upper numerical bounds of the x-axis. This method will honor axes inversion regardless of parameter order. It will not change the _autoscaleXon attribute.'}),
("yticks", "basic:List",
{'optional': True, 'docstring': 'Set the y ticks with list of ticks Keyword arguments:'}),
("ymargin", "basic:Float",
{'optional': True, 'docstring': 'Set padding of Y data limits prior to autoscaling.\n\nm times the data interval will be added to each end of that interval before it is used in autoscaling.'}),
("position", "basic:String",
{'optional': True, 'docstring': 'Set the axes position with:\n\npos = [left, bottom, width, height]\n\nin relative 0,1 coords, or pos can be a :class:`~matplotlib.transforms.Bbox`\n\nThere are two position variables: one which is ultimately used, but which may be modified by :meth:`apply_aspect`, and a second which is the starting point for :meth:`apply_aspect`.'}),
("anchor", "basic:String",
{'docstring': 'anchor', 'values': "[['Center', 'bottom left', 'bottom', 'bottom right', 'right', 'top right', 'top', 'top left', 'left']]", 'optional': True}),
("xticklabelsSequence", "basic:List",
{'optional': True, 'docstring': "Call signature:\n\nset_xticklabels(labels, fontdict=None, minor=False, **kwargs)\n\nSet the xtick labels with list of strings labels. Return a list of axis text instances.\n\nkwargs set the :class:`~matplotlib.text.Text` properties. Valid properties are\n\nagg_filter: unknown alpha: float (0.0 transparent through 1.0 opaque) animated: [True | False] axes: an :class:`~matplotlib.axes.Axes` instance backgroundcolor: any matplotlib color bbox: rectangle prop dict clip_box: a :class:`matplotlib.transforms.Bbox` instance clip_on: [True | False] clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] color: any matplotlib color contains: a callable function family or fontfamily or fontname or name: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ] figure: a :class:`matplotlib.figure.Figure` instance fontproperties or font_properties: a :class:`matplotlib.font_manager.FontProperties` instance gid: an id string horizontalalignment or ha: [ 'center' | 'right' | 'left' ] label: string or anything printable with '%s' conversion. linespacing: float (multiple of font size) lod: [True | False] multialignment: ['left' | 'right' | 'center' ] path_effects: unknown picker: [None|float|boolean|callable] position: (x,y) rasterized: [True | False | None] rotation: [ angle in degrees | 'vertical' | 'horizontal' ] rotation_mode: unknown size or fontsize: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ] snap: unknown stretch or fontstretch: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ] style or fontstyle: [ 'normal' | 'italic' | 'oblique'] text: string or anything printable with '%s' conversion. transform: :class:`~matplotlib.transforms.Transform` instance url: a url string variant or fontvariant: [ 'normal' | 'small-caps' ] verticalalignment or va or ma: [ 'center' | 'top' | 'bottom' | 'baseline' ] visible: [True | False] weight or fontweight: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ] x: float y: float zorder: any number"}),
("xticklabelsScalar", "basic:String",
{'docstring': "Call signature:\n\nset_xticklabels(labels, fontdict=None, minor=False, **kwargs)\n\nSet the xtick labels with list of strings labels. Return a list of axis text instances.\n\nkwargs set the :class:`~matplotlib.text.Text` properties. Valid properties are\n\nagg_filter: unknown alpha: float (0.0 transparent through 1.0 opaque) animated: [True | False] axes: an :class:`~matplotlib.axes.Axes` instance backgroundcolor: any matplotlib color bbox: rectangle prop dict clip_box: a :class:`matplotlib.transforms.Bbox` instance clip_on: [True | False] clip_path: [ (:class:`~matplotlib.path.Path`, :class:`~matplotlib.transforms.Transform`) | :class:`~matplotlib.patches.Patch` | None ] color: any matplotlib color contains: a callable function family or fontfamily or fontname or name: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ] figure: a :class:`matplotlib.figure.Figure` instance fontproperties or font_properties: a :class:`matplotlib.font_manager.FontProperties` instance gid: an id string horizontalalignment or ha: [ 'center' | 'right' | 'left' ] label: string or anything printable with '%s' conversion. linespacing: float (multiple of font size) lod: [True | False] multialignment: ['left' | 'right' | 'center' ] path_effects: unknown picker: [None|float|boolean|callable] position: (x,y) rasterized: [True | False | None] rotation: [ angle in degrees | 'vertical' | 'horizontal' ] rotation_mode: unknown size or fontsize: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ] snap: unknown stretch or fontstretch: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ] style or fontstyle: [ 'normal' | 'italic' | 'oblique'] text: string or anything printable with '%s' conversion. transform: :class:`~matplotlib.transforms.Transform` instance url: a url string variant or fontvariant: [ 'normal' | 'small-caps' ] verticalalignment or va or ma: [ 'center' | 'top' | 'bottom' | 'baseline' ] visible: [True | False] weight or fontweight: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ] x: float y: float zorder: any number", 'optional': True}),
("titleProperties", "MplTextProperties",
{}),
("xaxisProperties", "MplXAxisProperties",
{}),
("yaxisProperties", "MplYAxisProperties",
{}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplAxesProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
if 'title' in self.sub_props:
self.sub_props['title'].update_props(obj.title)
if 'xaxis' in self.sub_props:
self.sub_props['xaxis'].update_props(obj.xaxis)
if 'yaxis' in self.sub_props:
self.sub_props['yaxis'].update_props(obj.yaxis)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplAxesProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('adjustable'):
artist.props['adjustable'] = self.get_input('adjustable')
if self.has_input('cursor_props'):
artist.props['cursor_props'] = self.get_input('cursor_props')
if self.has_input('figure'):
artist.props['figure'] = self.get_input('figure')
if self.has_input('yscale'):
artist.props['yscale'] = self.get_input('yscale')
if self.has_input('navigate'):
artist.props['navigate'] = self.get_input('navigate')
if self.has_input('aspect'):
artist.props['aspect'] = self.get_input('aspect')
if self.has_input('axis_bgcolor'):
artist.props['axis_bgcolor'] = self.get_input('axis_bgcolor')
artist.props['axis_bgcolor'] = translate_color(artist.props['axis_bgcolor'])
if self.has_input('ylimSequence'):
artist.props['ylim'] = self.get_input('ylimSequence')
elif self.has_input('ylimScalar'):
artist.props['ylim'] = self.get_input('ylimScalar')
if self.has_input('sharey'):
artist.constructor_props['sharey'] = self.get_input('sharey')
if self.has_input('xlimSequence'):
artist.props['xlim'] = self.get_input('xlimSequence')
elif self.has_input('xlimScalar'):
artist.props['xlim'] = self.get_input('xlimScalar')
if self.has_input('axis_on'):
artist.props['axis_on'] = self.get_input('axis_on')
if self.has_input('title'):
artist.props['title'] = self.get_input('title')
if self.has_input('axisbg'):
artist.constructor_props['axisbg'] = self.get_input('axisbg')
if self.has_input('label'):
artist.constructor_props['label'] = self.get_input('label')
if self.has_input('xticks'):
artist.props['xticks'] = self.get_input('xticks')
if self.has_input('fig'):
artist.constructor_props['fig'] = self.get_input('fig')
if self.has_input('ylabel'):
artist.props['ylabel'] = self.get_input('ylabel')
if self.has_input('autoscalex_on'):
artist.props['autoscalex_on'] = self.get_input('autoscalex_on')
if self.has_input('rasterization_zorder'):
artist.props['rasterization_zorder'] = self.get_input('rasterization_zorder')
if self.has_input('axes_locator'):
artist.props['axes_locator'] = self.get_input('axes_locator')
if self.has_input('axisbelow'):
artist.props['axisbelow'] = self.get_input('axisbelow')
if self.has_input('frame_on'):
artist.props['frame_on'] = self.get_input('frame_on')
if self.has_input('navigate_mode'):
artist.props['navigate_mode'] = self.get_input('navigate_mode')
if self.has_input('xscale'):
artist.props['xscale'] = self.get_input('xscale')
if self.has_input('axis_off'):
artist.props['axis_off'] = self.get_input('axis_off')
if self.has_input('autoscale_on'):
artist.props['autoscale_on'] = self.get_input('autoscale_on')
if self.has_input('ybound'):
artist.props['ybound'] = self.get_input('ybound')
if self.has_input('rect'):
artist.constructor_props['rect'] = self.get_input('rect')
if self.has_input('sharex'):
artist.constructor_props['sharex'] = self.get_input('sharex')
if self.has_input('yticklabelsSequence'):
artist.props['yticklabels'] = self.get_input('yticklabelsSequence')
elif self.has_input('yticklabelsScalar'):
artist.props['yticklabels'] = self.get_input('yticklabelsScalar')
if self.has_input('autoscaley_on'):
artist.props['autoscaley_on'] = self.get_input('autoscaley_on')
if self.has_input('xmargin'):
artist.props['xmargin'] = self.get_input('xmargin')
if self.has_input('color_cycle'):
artist.props['color_cycle'] = self.get_input('color_cycle')
artist.props['color_cycle'] = translate_color(artist.props['color_cycle'])
if self.has_input('frameon'):
artist.constructor_props['frameon'] = self.get_input('frameon')
if self.has_input('xlabel'):
artist.props['xlabel'] = self.get_input('xlabel')
if self.has_input('xbound'):
artist.props['xbound'] = self.get_input('xbound')
if self.has_input('yticks'):
artist.props['yticks'] = self.get_input('yticks')
if self.has_input('ymargin'):
artist.props['ymargin'] = self.get_input('ymargin')
if self.has_input('position'):
artist.props['position'] = self.get_input('position')
if self.has_input('anchor'):
artist.props['anchor'] = self.get_input('anchor')
artist.props['anchor'] = translate_MplAxesProperties_anchor(artist.props['anchor'])
if self.has_input('xticklabelsSequence'):
artist.props['xticklabels'] = self.get_input('xticklabelsSequence')
elif self.has_input('xticklabelsScalar'):
artist.props['xticklabels'] = self.get_input('xticklabelsScalar')
if self.has_input('titleProperties'):
artist.sub_props['title'] = self.get_input('titleProperties')
if self.has_input('xaxisProperties'):
artist.sub_props['xaxis'] = self.get_input('xaxisProperties')
if self.has_input('yaxisProperties'):
artist.sub_props['yaxis'] = self.get_input('yaxisProperties')
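# Illustrative sketch (not used by this module) of the Axes callback mechanism
# mentioned in the MplAxesProperties docstring: 'xlim_changed'/'ylim_changed'
# callbacks are registered on the CallbackRegistry and receive the Axes
# instance. The handler name `_on_xlim_changed` is a made-up example.
def _example_axes_callbacks():
    import matplotlib.pyplot as plt

    def _on_xlim_changed(ax):
        # called with the Axes whose x-limits just changed
        print('new xlim: %s' % (ax.get_xlim(),))

    fig, ax = plt.subplots()
    ax.callbacks.connect('xlim_changed', _on_xlim_changed)
    ax.set_xlim(0, 10)  # triggers the callback
    return fig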
class MplAxesSubplotProperties(MplAxesProperties):
"""None
"""
_input_ports = [
("fig", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplAxesSubplotProperties)")]
class Artist(MplAxesProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplAxesProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplAxesSubplotProperties.Artist()
self.set_output("value", artist)
MplAxesProperties.compute(self, artist)
if self.has_input('fig'):
artist.constructor_props['fig'] = self.get_input('fig')
class MplFigureProperties(MplArtistProperties):
"""
The Figure instance supports callbacks through a *callbacks*
attribute which is a :class:`matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'dpi_changed', and
the callback will be called with ``func(fig)`` where fig is the
:class:`Figure` instance.
*patch*
The figure patch is drawn by a
:class:`matplotlib.patches.Rectangle` instance
*suppressComposite*
For multiple figure images, the figure will make composite
images depending on the renderer option_image_nocomposite
function. If suppressComposite is True|False, this will
override the renderer.
"""
_input_ports = [
("edgecolor", "basic:Color",
{'optional': True, 'docstring': 'Set the edge color of the Figure rectangle'}),
("canvas", "basic:String",
         {'optional': True, 'docstring': 'Set the canvas that contains the figure'}),
("facecolor", "basic:Color",
{'optional': True, 'docstring': 'Set the face color of the Figure rectangle'}),
("size_inches", "basic:String",
{'optional': True, 'docstring': 'set_size_inches(w,h, forward=False)\n\nSet the figure size in inches\n\nUsage:\n\nfig.set_size_inches(w,h) # OR fig.set_size_inches((w,h) )\n\noptional kwarg forward=True will cause the canvas size to be automatically updated; eg you can resize the figure window from the shell'}),
("figwidth", "basic:Float",
{'optional': True, 'docstring': 'Set the width of the figure in inches'}),
("frameon", "basic:Boolean",
{'optional': True, 'docstring': 'Set whether the figure frame (background) is displayed or invisible'}),
("subplotpars", "basic:String",
{'optional': True}),
("figheight", "basic:Float",
{'optional': True, 'docstring': 'Set the height of the figure in inches'}),
("figsize", "basic:String",
{'optional': True}),
("linewidth", "basic:Float",
{'optional': True, 'defaults': "['0.0']"}),
("tight_layout", "basic:Boolean",
{'optional': True, 'docstring': "Set whether :meth:`tight_layout` is used upon drawing. If None, the rcParams['figure.autolayout'] value will be set."}),
("dpi", "basic:Float",
{'optional': True, 'docstring': 'Set the dots-per-inch of the figure'}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplFigureProperties)")]
class Artist(MplArtistProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplArtistProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplFigureProperties.Artist()
self.set_output("value", artist)
MplArtistProperties.compute(self, artist)
if self.has_input('edgecolor'):
artist.props['edgecolor'] = self.get_input('edgecolor')
artist.props['edgecolor'] = translate_color(artist.props['edgecolor'])
if self.has_input('canvas'):
artist.props['canvas'] = self.get_input('canvas')
if self.has_input('facecolor'):
artist.props['facecolor'] = self.get_input('facecolor')
artist.props['facecolor'] = translate_color(artist.props['facecolor'])
if self.has_input('size_inches'):
artist.props['size_inches'] = self.get_input('size_inches')
if self.has_input('figwidth'):
artist.props['figwidth'] = self.get_input('figwidth')
if self.has_input('frameon'):
artist.props['frameon'] = self.get_input('frameon')
if self.has_input('subplotpars'):
artist.constructor_props['subplotpars'] = self.get_input('subplotpars')
if self.has_input('figheight'):
artist.props['figheight'] = self.get_input('figheight')
if self.has_input('figsize'):
artist.constructor_props['figsize'] = self.get_input('figsize')
if self.has_input('linewidth'):
artist.constructor_props['linewidth'] = self.get_input('linewidth')
if self.has_input('tight_layout'):
artist.props['tight_layout'] = self.get_input('tight_layout')
if self.has_input('dpi'):
artist.props['dpi'] = self.get_input('dpi')
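# Illustrative sketch (not used by this module) of how the Figure sizing ports
# above correspond to matplotlib Figure setters; the 8x6 inch / 150 dpi values
# are arbitrary examples.
def _example_figure_sizing():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    fig.set_size_inches(8, 6)   # width, height in inches
    fig.set_dpi(150)            # dots per inch
    fig.set_facecolor('white')
    fig.set_edgecolor('black')
    return fig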
class MplAnnotationProperties(MplTextProperties):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
"""
_input_ports = [
("xycoords", "basic:String",
{'entry_types': "['enum']", 'values': "[['figure points', 'figure pixels', 'figure fraction', 'axes points', 'axes pixels', 'axes fraction', 'data', 'offset points', 'polar']]", 'optional': True, 'defaults': "['data']"}),
("figure", "basic:String",
{'optional': True}),
("annotation_clip", "basic:String",
{'optional': True}),
("xytext", "basic:String",
{'optional': True}),
("s", "basic:String",
{'optional': True}),
("xy", "basic:String",
{'optional': True}),
("textcoords", "basic:String",
{'entry_types': "['enum']", 'values': "[['figure points', 'figure pixels', 'figure fraction', 'axes points', 'axes pixels', 'axes fraction', 'data', 'offset points', 'polar']]", 'optional': True}),
("arrowprops", "basic:String",
{'optional': True}),
]
# only one output port: 'value'
_output_ports = [("value", "(MplAnnotationProperties)")]
class Artist(MplTextProperties.Artist):
def __init__(self):
self.props = {}
self.constructor_props = {}
self.not_setp_props = {}
self.sub_props = {}
def update_props(self, objs):
matplotlib.artist.setp(objs, **self.props)
if not matplotlib.cbook.iterable(objs):
objs_iter = [objs]
else:
objs_iter = matplotlib.cbook.flatten(objs)
for obj in objs_iter:
for attr_name, attr_val in self.not_setp_props.iteritems():
setattr(obj, attr_name, attr_val)
self.update_sub_props(objs)
def update_sub_props(self, objs):
MplTextProperties.Artist.update_sub_props(self, objs)
def update_kwargs(self, kwargs):
kwargs.update(self.constructor_props)
kwargs.update(self.props)
def compute(self, artist=None):
if artist is None:
artist = MplAnnotationProperties.Artist()
self.set_output("value", artist)
MplTextProperties.compute(self, artist)
if self.has_input('xycoords'):
artist.constructor_props['xycoords'] = self.get_input('xycoords')
if self.has_input('figure'):
artist.props['figure'] = self.get_input('figure')
if self.has_input('annotation_clip'):
artist.constructor_props['annotation_clip'] = self.get_input('annotation_clip')
if self.has_input('xytext'):
artist.constructor_props['xytext'] = self.get_input('xytext')
if self.has_input('s'):
artist.constructor_props['s'] = self.get_input('s')
if self.has_input('xy'):
artist.constructor_props['xy'] = self.get_input('xy')
if self.has_input('textcoords'):
artist.constructor_props['textcoords'] = self.get_input('textcoords')
if self.has_input('arrowprops'):
artist.constructor_props['arrowprops'] = self.get_input('arrowprops')
_modules = [
MplArtistProperties,
Mpl_AxesImageBaseProperties,
MplAxesImageProperties,
MplNonUniformImageProperties,
MplBboxImageProperties,
MplPcolorImageProperties,
MplFigureImageProperties,
MplCollectionProperties,
MplPathCollectionProperties,
MplPolyCollectionProperties,
MplBrokenBarHCollectionProperties,
MplRegularPolyCollectionProperties,
MplStarPolygonCollectionProperties,
MplAsteriskPolygonCollectionProperties,
MplLineCollectionProperties,
MplCircleCollectionProperties,
MplEllipseCollectionProperties,
MplPatchCollectionProperties,
MplTriMeshProperties,
MplQuadMeshProperties,
MplPatchProperties,
MplShadowProperties,
MplRectangleProperties,
MplRegularPolygonProperties,
MplCirclePolygonProperties,
MplPathPatchProperties,
MplPolygonProperties,
MplFancyArrowProperties,
MplWedgeProperties,
MplArrowProperties,
MplYAArrowProperties,
MplEllipseProperties,
MplCircleProperties,
MplArcProperties,
MplFancyBboxPatchProperties,
MplFancyArrowPatchProperties,
MplConnectionPatchProperties,
MplLine2DProperties,
MplTextProperties,
MplTextWithDashProperties,
MplTickProperties,
MplXTickProperties,
MplYTickProperties,
MplAxisProperties,
MplXAxisProperties,
MplYAxisProperties,
MplLegendProperties,
MplAxesProperties,
MplAxesSubplotProperties,
MplFigureProperties,
MplAnnotationProperties,
]
|
bsd-3-clause
|
stonneau/cwc_tests
|
src/tools/plot_utils.py
|
2
|
11856
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 16 09:16:56 2015
@author: adelpret
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
DEFAULT_FONT_SIZE = 40;
DEFAULT_AXIS_FONT_SIZE = DEFAULT_FONT_SIZE;
DEFAULT_LINE_WIDTH = 8; #13;
DEFAULT_MARKER_SIZE = 6;
DEFAULT_FONT_FAMILY = 'sans-serif'
DEFAULT_FONT_SIZE = DEFAULT_FONT_SIZE;
DEFAULT_FONT_SERIF = ['Times New Roman', 'Times','Bitstream Vera Serif', 'DejaVu Serif', 'New Century Schoolbook', 'Century Schoolbook L', 'Utopia', 'ITC Bookman', 'Bookman', 'Nimbus Roman No9 L', 'Palatino', 'Charter', 'serif'];
DEFAULT_FIGURE_FACE_COLOR = 'white' # figure facecolor; 0.75 is scalar gray
DEFAULT_LEGEND_FONT_SIZE = DEFAULT_FONT_SIZE;
DEFAULT_AXES_LABEL_SIZE = DEFAULT_FONT_SIZE; # fontsize of the x and y labels
DEFAULT_TEXT_USE_TEX = True;
LINE_ALPHA = 0.9;
SAVE_FIGURES = False;
FILE_EXTENSIONS = ['png']; #,'eps'];
FIGURES_DPI = 150;
SHOW_LEGENDS = False;
LEGEND_ALPHA = 0.5;
SHOW_FIGURES = False;
FIGURE_PATH = './';
LINE_WIDTH_RED = 0; # reduction of line width when plotting multiple lines on same plot
LINE_WIDTH_MIN = 1;
BOUNDS_COLOR = 'silver';
#legend.framealpha : 1.0 # opacity of legend frame
#axes.hold : True # whether to clear the axes by default on
#axes.linewidth : 1.0 # edge linewidth
#axes.titlesize : large # fontsize of the axes title
#axes.color_cycle : b, g, r, c, m, y, k # color cycle for plot lines
#xtick.labelsize : medium # fontsize of the tick labels
#figure.dpi : 80 # figure dots per inch
#image.cmap : jet # gray | jet etc...
#savefig.dpi : 100 # figure dots per inch
#savefig.facecolor : white # figure facecolor when saving
#savefig.edgecolor : white # figure edgecolor when saving
#savefig.format : png # png, ps, pdf, svg
#savefig.jpeg_quality: 95 # when a jpeg is saved, the default quality parameter.
#savefig.directory : ~ # default directory in savefig dialog box,
# leave empty to always use current working directory
def create_empty_figure(nRows=1, nCols=1, spinesPos=None,sharex=True):
f, ax = plt.subplots(nRows,nCols,sharex=sharex);
mngr = plt.get_current_fig_manager()
mngr.window.setGeometry(50,50,1080,720);
if(spinesPos!=None):
if(nRows*nCols>1):
for axis in ax.reshape(nRows*nCols):
movePlotSpines(axis, spinesPos);
else:
movePlotSpines(ax, spinesPos);
return (f, ax);
def movePlotSpines(ax, spinesPos):
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',spinesPos[0]))
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',spinesPos[1]))
def setAxisFontSize(ax, size):
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_fontsize(size)
label.set_bbox(dict(facecolor='white', edgecolor='None', alpha=0.65))
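# Illustrative usage sketch for the helpers above (assumes an interactive Qt
# backend, since create_empty_figure calls mngr.window.setGeometry); the data
# values below are made up.
def _example_empty_figure_usage():
    f, ax = create_empty_figure(nRows=1, nCols=1, spinesPos=[0.0, 0.0])
    setAxisFontSize(ax, DEFAULT_AXIS_FONT_SIZE)
    ax.plot([0, 1, 2], [0.0, 0.5, -0.2])
    return f, ax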
mpl.rcdefaults()
mpl.rcParams['lines.linewidth'] = DEFAULT_LINE_WIDTH;
mpl.rcParams['lines.markersize'] = DEFAULT_MARKER_SIZE;
mpl.rcParams['font.family'] = DEFAULT_FONT_FAMILY;
mpl.rcParams['font.size'] = DEFAULT_FONT_SIZE;
mpl.rcParams['font.serif'] = DEFAULT_FONT_SERIF;
mpl.rcParams['text.usetex'] = DEFAULT_TEXT_USE_TEX;
mpl.rcParams['axes.labelsize'] = DEFAULT_AXES_LABEL_SIZE;
mpl.rcParams['legend.fontsize'] = DEFAULT_LEGEND_FONT_SIZE;
mpl.rcParams['figure.facecolor'] = DEFAULT_FIGURE_FACE_COLOR;
mpl.rcParams['figure.figsize'] = 12, 9 #23, 12 #
def plot3dQuantity(quantity, title, ax=None, boundUp=None, boundLow=None, yscale='linear', linestyle='k'):
return plotNdQuantity(3, 1, quantity, title, ax, boundUp, boundLow, yscale, linestyle);
def plotNdQuantity(nRows, nCols, quantity, title="", ax=None, boundUp=None, boundLow=None, yscale='linear',
linestyle='k--', sharey=False, margins=None):
t = quantity.shape[0];
n = quantity.shape[1];
if(margins!=None):
if(type(margins) is list):
margins = [margins[0].reshape(t,1,n), margins[1].reshape(t,1,n)];
else:
margins = margins.reshape(t,1,n);
return plotNdQuantityPerSolver(nRows, nCols, quantity.reshape(t,1,n), title, None, [linestyle], ax,
boundUp, boundLow, yscale, None, None, sharey, margins);
def plotNdQuantityPerSolver(nRows, nCols, quantity, title, solver_names, line_styles, ax=None, boundUp=None, boundLow=None,
yscale='linear', subplot_titles=None, ylabels=None, sharey=False, margins=None, x=None):
if(ax==None):
f, ax = plt.subplots(nRows, nCols, sharex=True, sharey=sharey);
ax = ax.reshape(nRows, nCols);
k = 0;
if(x==None):
x = range(quantity.shape[0]);
for j in range(nCols):
for i in range(nRows):
if(k<quantity.shape[2]):
if(subplot_titles!=None):
ax[i,j].set_title(subplot_titles[k]);
elif(i==0):
ax[i,j].set_title(str(k)); # set titles on first row only
if(ylabels!=None):
ax[i,j].set_ylabel(ylabels[k]);
ymin = np.min(quantity[:,:,k]);
ymax = np.max(quantity[:,:,k]);
if(boundUp!=None):
if(len(boundUp.shape)==1): # constant bound
if(boundUp[k]<2*ymax):
ymax = np.max([ymax,boundUp[k]]);
ax[i,j].plot([0, quantity.shape[0]-1], [boundUp[k], boundUp[k]], '--', color=BOUNDS_COLOR, alpha=LINE_ALPHA);
elif(len(boundUp.shape)==2): # bound variable in time but constant for each solver
if(np.max(boundUp[:,k])<2*ymax):
ymax = np.max(np.concatenate(([ymax],boundUp[:,k])));
ax[i,j].plot(boundUp[:,k], '--', color=BOUNDS_COLOR, label='Upper bound', alpha=LINE_ALPHA);
if(boundLow!=None):
if(len(boundLow.shape)==1):
if(boundLow[k]>2*ymin):
ymin = np.min([ymin,boundLow[k]]);
ax[i,j].plot([0, quantity.shape[0]-1], [boundLow[k], boundLow[k]], '--', color=BOUNDS_COLOR, alpha=LINE_ALPHA);
else:
if(np.min(boundLow[:,k])>2*ymin):
ymin = np.min(np.concatenate(([ymin],boundLow[:,k])));
ax[i,j].plot(boundLow[:,k], '--', color=BOUNDS_COLOR, label='Lower bound', alpha=LINE_ALPHA);
lw = DEFAULT_LINE_WIDTH;
for s in range(quantity.shape[1]):
p, = ax[i,j].plot(x, quantity[:,s,k], line_styles[s], alpha=LINE_ALPHA, linewidth=lw);
if(margins!=None):
if(type(margins) is list):
mp = margins[0];
mn = margins[1];
else:
mp = margins;
mn = margins;
ymax = np.max(np.concatenate(([ymax],quantity[:,s,k]+mp[:,s,k])));
ymin = np.min(np.concatenate(([ymin],quantity[:,s,k]-mn[:,s,k])));
ax[i,j].fill_between(x, quantity[:,s,k]+mp[:,s,k], quantity[:,s,k]-mn[:,s,k], alpha=0.15, linewidth=0, facecolor='green');
if(solver_names!=None):
p.set_label(solver_names[s]);
lw=max(LINE_WIDTH_MIN,lw-LINE_WIDTH_RED);
ax[i,j].set_yscale(yscale);
ax[i,j].xaxis.set_ticks(np.arange(0, x[-1], x[-1]/2));
ax[i,j].yaxis.set_ticks([ymin, ymax]);
if(ymax-ymin>5.0):
ax[i,j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0f'));
elif(ymax-ymin>0.5):
ax[i,j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'));
else:
ax[i,j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'));
if(sharey==False):
ax[i,j].set_ylim([ymin-0.1*(ymax-ymin), ymax+0.1*(ymax-ymin)]);
k += 1;
else:
ax[i,j].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.0f'));
if(SAVE_FIGURES):
for ext in FILE_EXTENSIONS:
plt.gcf().savefig(FIGURE_PATH+title.replace(' ', '_')+'.'+ext, format=ext, dpi=FIGURES_DPI, bbox_inches='tight');
else:
ax[nRows/2,0].set_ylabel(title);
if(SHOW_LEGENDS):
# leg = ax[0,0].legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
leg = ax[0,0].legend(loc='best');
# leg.get_frame().set_alpha(LEGEND_ALPHA)
return ax;
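# Illustrative call sketch for plotNdQuantityPerSolver (made-up random data):
# two solvers, a 3-dimensional quantity over 100 time steps, drawn on a 3x1
# grid. Note that this module targets Python 2 (integer division is used for
# subplot indexing above).
def _example_plot_nd_quantity_per_solver():
    T, n_solvers, n = 100, 2, 3
    q = np.random.rand(T, n_solvers, n)
    return plotNdQuantityPerSolver(3, 1, q, 'example quantity',
                                   ['solver A', 'solver B'], ['k-', 'r--'])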
def plotQuantityPerSolver(quantity, title, solver_names, line_styles, yscale='linear', ylabel='',
x=None, xlabel='', legend_location='best'):
f, ax = plt.subplots();
lw = DEFAULT_LINE_WIDTH;
if(x==None):
x = range(quantity.shape[0]);
for i in range(len(solver_names)):
ax.plot(x, quantity[:,i], line_styles[i], alpha=LINE_ALPHA, linewidth=lw);
lw=max(lw-LINE_WIDTH_RED,LINE_WIDTH_MIN);
ax.set_yscale(yscale);
ax.set_ylabel(ylabel);
ax.set_xlabel(xlabel);
ymin = np.min(quantity);
ymax = np.max(quantity);
ax.set_ylim([ymin-0.1*(ymax-ymin), ymax+0.1*(ymax-ymin)]);
if(SHOW_LEGENDS):
leg = ax.legend(solver_names, loc=legend_location);
leg.get_frame().set_alpha(LEGEND_ALPHA)
if(SAVE_FIGURES):
for ext in FILE_EXTENSIONS:
plt.gcf().savefig(FIGURE_PATH+title.replace(' ', '_')+'.'+ext, format=ext, dpi=FIGURES_DPI, bbox_inches='tight');
elif(ylabel==''):
ax.set_ylabel(title);
def plotQuantityVsQuantityPerSolver(quantity, quantityPerSolver, legend, solver_names, line_styles, yscale='linear'):
r=0;
c=0;
if(len(solver_names)==4 or len(solver_names)==3):
r=2;
c=2;
elif(len(solver_names)==5 or len(solver_names)==6):
r=2;
c=3;
else:
print "ERROR in plotQuantityVsQuantityPerSolver, number of solvers not managed";
return;
f, ax = plt.subplots(r, c, sharex=True, sharey=True);
for i in range(len(solver_names)):
ax[i/c,i%c].plot(quantity[:,i], 'kx-', quantityPerSolver[:,i], line_styles[i], alpha=LINE_ALPHA);
ax[i/c,i%c].set_ylabel(solver_names[i]);
ax[i/c,i%c].set_yscale(yscale);
if(SAVE_FIGURES):
for ext in FILE_EXTENSIONS:
f.savefig(FIGURE_PATH+(legend[0]+'_VS_'+legend[1]).replace(' ', '_')+'.'+ext, format=ext, dpi=FIGURES_DPI, bbox_inches='tight');
if(SHOW_LEGENDS):
leg = ax[0,0].legend(legend, loc='best');
leg.get_frame().set_alpha(LEGEND_ALPHA)
def grayify_cmap(cmap):
"""Return a grayscale version of the colormap"""
cmap = plt.cm.get_cmap(cmap)
colors = cmap(np.arange(cmap.N))
# convert RGBA to perceived greyscale luminance
# cf. http://alienryderflex.com/hsp.html
RGB_weight = [0.299, 0.587, 0.114]
luminance = np.sqrt(np.dot(colors[:, :3] ** 2, RGB_weight))
colors[:, :3] = luminance[:, np.newaxis]
return cmap.from_list(cmap.name + "_grayscale", colors, cmap.N)
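# Usage sketch for grayify_cmap (illustrative only): build a grayscale twin of
# a named colormap and apply it to random image data.
def _example_grayify_cmap():
    gray_jet = grayify_cmap('jet')
    data = np.random.rand(32, 32)
    plt.imshow(data, cmap=gray_jet)
    plt.colorbar()
    return gray_jet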
def saveFigure(title):
if(SAVE_FIGURES):
for ext in FILE_EXTENSIONS:
plt.gcf().savefig(FIGURE_PATH+title.replace(' ', '_')+'.'+ext, format=ext, dpi=FIGURES_DPI, bbox_inches='tight');
|
gpl-3.0
|
kashif/scikit-learn
|
sklearn/ensemble/tests/test_voting_classifier.py
|
22
|
6543
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
    assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
|
bsd-3-clause
|
kagayakidan/scikit-learn
|
sklearn/metrics/cluster/supervised.py
|
207
|
27395
|
"""Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
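# For example, comb2(4) == 6: the number of unordered pairs among 4 samples,
# which is the quantity the pair-counting metrics below are built on.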
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
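# A small worked example of the matrix built above (illustrative only): rows
# are true classes, columns are predicted clusters, entries are counts.
_example_contingency = contingency_matrix([0, 0, 1, 1], [0, 1, 1, 1])
# _example_contingency -> array([[1, 1],
#                                [0, 2]])
# i.e. the first true class is split across both clusters while the second
# lies entirely in cluster 1.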
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
      Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
      are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
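# A worked instance of the computation above (illustrative only), matching the
# doctest adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) == 0.57...:
# the contingency matrix is [[2, 0], [0, 1], [0, 1]], so
#   sum_comb_c = C(2, 2) + C(1, 2) + C(1, 2) = 1
#   sum_comb_k = C(2, 2) + C(2, 2)           = 2
#   sum_comb   = C(2, 2)                     = 1
#   prod_comb  = 1 * 2 / C(4, 2)             = 1/3
#   mean_comb  = (1 + 2) / 2                 = 1.5
#   ARI        = (1 - 1/3) / (1.5 - 1/3)     = 4/7 ≈ 0.571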
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate, given the knowledge of the ground
    truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
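# A worked instance of the three formulas above (illustrative only), matching
# the doctest v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]) == 0.8 further down:
#   MI = log(2), H(true) = 1.5 * log(2), H(pred) = log(2)
#   homogeneity  = MI / H(true) = 2/3
#   completeness = MI / H(pred) = 1.0
#   v_measure    = 2 * (2/3) * 1.0 / (2/3 + 1.0) = 0.8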
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
      are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
      classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
    # log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
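# A hand-computed instance of the formula above (illustrative only): for two
# identical labelings [0, 0, 1, 1] the contingency matrix is [[2, 0], [0, 2]],
# so every occupied cell has P(i, j) = 0.5 with marginals P(i) = P'(j) = 0.5:
#   MI = 2 * 0.5 * log(0.5 / (0.5 * 0.5)) = log(2) ≈ 0.693 nats
# which is exactly what mutual_info_score returns for that pair of labelings.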
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
    ami: float (upper-limited by 1.0)
The AMI returns a value of 1 when the two partitions are identical
       (i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI around 0 on average hence can be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
    mutual_info_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
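# A quick sanity check of the adjustment above (illustrative only): for
# identical labelings MI equals both entropies, so the numerator and the
# denominator of (MI - E[MI]) / (max(H(U), H(V)) - E[MI]) coincide and the
# AMI is exactly 1.0; for independent random labelings MI ≈ E[MI], so the
# numerator, and hence the AMI, is close to 0.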
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
    :func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
    the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
    # Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
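# A hand-computed instance (illustrative only): for labels_true = [0, 0, 1, 1]
# and labels_pred = [0, 0, 1, 1] we have MI = log(2) and
# H(true) = H(pred) = log(2), so NMI = log(2) / sqrt(log(2) * log(2)) = 1.0,
# matching the first doctest above.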
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
    # log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
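# A minimal numeric check of the helper above (illustrative only): for the
# labeling [0, 0, 1, 1] the class frequencies are [0.5, 0.5], so
#   H = -(0.5 * log(0.5) + 0.5 * log(0.5)) = log(2) ≈ 0.693 nats.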
|
bsd-3-clause
|
rs2/pandas
|
pandas/tests/groupby/test_function.py
|
1
|
33782
|
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
result = df.groupby("A").cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby("A", as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby("A").cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
[
("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
)
df["b"] = df.b.astype(dtype)
if "args" not in data:
data["args"] = []
if "out_type" in data:
out_type = data["out_type"]
else:
out_type = dtype
exp = data["df"]
df_out = pd.DataFrame(exp)
df_out["b"] = df_out.b.astype(out_type)
df_out.set_index("a", inplace=True)
grpd = df.groupby("a")
t = getattr(grpd, method)(*data["args"])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize(
"i",
[
(
Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448"),
),
(24650000000000001, 24650000000000002),
],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {
"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1], "args": [1]},
"count": {"expected": 2},
}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize(
"func, values",
[
("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
],
)
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame(
{
"name": ["A", "A", "B", "B"],
"c_int": [1, 2, 3, 4],
"c_float": [4.02, 3.03, 2.04, 1.05],
"c_date": ["2019", "2018", "2016", "2017"],
}
)
df["c_date"] = pd.to_datetime(df["c_date"])
result = getattr(df.groupby("name"), func)()
expected = pd.DataFrame(values, index=Index(["A", "B"], name="name"))
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({"key": ["b"] * 10, "value": 2})
actual = df.groupby("key")["value"].cumprod()
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({"key": ["b"] * 100, "value": 2})
actual = df.groupby("key")["value"].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df["value"] = df["value"].astype(float)
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
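# A small sketch of the overflow behaviour referenced in the comment above
# (illustrative only): 2 ** 100 does not fit in int64, so a plain numpy
# cumprod on int64 values overflows silently, while computing in float keeps
# the (approximate) true product:
#
#     s = pd.Series([2] * 100, dtype="int64")
#     s.values.cumprod()[-1]              # silently overflowed int64 value
#     s.astype(float).cumprod().iloc[-1]  # ~1.27e30, the true product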
def scipy_sem(*args, **kwargs):
from scipy.stats import sem
return sem(*args, ddof=1, **kwargs)
@pytest.mark.parametrize(
"op,targop",
[
("mean", np.mean),
("median", np.median),
("std", np.std),
("var", np.var),
("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
("count", np.size),
pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
],
)
def test_ops_general(op, targop):
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
tm.assert_frame_equal(result, expected)
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby("Date")
r = gb[["File"]].max()
e = gb["File"].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r["File"].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series(
[7, 5, 3, 10, 9, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[3, 2, 1, 3, 3, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
)
tm.assert_series_equal(gb.nlargest(3, keep="last"), e)
def test_nlargest_mi_grouper():
# see gh-21411
npr = np.random.RandomState(123456789)
dts = date_range("20180101", periods=10)
iterables = [dts, ["one", "two"]]
idx = MultiIndex.from_product(iterables, names=["first", "second"])
s = Series(npr.randn(20), index=idx)
result = s.groupby("first").nlargest(1)
exp_idx = MultiIndex.from_tuples(
[
(dts[0], dts[0], "one"),
(dts[1], dts[1], "one"),
(dts[2], dts[2], "one"),
(dts[3], dts[3], "two"),
(dts[4], dts[4], "one"),
(dts[5], dts[5], "one"),
(dts[6], dts[6], "one"),
(dts[7], dts[7], "one"),
(dts[8], dts[8], "two"),
(dts[9], dts[9], "one"),
],
names=["first", "first", "second"],
)
exp_values = [
2.2129019979039612,
1.8417114045748335,
0.858963679564603,
1.3759151378258088,
0.9430284594687134,
0.5296914208183142,
0.8318045593815487,
-0.8476703342910327,
0.3804446884133735,
-0.8028845810770998,
]
expected = Series(exp_values, index=exp_idx)
tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series(
[1, 2, 3, 0, 4, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[0, 1, 1, 0, 1, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
)
tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
g = df.groupby("A")
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
min_val = numpy_dtypes_for_minmax[1]
# GH 15048
base_df = pd.DataFrame(
{"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
)
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
df = base_df.astype(dtype)
expected = pd.DataFrame({"B": expected_mins}).astype(dtype)
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ min value for dtype
df.loc[[2, 6], "B"] = min_val
expected.loc[[2, 3, 6, 7], "B"] = min_val
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], "B"] = np.nan
expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
result = df.groupby("a")["b"].cummin()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
result = df.groupby("a").b.cummin()
expected = pd.Series([1, 2, 1], name="b")
tm.assert_series_equal(result, expected)
def test_cummin_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
expected = pd.DataFrame({"B": [np.nan] * 8})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
def test_cummax(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
max_val = numpy_dtypes_for_minmax[2]
# GH 15048
base_df = pd.DataFrame(
{"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
)
expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
df = base_df.astype(dtype)
expected = pd.DataFrame({"B": expected_maxs}).astype(dtype)
result = df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ max value for dtype
df.loc[[2, 6], "B"] = max_val
expected.loc[[2, 3, 6, 7], "B"] = max_val
result = df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], "B"] = np.nan
expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
result = base_df.groupby("A").cummax()
tm.assert_frame_equal(result, expected)
expected = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(result, expected)
# GH 15561
df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
result = df.groupby("a")["b"].cummax()
tm.assert_series_equal(expected, result)
# GH 15635
df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
result = df.groupby("a").b.cummax()
expected = pd.Series([2, 1, 2], name="b")
tm.assert_series_equal(result, expected)
def test_cummax_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
expected = pd.DataFrame({"B": [np.nan] * 8})
result = base_df.groupby("A").cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly increasing (T), strictly decreasing (F),
# abs val increasing (F), non-strictly increasing (T)
([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
[True, False, True, False],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
def test_is_monotonic_increasing(in_vals, out_vals):
# GH 17015
source_dict = {
"A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
"B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
"C": in_vals,
}
df = pd.DataFrame(source_dict)
result = df.groupby("B").C.is_monotonic_increasing
index = Index(list("abcd"), name="B")
expected = pd.Series(index=index, data=out_vals, name="C")
tm.assert_series_equal(result, expected)
# Also check result equal to manually taking x.is_monotonic_increasing.
expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"in_vals, out_vals",
[
# Basics: strictly decreasing (T), strictly increasing (F),
# abs val decreasing (F), non-strictly increasing (T)
([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
# Test with inf vals
(
[np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
[True, True, False, True],
),
# Test with nan vals; should always be False
(
[1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
[False, False, False, False],
),
],
)
def test_is_monotonic_decreasing(in_vals, out_vals):
# GH 17015
source_dict = {
"A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
"B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
"C": in_vals,
}
df = pd.DataFrame(source_dict)
result = df.groupby("B").C.is_monotonic_decreasing
index = Index(list("abcd"), name="B")
expected = pd.Series(index=index, data=out_vals, name="C")
tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level="first")
grouped.describe() # it works!
def test_series_describe_multikey():
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False)
tm.assert_series_equal(result["std"], grouped.std(), check_names=False)
tm.assert_series_equal(result["min"], grouped.min(), check_names=False)
def test_series_describe_single():
ts = tm.makeTimeSeries()
grouped = ts.groupby(lambda x: x.month)
result = grouped.apply(lambda x: x.describe())
expected = grouped.describe().stack()
tm.assert_series_equal(result, expected)
def test_series_index_name(df):
grouped = df.loc[:, ["C"]].groupby(df["A"])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == "A"
def test_frame_describe_multikey(tsframe):
grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.describe()
desc_groups = []
for col in tsframe:
group = grouped[col].describe()
# GH 17464 - Remove duplicate MultiIndex levels
group_col = pd.MultiIndex(
levels=[[col], group.columns],
codes=[[0] * len(group.columns), range(len(group.columns))],
)
group = pd.DataFrame(group.values, columns=group_col, index=group.index)
desc_groups.append(group)
expected = pd.concat(desc_groups, axis=1)
tm.assert_frame_equal(result, expected)
groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
result = groupedT.describe()
expected = tsframe.describe().T
tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
# GH 14848 - regression from 0.19.0 to 0.19.1
df1 = DataFrame(
{
"x": [1, 2, 3, 4, 5] * 3,
"y": [10, 20, 30, 40, 50] * 3,
"z": [100, 200, 300, 400, 500] * 3,
}
)
df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
df2 = df1.rename(columns={"k": "key"})
msg = "Names should be list-like for a MultiIndex"
with pytest.raises(ValueError, match=msg):
df1.groupby("k").describe()
with pytest.raises(ValueError, match=msg):
df2.groupby("key").describe()
def test_frame_describe_unstacked_format():
# GH 4792
prices = {
pd.Timestamp("2011-01-06 10:59:05", tz=None): 24990,
pd.Timestamp("2011-01-06 12:43:33", tz=None): 25499,
pd.Timestamp("2011-01-06 12:54:09", tz=None): 25499,
}
volumes = {
pd.Timestamp("2011-01-06 10:59:05", tz=None): 1500000000,
pd.Timestamp("2011-01-06 12:43:33", tz=None): 5000000000,
pd.Timestamp("2011-01-06 12:54:09", tz=None): 100000000,
}
df = pd.DataFrame({"PRICE": prices, "VOLUME": volumes})
result = df.groupby("PRICE").VOLUME.describe()
data = [
df[df.PRICE == 24990].VOLUME.describe().values.tolist(),
df[df.PRICE == 25499].VOLUME.describe().values.tolist(),
]
expected = pd.DataFrame(
data,
index=pd.Index([24990, 25499], name="PRICE"),
columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
"ignore:"
"indexing past lexsort depth may impact performance:"
"pandas.errors.PerformanceWarning"
)
@pytest.mark.parametrize("as_index", [True, False])
def test_describe_with_duplicate_output_column_names(as_index):
# GH 35314
df = pd.DataFrame(
{
"a": [99, 99, 99, 88, 88, 88],
"b": [1, 2, 3, 4, 5, 6],
"c": [10, 20, 30, 40, 50, 60],
},
columns=["a", "b", "b"],
)
expected = (
pd.DataFrame.from_records(
[
("a", "count", 3.0, 3.0),
("a", "mean", 88.0, 99.0),
("a", "std", 0.0, 0.0),
("a", "min", 88.0, 99.0),
("a", "25%", 88.0, 99.0),
("a", "50%", 88.0, 99.0),
("a", "75%", 88.0, 99.0),
("a", "max", 88.0, 99.0),
("b", "count", 3.0, 3.0),
("b", "mean", 5.0, 2.0),
("b", "std", 1.0, 1.0),
("b", "min", 4.0, 1.0),
("b", "25%", 4.5, 1.5),
("b", "50%", 5.0, 2.0),
("b", "75%", 5.5, 2.5),
("b", "max", 6.0, 3.0),
("b", "count", 3.0, 3.0),
("b", "mean", 5.0, 2.0),
("b", "std", 1.0, 1.0),
("b", "min", 4.0, 1.0),
("b", "25%", 4.5, 1.5),
("b", "50%", 5.0, 2.0),
("b", "75%", 5.5, 2.5),
("b", "max", 6.0, 3.0),
],
)
.set_index([0, 1])
.T
)
expected.columns.names = [None, None]
expected.index = pd.Index([88, 99], name="a")
if as_index:
expected = expected.drop(columns=["a"], level=0)
else:
expected = expected.reset_index(drop=True)
result = df.groupby("a", as_index=as_index).describe()
tm.assert_frame_equal(result, expected)
def test_groupby_mean_no_overflow():
# Regression test for (#22487)
df = pd.DataFrame(
{
"user": ["A", "A", "A", "A", "A"],
"connections": [4970, 4749, 4719, 4704, 18446744073699999744],
}
)
assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840
@pytest.mark.parametrize(
"values",
[
{
"a": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],
},
{"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},
],
)
@pytest.mark.parametrize("function", ["mean", "median", "var"])
def test_apply_to_nullable_integer_returns_float(values, function):
# https://github.com/pandas-dev/pandas/issues/32219
output = 0.5 if function == "var" else 1.5
arr = np.array([output] * 3, dtype=float)
idx = pd.Index([1, 2, 3], dtype=object, name="a")
expected = pd.DataFrame({"b": arr}, index=idx)
groups = pd.DataFrame(values, dtype="Int64").groupby("a")
result = getattr(groups, function)()
tm.assert_frame_equal(result, expected)
result = groups.agg(function)
tm.assert_frame_equal(result, expected)
result = groups.agg([function])
expected.columns = MultiIndex.from_tuples([("b", function)])
tm.assert_frame_equal(result, expected)
def test_groupby_sum_below_mincount_nullable_integer():
# https://github.com/pandas-dev/pandas/issues/32861
df = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64")
grouped = df.groupby("a")
idx = pd.Index([0, 1, 2], dtype=object, name="a")
result = grouped["b"].sum(min_count=2)
expected = pd.Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")
tm.assert_series_equal(result, expected)
result = grouped.sum(min_count=2)
expected = pd.DataFrame(
{"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx
)
tm.assert_frame_equal(result, expected)
|
bsd-3-clause
|
sinhrks/pandas-ml
|
pandas_ml/snsaccessors/base.py
|
1
|
7540
|
#!/usr/bin/env python
import pandas as pd
from pandas_ml.core.accessor import _AccessorMethods, _attach_methods
class SeabornMethods(_AccessorMethods):
"""Accessor to ``sklearn.cluster``."""
_module_name = 'seaborn'
_module_attrs = ['palplot', 'set', 'axes_style', 'plotting_context',
'set_context', 'set_color_codes', 'reset_defaults',
'reset_orig', 'set_palette', 'color_palette',
'husl_palette', 'hls_palette', 'cubehelix_palette',
'dark_palette', 'light_palette', 'diverging_palette',
'blend_palette', 'xkcd_palette', 'crayon_palette',
'mpl_palette', 'choose_colorbrewer_palette',
'choose_cubehelix_palette', 'choose_light_palette',
'choose_dark_palette', 'choose_diverging_palette',
'despine', 'desaturate', 'saturate', 'set_hls_values',
'ci_to_errsize', 'axlabel']
def _maybe_target_name(self, value, key):
if value is None:
if not self._df.has_target():
msg = ("{key} can't be ommitted when ModelFrame doesn't have "
"target column")
raise ValueError(msg.format(key=key))
elif self._df.has_multi_targets():
msg = ("{key} can't be ommitted when ModelFrame has multiple "
"target columns")
raise ValueError(msg.format(key=key))
value = self._df.target_name
return value
def _maybe_target_series(self, value, key):
if value is None:
if not self._df.has_target():
msg = ("{key} can't be ommitted when ModelFrame doesn't have "
"target column")
raise ValueError(msg.format(key=key))
elif self._df.has_multi_targets():
msg = ("{key} can't be ommitted when ModelFrame has multiple "
"target columns")
raise ValueError(msg.format(key=key))
value = self._df.target
elif not pd.core.common.is_list_like(value):
value = self._df[value]
return value
# Axis grids
def FacetGrid(self, row=None, col=None, *args, **kwargs):
return self._module.FacetGrid(data=self._df, row=row, col=col,
*args, **kwargs)
def PairGrid(self, *args, **kwargs):
return self._module.PairGrid(data=self._df, *args, **kwargs)
def JointGrid(self, x, y, *args, **kwargs):
return self._module.JointGrid(x, y, data=self._df, *args, **kwargs)
# Distribution plots
def distplot(self, a=None, *args, **kwargs):
"""
Call ``seaborn.distplot`` using automatic mapping.
- ``a``: ``ModelFrame.target``
"""
a = self._maybe_target_series(a, key='a')
return self._module.distplot(a, *args, **kwargs)
def rugplot(self, a=None, *args, **kwargs):
"""
Call ``seaborn.rugplot`` using automatic mapping.
- ``a``: ``ModelFrame.target``
"""
a = self._maybe_target_series(a, key='a')
return self._module.rugplot(a, *args, **kwargs)
def kdeplot(self, data=None, data2=None, *args, **kwargs):
"""
Call ``seaborn.kdeplot`` using automatic mapping.
- ``data``: ``ModelFrame.target``
"""
data = self._maybe_target_series(data, key='data')
if data2 is not None:
if not pd.core.common.is_list_like(data2):
data2 = self._df[data2]
return self._module.kdeplot(data, data2=data2, *args, **kwargs)
# Regression plots
def interactplot(self, x1, x2, y=None, *args, **kwargs):
"""
Call ``seaborn.interactplot`` using automatic mapping.
- ``data``: ``ModelFrame``
- ``y``: ``ModelFrame.target_name``
"""
y = self._maybe_target_name(y, key='y')
return self._module.interactplot(x1, x2, y, data=self._df,
*args, **kwargs)
def coefplot(self, formula, *args, **kwargs):
"""
Call ``seaborn.coefplot`` using automatic mapping.
- ``data``: ``ModelFrame``
"""
return self._module.coefplot(formula, data=self._df, *args, **kwargs)
# Categorical plots
def countplot(self, x=None, y=None, *args, **kwargs):
"""
Call ``seaborn.countplot`` using automatic mapping.
- ``data``: ``ModelFrame``
        - ``x``: ``ModelFrame.target_name`` (when both ``x`` and ``y`` are omitted)
"""
if x is None and y is None:
x = self._maybe_target_name(x, key='x')
return self._module.countplot(x, y, data=self._df, *args, **kwargs)
# Matrix plots
def heatmap(self, *args, **kwargs):
"""
Call ``seaborn.heatmap`` using automatic mapping.
- ``data``: ``ModelFrame``
"""
return self._module.heatmap(data=self._df, *args, **kwargs)
def clustermap(self, *args, **kwargs):
"""
Call ``seaborn.clustermap`` using automatic mapping.
- ``data``: ``ModelFrame``
"""
return self._module.clustermap(data=self._df, *args, **kwargs)
# Timeseries plots
def tsplot(self, *args, **kwargs):
"""
Call ``seaborn.tsplot`` using automatic mapping.
- ``data``: ``ModelFrame``
"""
return self._module.tsplot(data=self._df, *args, **kwargs)
def _wrap_xy_plot(func, func_name):
"""
Wrapper for plotting with x, y, data
"""
def f(self, x, y=None, *args, **kwargs):
y = self._maybe_target_name(y, key='y')
return func(x, y, data=self._df, *args, **kwargs)
f.__doc__ = (
"""
Call ``%s`` using automatic mapping.
- ``data``: ``ModelFrame``
- ``y``: ``ModelFrame.target_name``
""" % func_name)
return f
def _wrap_categorical_plot(func, func_name):
"""
Wrapper for categorical, x and y may be optional
"""
def f(self, y=None, x=None, *args, **kwargs):
if x is not None and y is None:
y = self._maybe_target_name(y, key='y')
elif x is None and y is not None:
x = self._maybe_target_name(x, key='x')
return func(x, y, data=self._df, *args, **kwargs)
f.__doc__ = (
"""
        Call ``%s`` using automatic mapping. When only one of ``x`` and ``y``
        is given, the other defaults to the target column.
        - ``data``: ``ModelFrame``
        - ``x`` or ``y``: ``ModelFrame.target_name``
""" % func_name)
return f
def _wrap_data_plot(func, func_name):
"""
Wrapper for plotting with data
"""
def f(self, *args, **kwargs):
return func(data=self._df, *args, **kwargs)
f.__doc__ = (
"""
Call ``%s`` using automatic mapping.
- ``data``: ``ModelFrame``
""" % func_name)
return f
_xy_plots = ['jointplot', 'lmplot', 'regplot', 'residplot']
_attach_methods(SeabornMethods, _wrap_xy_plot, _xy_plots)
_categorical_plots = ['factorplot', 'boxplot', 'violinplot', 'stripplot',
'pointplot', 'barplot']
_attach_methods(SeabornMethods, _wrap_categorical_plot, _categorical_plots)
_data_plots = ['pairplot']
_attach_methods(SeabornMethods, _wrap_data_plot, _data_plots)
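# A minimal usage sketch of the accessor defined above (illustrative only; the
# accessor attribute name ``sns`` on ModelFrame is an assumption here):
#
#     import pandas_ml as pdml
#     df = pdml.ModelFrame({'x': [1, 2, 3], 'y': [1, 4, 9]}, target='y')
#     df.sns.regplot('x')        # y defaults to ModelFrame.target_name
#     df.sns.boxplot(x='x')      # y is filled from the target when omitted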
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/matplotlib/backends/backend_wxagg.py
|
8
|
5866
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib
from matplotlib.figure import Figure
from .backend_agg import FigureCanvasAgg
from . import wx_compat as wxc
from . import backend_wx
from .backend_wx import (FigureManagerWx, FigureCanvasWx,
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, Toolbar)
import wx
show = backend_wx.Show()
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC, origin='WXAgg')
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
figure.canvas.draw_idle()
return figmgr
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wxc.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
agg.buffer_rgba())
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.bounds
r = l + width
t = b + height
srcBmp = wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
agg.buffer_rgba())
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wxc.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
x = int(l)
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
FigureCanvas = FigureCanvasWxAgg
FigureManager = FigureManagerWx
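# Hedged usage sketch (illustrative only): user code does not normally touch the
# classes above directly; it selects this backend before importing pyplot and
# lets pyplot create the FigureManagerWx / FigureCanvasWxAgg pair.
#
# >>> import matplotlib
# >>> matplotlib.use('WXAgg')            # requires wxPython to be installed
# >>> import matplotlib.pyplot as plt
# >>> plt.plot([0, 1, 2], [0, 1, 4])
# >>> plt.show()                         # opens a wx frame drawn with Agg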
|
mit
|
mugizico/scikit-learn
|
examples/datasets/plot_iris_dataset.py
|
283
|
1928
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
|
bsd-3-clause
|
NMTHydro/Recharge
|
utils/tornadoPlot_SA.py
|
1
|
4933
|
# ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance
# with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# =================================IMPORTS=======================================
import os
import matplotlib.pyplot as plt
from matplotlib import rc
from numpy import array, set_printoptions
from pandas import read_pickle, set_option, options
def round_to_value(number, roundto):
return round(number / roundto) * roundto
rc('mathtext', default='regular')
set_option('display.max_rows', None)
set_option('display.max_columns', None)
set_option('display.width', None)
set_option('display.precision', 3)
options.display.float_format = '${:,.2f}'.format
set_printoptions(threshold=3000, edgeitems=5000, precision=3)
set_option('display.height', None)
set_option('display.max_rows', None)
FACTORS = ['Temperature', 'Precipitation', 'Reference ET', 'Total Available Water (TAW)',
'Vegetation Density (NDVI)', 'Soil Ksat']
def make_tornado_plot(dataframe, factors, show=False, fig_path=None):
dfs = os.listdir(dataframe)
print 'pickled dfs: {}'.format(dfs)
filename = 'norm_sensitivity.pkl'
if filename in dfs:
df = read_pickle(os.path.join(dataframe, filename))
df.to_csv(os.path.join(fig_path, 'sample_norm_df.csv'))
print df
xx = 1
for index, row in df.iterrows():
print index, row
base = row[0][5]
lows = []
for fact in row:
lows.append(min(fact))
lows = array(lows)
values = []
for fact in row:
values.append(max(fact))
# The y position for each variable
ys = range(len(values))[::-1] # top to bottom
# Plot the bars, one by one
for y, low, value in zip(ys, lows, values):
# The width of the 'low' and 'high' pieces
low_width = base - low
high_width = abs(value - base)
# Each bar is a "broken" horizontal bar chart
plt.broken_barh(
[(low, low_width), (base, high_width)],
(y - 0.4, 0.8),
facecolors=['white', 'white'], # Try different colors if you like
edgecolors=['black', 'black'],
linewidth=1)
plt.subplots_adjust(left=0.32)
# Display the value as text. It should be positioned in the center of
# the 'high' bar, except if there isn't any room there, then it should be
            # next to the bar instead.
x = base + high_width / 2
if x <= base:
x = base + high_width
# plt.text(x, y, str(round(value - low, 1)) + 'mm', va='center', ha='center')
# Draw a vertical line down the middle
plt.axvline(base, color='black')
# Position the x-axis on the top, hide all the other spines (=axis lines)
axes = plt.gca() # (gca = get current axes)
axes.spines['left'].set_visible(False)
axes.spines['right'].set_visible(False)
axes.spines['bottom'].set_visible(False)
axes.xaxis.set_ticks_position('top')
# Make the y-axis display the factors
plt.yticks(ys, factors)
print 'location: {}'.format(index)
plt.title('{} [mm]'.format(index.replace('_', ' ')),
y=1.05)
# Set the portion of the x- and y-axes to show
plt.xlim(min(-20, 1.2 * min(lows)), base + 1.1 * max(values))
plt.ylim(-1, len(factors))
# plt.show()
if show:
plt.show()
# if fig_path:
# plt.savefig('{}_tornado'.format(index), fig_path, ext='jpg', dpi=500, close=True, verbose=True)
plt.close()
if __name__ == '__main__':
root = os.path.join('F:\\', 'ETRM_Inputs')
sensitivity = os.path.join(root, 'sensitivity_analysis')
pickles = os.path.join(sensitivity, 'pickled')
figure_save_path = os.path.join(sensitivity, 'figures')
make_tornado_plot(pickles, FACTORS, fig_path=figure_save_path, show=True)
# ========================== EOF ==============================================
|
apache-2.0
|
466152112/scikit-learn
|
examples/ensemble/plot_voting_decision_regions.py
|
230
|
2386
|
"""
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
each count twice as much as those of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
('svc', clf3)],
voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
[clf1, clf2, clf3, eclf],
['Decision Tree (depth=4)', 'KNN (k=7)',
'Kernel SVM', 'Soft Voting']):
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
axarr[idx[0], idx[1]].set_title(tt)
plt.show()
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/core/arrays/sparse/scipy_sparse.py
|
1
|
5381
|
"""
Interaction with scipy.sparse matrices.
Currently only includes to_coo helpers.
"""
from pandas.core.indexes.api import Index, MultiIndex
from pandas.core.series import Series
def _check_is_partition(parts, whole):
whole = set(whole)
parts = [set(x) for x in parts]
if set.intersection(*parts) != set():
raise ValueError("Is not a partition because intersection is not null.")
if set.union(*parts) != whole:
raise ValueError("Is not a partition because union is not the whole.")
def _to_ijv(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
For arbitrary (MultiIndexed) sparse Series return
(v, i, j, ilabels, jlabels) where (v, (i, j)) is suitable for
passing to scipy.sparse.coo constructor.
"""
# index and column levels must be a partition of the index
_check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
# from the sparse Series: get the labels and data for non-null entries
values = ss.array._valid_sp_values
nonnull_labels = ss.dropna()
def get_indexers(levels):
""" Return sparse coords and dense labels for subset levels """
# TODO: how to do this better? cleanly slice nonnull_labels given the
# coord
values_ilabels = [tuple(x[i] for i in levels) for x in nonnull_labels.index]
if len(levels) == 1:
values_ilabels = [x[0] for x in values_ilabels]
# # performance issues with groupby ###################################
# TODO: these two lines can replace the code below but
# groupby is too slow (in some cases at least)
# labels_to_i = ss.groupby(level=levels, sort=sort_labels).first()
# labels_to_i[:] = np.arange(labels_to_i.shape[0])
def _get_label_to_i_dict(labels, sort_labels=False):
"""
Return dict of unique labels to number.
Optionally sort by label.
"""
labels = Index(map(tuple, labels)).unique().tolist() # squish
if sort_labels:
labels = sorted(labels)
return {k: i for i, k in enumerate(labels)}
def _get_index_subset_to_coord_dict(index, subset, sort_labels=False):
ilabels = list(zip(*[index._get_level_values(i) for i in subset]))
labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels)
labels_to_i = Series(labels_to_i)
if len(subset) > 1:
labels_to_i.index = MultiIndex.from_tuples(labels_to_i.index)
labels_to_i.index.names = [index.names[i] for i in subset]
else:
labels_to_i.index = Index(x[0] for x in labels_to_i.index)
labels_to_i.index.name = index.names[subset[0]]
labels_to_i.name = "value"
return labels_to_i
labels_to_i = _get_index_subset_to_coord_dict(
ss.index, levels, sort_labels=sort_labels
)
# #####################################################################
# #####################################################################
i_coord = labels_to_i[values_ilabels].tolist()
i_labels = labels_to_i.index.tolist()
return i_coord, i_labels
i_coord, i_labels = get_indexers(row_levels)
j_coord, j_labels = get_indexers(column_levels)
return values, i_coord, j_coord, i_labels, j_labels
def _sparse_series_to_coo(ss, row_levels=(0,), column_levels=(1,), sort_labels=False):
"""
Convert a sparse Series to a scipy.sparse.coo_matrix using index
levels row_levels, column_levels as the row and column
labels respectively. Returns the sparse_matrix, row and column labels.
"""
import scipy.sparse
if ss.index.nlevels < 2:
raise ValueError("to_coo requires MultiIndex with nlevels > 2")
if not ss.index.is_unique:
raise ValueError(
"Duplicate index entries are not allowed in to_coo transformation."
)
# to keep things simple, only rely on integer indexing (not labels)
row_levels = [ss.index._get_level_number(x) for x in row_levels]
column_levels = [ss.index._get_level_number(x) for x in column_levels]
v, i, j, rows, columns = _to_ijv(
ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels
)
sparse_matrix = scipy.sparse.coo_matrix(
(v, (i, j)), shape=(len(rows), len(columns))
)
return sparse_matrix, rows, columns
def _coo_to_sparse_series(A, dense_index: bool = False):
"""
Convert a scipy.sparse.coo_matrix to a SparseSeries.
Parameters
----------
A : scipy.sparse.coo.coo_matrix
dense_index : bool, default False
Returns
-------
Series
Raises
------
TypeError if A is not a coo_matrix
"""
from pandas import SparseDtype
try:
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col)))
except AttributeError as err:
raise TypeError(
f"Expected coo_matrix. Got {type(A).__name__} instead."
) from err
s = s.sort_index()
s = s.astype(SparseDtype(s.dtype))
if dense_index:
# is there a better constructor method to use here?
i = range(A.shape[0])
j = range(A.shape[1])
ind = MultiIndex.from_product([i, j])
s = s.reindex(ind)
return s
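# Hedged usage sketch (illustrative only; the data values are made up): the
# helpers above back the public ``Series.sparse.to_coo`` / ``from_coo``
# accessors on a MultiIndexed sparse Series.
#
# >>> import numpy as np
# >>> import pandas as pd
# >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
# >>> s.index = pd.MultiIndex.from_tuples(
# ...     [(1, 2, "a", 0), (1, 2, "a", 1), (1, 1, "b", 0),
# ...      (1, 1, "b", 1), (2, 1, "b", 0), (2, 1, "b", 1)],
# ...     names=["A", "B", "C", "D"])
# >>> ss = s.astype("Sparse")
# >>> A, rows, columns = ss.sparse.to_coo(
# ...     row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True)
# >>> A.shape
# (3, 4)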
|
bsd-3-clause
|
JosmanPS/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
48
|
12645
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
def test_lda_default_prior_params():
    # default prior parameter should be `1 / n_topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method, random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method', LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
@if_not_mac_os()
def test_lda_multi_jobs():
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=3,
learning_method=method, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_not_mac_os()
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=-1, learning_offset=5.,
total_samples=30, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X, invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X, invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
assert_allclose(_dirichlet_expectation_1d(x),
np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
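# Hedged usage sketch (illustrative only, mirroring the fixtures above and the
# older ``n_topics`` parameter name used throughout this test module):
#
# >>> n_topics, X = _build_sparse_mtx()
# >>> lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
# ...                                 random_state=0).fit(X)
# >>> lda.components_.shape                 # (3 topics, 9 words)
# (3, 9)
# >>> lda.transform(X).argmax(axis=1)       # most probable topic per document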
|
bsd-3-clause
|
NicholasBermuda/transit-fitting
|
transitfit/kepler.py
|
1
|
5322
|
from __future__ import print_function, division
import re
import pandas as pd
import numpy as np
import kplr
from .lightcurve import LightCurve, Planet, BinaryLightCurve
KEPLER_CADENCE = 1626./86400
def lc_dataframe(lc):
"""Returns a pandas DataFrame of given lightcurve data
"""
with lc.open() as f:
data = f[1].data
data = np.array(data).byteswap().newbyteorder()
return pd.DataFrame(data)
def all_LCdata(koi, mask_bad=False):
"""
Returns all data for a given koi, in a pandas dataframe
PDCSAP_FLUX is quarter-stitched and normalized, with bad points masked
"""
df = pd.DataFrame()
for lc in koi.get_light_curves():
if re.search('_llc\.fits', lc.filename):
newdf = lc_dataframe(lc)
normalfactor = newdf['PDCSAP_FLUX'].mean()
newdf['PDCSAP_FLUX'] /= normalfactor
newdf['PDCSAP_FLUX_ERR'] /= normalfactor
df = pd.concat([df, newdf])
if mask_bad:
ok = np.isfinite(df['PDCSAP_FLUX']) & (df['SAP_QUALITY']==0)
else:
ok = np.ones(len(df)).astype(bool)
return df[ok]
def kepler_planets(koinum, i):
#reads in the planets from a koi and adds them to the list of planets
#as a Planet object
client = kplr.API()
if type(i)==int:
ilist = [i]
else:
ilist = i
koi_list = [koinum + i*0.01 for i in ilist]
planets = []
kois = []
for k in koi_list:
k = client.koi(k)
planets.append(Planet((k.koi_period, k.koi_period_err1),
(k.koi_time0bk, k.koi_time0bk_err1),
k.koi_duration/24,
name=k.kepoi_name))
kois.append(k)
return kois, planets
class KeplerLightCurve(LightCurve):
"""A LightCurve of a Kepler star
:param koinum:
KOI number (integer).
:param i:
Planet number, either integer (1 through koi_count),
list of integers, or None (in which case all planets will be modeled).
"""
def __init__(self, koinum, i=None,**kwargs):
self.koinum = koinum #used for multinest basename folder organisation
client = kplr.API() #interacting with Kepler archive
koi = client.koi(koinum + 0.01) #getting the first planet to download info
if i is None: #if there is no input
i = range(1,koi.koi_count+1) #then we create an array of all the planets
lcdata = all_LCdata(koi) #downloads all the light curve data
#mask out NaNs
mask = ~np.isfinite(lcdata['PDCSAP_FLUX']) | lcdata['SAP_QUALITY']
kois, planets = kepler_planets(koinum, i=i) #get the kois and planets
self.kois = kois
super(KeplerLightCurve, self).__init__(lcdata['TIME'],
lcdata['PDCSAP_FLUX'],
lcdata['PDCSAP_FLUX_ERR'],
mask=mask, planets=planets,
texp=KEPLER_CADENCE, **kwargs)
@property
def archive_params(self):
#reads in the parameters from the archive
params = [1, self.kois[0].koi_srho, 0.5, 0.5, 0]
for k in self.kois:
params += [k.koi_period, k.koi_time0bk, k.koi_impact, k.koi_ror, 0, 0]
return params
def archive_light_curve(self, t):
#reads in the light curve data from the archive
return self.light_curve(self.archive_params, t)
class BinaryKeplerLightCurve(BinaryLightCurve):
"""BinaryLightCurve of a Kepler star
:param koinum:
KOI number (integer)
:param i:
Planet number, either integer (1 through koi_count),
list of integers, or None (in which case all planets will be modeled).
"""
def __init__(self, koinum, i=None,rhostarA=None,rhostarB=None,dilution = None,**kwargs):
self.koinum = koinum #used for multinest basename folder organisation
client = kplr.API() #interacting with Kepler archive
koi = client.koi(koinum + 0.01) #getting the first planet to download info
if i is None: #if there is no input
i = range(1,koi.koi_count+1) #then we create an array of all the planets
lcdata = all_LCdata(koi) #downloads all the light curve data
#mask out NaNs
mask = ~np.isfinite(lcdata['PDCSAP_FLUX']) | lcdata['SAP_QUALITY']
kois, planets = kepler_planets(koinum, i=i) #get the kois and planets
self.kois = kois
super(BinaryKeplerLightCurve, self).__init__(lcdata['TIME'],
lcdata['PDCSAP_FLUX'],
lcdata['PDCSAP_FLUX_ERR'],
rhostarA=rhostarA,rhostarB=rhostarB,
dilution = dilution,
mask=mask, planets=planets,
texp=KEPLER_CADENCE, **kwargs)
# @classmethod
# def from_hdf(cls, *args, **kwargs):
# raise NotImplementedError
# @classmethod
# def from_df(cls, df, **kwargs):
# raise NotImplementedError
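# Hedged usage sketch (illustrative only; the KOI number is arbitrary, and the
# calls below download data from the Kepler archive through kplr, so they need
# network access):
#
# >>> kois, planets = kepler_planets(571, i=[1, 2])   # first two planets of the KOI
# >>> lc = KeplerLightCurve(571)                      # model every planet of the KOI
# >>> params = lc.archive_params                      # parameters reported by the archive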
|
mit
|
equialgo/scikit-learn
|
sklearn/datasets/base.py
|
5
|
26099
|
"""
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import sys
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os.path import splitext
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
# Bunch pickles generated with scikit-learn 0.16.* have an non
# empty __dict__. This causes a surprising behaviour when
# loading these pickles scikit-learn 0.17: reading bunch.key
# uses __dict__ but assigning to bunch.key use __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description : string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error : {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = []
for filename in filenames:
with open(filename, 'rb') as f:
data.append(f.read())
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
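# Hedged usage sketch for load_files (illustrative only; the corpus is built in
# a temporary folder, so the category names and file contents are made up):
#
# >>> import tempfile
# >>> root = tempfile.mkdtemp()
# >>> for category in ('spam', 'ham'):
# ...     makedirs(join(root, category))
# ...     with open(join(root, category, 'doc_1.txt'), 'w') as f:
# ...         f.write('example text for %s' % category)
# >>> corpus = load_files(root, encoding='utf-8')
# >>> sorted(corpus.target_names)           # folder names become label names
# ['ham', 'spam']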
def load_iris(return_X_y=False):
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'iris.csv')) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
target[i] = np.asarray(ir[-1], dtype=np.int)
with open(join(module_path, 'descr', 'iris.rst')) as rst_file:
fdescr = rst_file.read()
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_breast_cancer(return_X_y=False):
"""Load and return the breast cancer wisconsin dataset (classification).
The breast cancer dataset is a classic and very easy binary classification
dataset.
================= ==============
Classes 2
Samples per class 212(M),357(B)
Samples total 569
Dimensionality 30
Features real, positive
================= ==============
Parameters
----------
return_X_y : boolean, default=False
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
downloaded from:
https://goo.gl/U2Uwz2
Examples
--------
Let's say you are interested in the samples 10, 50, and 85, and want to
know their class name.
>>> from sklearn.datasets import load_breast_cancer
>>> data = load_breast_cancer()
>>> data.target[[10, 50, 85]]
array([0, 1, 0])
>>> list(data.target_names)
['malignant', 'benign']
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', 'breast_cancer.csv')) as csv_file:
data_file = csv.reader(csv_file)
first_line = next(data_file)
n_samples = int(first_line[0])
n_features = int(first_line[1])
target_names = np.array(first_line[2:4])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for count, value in enumerate(data_file):
data[count] = np.asarray(value[:-1], dtype=np.float64)
target[count] = np.asarray(value[-1], dtype=np.int)
with open(join(module_path, 'descr', 'breast_cancer.rst')) as rst_file:
fdescr = rst_file.read()
feature_names = np.array(['mean radius', 'mean texture',
'mean perimeter', 'mean area',
'mean smoothness', 'mean compactness',
'mean concavity', 'mean concave points',
'mean symmetry', 'mean fractal dimension',
'radius error', 'texture error',
'perimeter error', 'area error',
'smoothness error', 'compactness error',
'concavity error', 'concave points error',
'symmetry error', 'fractal dimension error',
'worst radius', 'worst texture',
'worst perimeter', 'worst area',
'worst smoothness', 'worst compactness',
'worst concavity', 'worst concave points',
'worst symmetry', 'worst fractal dimension'])
if return_X_y:
return data, target
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr,
feature_names=feature_names)
def load_digits(n_class=10, return_X_y=False):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import matplotlib.pyplot as plt #doctest: +SKIP
>>> plt.gray() #doctest: +SKIP
>>> plt.matshow(digits.images[0]) #doctest: +SKIP
>>> plt.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
with open(join(module_path, 'descr', 'digits.rst')) as f:
descr = f.read()
target = data[:, -1].astype(np.int)
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
if return_X_y:
return flat_data, target
return Bunch(data=flat_data,
target=target,
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes(return_X_y=False):
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
if return_X_y:
return data, target
return Bunch(data=data, target=target,
feature_names=['age', 'sex', 'bmi', 'bp',
's1', 's2', 's3', 's4', 's5', 's6'])
def load_linnerud(return_X_y=False):
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
if return_X_y:
return data_exercise, data_physiological
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston(return_X_y=False):
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Parameters
----------
return_X_y : boolean, default=False.
If True, returns ``(data, target)`` instead of a Bunch object.
See below for more information about the `data` and `target` object.
.. versionadded:: 0.18
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
and 'DESCR', the full description of the dataset.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
fdescr_name = join(module_path, 'descr', 'boston_house_prices.rst')
with open(fdescr_name) as f:
descr_text = f.read()
data_file_name = join(module_path, 'data', 'boston_house_prices.csv')
with open(data_file_name) as f:
data_file = csv.reader(f)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float64)
target[i] = np.asarray(d[-1], dtype=np.float64)
if return_X_y:
return data, target
return Bunch(data=data,
target=target,
# last column is target value
feature_names=feature_names[:-1],
DESCR=descr_text)
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name : {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img : 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
def _pkl_filepath(*args, **kwargs):
"""Ensure different filenames for Python 2 and Python 3 pickles
An object pickled under Python 3 cannot be loaded under Python 2.
An object pickled under Python 2 can sometimes not be loaded
correctly under Python 3 because some Python 2 strings are decoded as
Python 3 strings which can be problematic for objects that use Python 2
strings as byte buffers for numerical data instead of "real" strings.
Therefore, dataset loaders in scikit-learn use different files for pickles
    managed by Python 2 and Python 3 in the same SCIKIT_LEARN_DATA folder so
as to avoid conflicts.
args[-1] is expected to be the ".pkl" filename. Under Python 3, a
    suffix is inserted before the extension so that Python 2 and Python 3 use
    separate files.
_pkl_filepath('/path/to/folder', 'filename.pkl') returns:
- /path/to/folder/filename.pkl under Python 2
- /path/to/folder/filename_py3.pkl under Python 3+
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
if sys.version_info[0] >= 3:
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
|
bsd-3-clause
|
KawalMusikIndonesia/kimi
|
kimiserver/apps/run_tests.py
|
16
|
6040
|
from dejavu.testing import *
from dejavu import Dejavu
from optparse import OptionParser
import matplotlib.pyplot as plt
import time
import shutil
usage = "usage: %prog [options] TESTING_AUDIOFOLDER"
parser = OptionParser(usage=usage, version="%prog 1.1")
parser.add_option("--secs",
action="store",
dest="secs",
default=5,
type=int,
help='Number of seconds starting from zero to test')
parser.add_option("--results",
action="store",
dest="results_folder",
default="./dejavu_test_results",
help='Sets the path where the results are saved')
parser.add_option("--temp",
action="store",
dest="temp_folder",
default="./dejavu_temp_testing_files",
help='Sets the path where the temp files are saved')
parser.add_option("--log",
action="store_true",
dest="log",
default=True,
help='Enables logging')
parser.add_option("--silent",
action="store_false",
dest="silent",
default=False,
help='Disables printing')
parser.add_option("--log-file",
dest="log_file",
default="results-compare.log",
help='Set the path and filename of the log file')
parser.add_option("--padding",
action="store",
dest="padding",
default=10,
type=int,
help='Number of seconds to pad choice of place to test from')
parser.add_option("--seed",
action="store",
dest="seed",
default=None,
type=int,
help='Random seed')
options, args = parser.parse_args()
test_folder = args[0]
# set random seed if set by user
set_seed(options.seed)
# ensure results folder exists
try:
os.stat(options.results_folder)
except:
os.mkdir(options.results_folder)
# set logging
if options.log:
logging.basicConfig(filename=options.log_file, level=logging.DEBUG)
# set test seconds
test_seconds = ['%dsec' % i for i in range(1, options.secs + 1, 1)]
# generate testing files
for i in range(1, options.secs + 1, 1):
generate_test_files(test_folder, options.temp_folder,
i, padding=options.padding)
# scan files
log_msg("Running Dejavu fingerprinter on files in %s..." % test_folder,
log=options.log, silent=options.silent)
tm = time.time()
djv = DejavuTest(options.temp_folder, test_seconds)
log_msg("finished obtaining results from dejavu in %s" % (time.time() - tm),
log=options.log, silent=options.silent)
tests = 1 # djv
n_secs = len(test_seconds)
# set result accumulators -> nested lists indexed [second][category or line][test]
all_match_counter = [[[0 for x in range(tests)] for x in range(3)] for x in range(n_secs)]
all_matching_times_counter = [[[0 for x in range(tests)] for x in range(2)] for x in range(n_secs)]
all_query_duration = [[[0 for x in range(tests)] for x in range(djv.n_lines)] for x in range(n_secs)]
all_match_confidence = [[[0 for x in range(tests)] for x in range(djv.n_lines)] for x in range(n_secs)]
# group results by seconds
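# djv.result_* matrices are indexed [song line][seconds column]; re-pack them
# into the per-second accumulators created above.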
for line in range(0, djv.n_lines):
for col in range(0, djv.n_columns):
# for dejavu
all_query_duration[col][line][0] = djv.result_query_duration[line][col]
all_match_confidence[col][line][0] = djv.result_match_confidence[line][col]
djv_match_result = djv.result_match[line][col]
if djv_match_result == 'yes':
all_match_counter[col][0][0] += 1
elif djv_match_result == 'no':
all_match_counter[col][1][0] += 1
else:
all_match_counter[col][2][0] += 1
djv_match_acc = djv.result_matching_times[line][col]
if djv_match_acc == 0 and djv_match_result == 'yes':
all_matching_times_counter[col][0][0] += 1
elif djv_match_acc != 0:
all_matching_times_counter[col][1][0] += 1
# create plots
djv.create_plots('Confidence', all_match_confidence, options.results_folder)
djv.create_plots('Query duration', all_query_duration, options.results_folder)
for sec in range(0, n_secs):
ind = np.arange(3) #
width = 0.25 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-1 * width, 2.75])
means_dvj = [round(x[0] * 100 / djv.n_lines, 1) for x in all_match_counter[sec]]
rects1 = ax.bar(ind, means_dvj, width, color='r')
# add some
ax.set_ylabel('Matching Percentage')
ax.set_title('%s Matching Percentage' % test_seconds[sec])
ax.set_xticks(ind + width)
labels = ['yes','no','invalid']
ax.set_xticklabels( labels )
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
#ax.legend((rects1[0]), ('Dejavu'), loc='center left', bbox_to_anchor=(1, 0.5))
autolabeldoubles(rects1,ax)
plt.grid()
fig_name = os.path.join(options.results_folder, "matching_perc_%s.png" % test_seconds[sec])
fig.savefig(fig_name)
for sec in range(0, n_secs):
ind = np.arange(2) #
width = 0.25 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-1*width, 1.75])
div = all_match_counter[sec][0][0]
if div == 0 :
div = 1000000
means_dvj = [round(x[0] * 100 / div, 1) for x in all_matching_times_counter[sec]]
rects1 = ax.bar(ind, means_dvj, width, color='r')
# add some
ax.set_ylabel('Matching Accuracy')
ax.set_title('%s Matching Times Accuracy' % test_seconds[sec])
ax.set_xticks(ind + width)
labels = ['yes','no']
ax.set_xticklabels( labels )
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
#ax.legend( (rects1[0]), ('Dejavu'), loc='center left', bbox_to_anchor=(1, 0.5))
autolabeldoubles(rects1,ax)
plt.grid()
fig_name = os.path.join(options.results_folder, "matching_acc_%s.png" % test_seconds[sec])
fig.savefig(fig_name)
# remove temporary folder
shutil.rmtree(options.temp_folder)
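# Example invocation (hypothetical paths, for illustration only):
#   python run_tests.py --secs 3 --results ./dejavu_test_results ./test_audio_folder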
|
gpl-3.0
|
DrSkippy/php_books_database
|
tools/bookdbtool/visualizations.py
|
1
|
1064
|
import logging
import pandas as pd
import matplotlib.pyplot as plt
def running_total_comparison(df1, window=15):
fig_size = [12,12]
xlim = [0,365]
ylim = [0,max(df1.Pages)]
years = df1.Year.unique()[-window:].tolist()
y = years.pop(0)
_df = df1.loc[df1.Year == y]
ax = _df.plot("Day", "Pages", figsize=fig_size, xlim=xlim, ylim=ylim, label=y)
for y in years:
_df = df1.loc[df1.Year == y]
ax = _df.plot("Day", "Pages", figsize=fig_size, xlim=xlim, ylim=ylim, ax=ax, label=y)
def yearly_comparisons(df, current_year=2020):
now = df.loc[df.Year == current_year]
fig_size = [12, 6]
ax = df.hist("Pages Read", bins=14, color="darkblue", figsize=fig_size)
plt.axvline(x=int(now["Pages Read"]), color="red")
plt.show()
df.plot.bar(x="Rank", y="Pages Read", width=.95, color="darkblue", figsize=fig_size)
plt.axvline(x=int(now["Rank"]) - 1, color="red")
plt.show()
df.sort_values("Year").plot.bar(x="Year", y="Pages Read", width=.95, color="darkblue", figsize=fig_size)
plt.show()
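# Hedged usage sketch (not part of the original module): tiny synthetic frames
# with the column names the functions above expect (Year/Day/Pages and
# Year/"Pages Read"/Rank); real data would come from the book database.
if __name__ == "__main__":
    daily = pd.DataFrame({
        "Year": [2019, 2019, 2019, 2020, 2020, 2020],
        "Day": [1, 100, 200, 1, 100, 200],
        "Pages": [10, 900, 2000, 15, 1100, 2400],  # cumulative pages read
    })
    running_total_comparison(daily, window=2)
    yearly = pd.DataFrame({
        "Year": [2019, 2020],
        "Pages Read": [5200, 6100],
        "Rank": [2, 1],
    })
    yearly_comparisons(yearly, current_year=2020)
    plt.show()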
|
bsd-2-clause
|
tomaslaz/KLMC_Analysis
|
DM_DOS.py
|
2
|
6477
|
#!/usr/bin/env python
"""
A script to plot DOS (integrated)
@author Tomas Lazauskas, David Mora Fonz, 2016
@web www.lazauskas.net
@email tomas.lazauskas[a]gmail.com
"""
import copy
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from optparse import OptionParser
import scipy.special
import sys
_delta = 0.01
_extraBins = 2
_sigma = 0.1
_kB = 0.0257/298.0
_colours = ['r', 'b', 'g', 'y', 'c', 'm', 'darkblue', 'sienna', 'indigo', 'orange', 'grey', 'brown']
def cmdLineArgs():
"""
Handles command line arguments and options.
"""
usage = "usage: %prog inputFile"
parser = OptionParser(usage=usage)
parser.disable_interspersed_args()
parser.add_option('-t', dest="temps", default=None, help="List of temperatures, separated by a comma (default t=0)")
(options, args) = parser.parse_args()
if (len(args) != 1):
parser.error("incorrect number of arguments")
return options, args
def getTheListOfTemps(tempString):
"""
A function to process an argument string line to an array of temperatures
"""
temps = None
arrString = tempString.split(",")
lenArrString = len(arrString)
temps = np.zeros(lenArrString)
for i in range(lenArrString):
try:
temp = float(arrString[i])
except ValueError:
sys.exit("Incorrect temperatures.")
temps[i] = temp
return temps
def plotDOS(energyBins, energyDOS, eMax):
"""
Plots the DOS graph.
"""
fig = plt.figure(figsize=(9, 6))
ax1 = fig.add_subplot(1,1,1)
plt.subplots_adjust(left=0.1, bottom=0.11, top=0.95, right=0.95)
ax1.plot(energyBins, energyDOS, c='r', linewidth=2.0)
plt.grid()
stepSize = roundTo1St(eMax/10)
ax1.xaxis.set_ticks(np.arange(0, eMax, stepSize))
ax1.set_xlabel('Energy (eV)', fontsize=18)
ax1.set_ylabel('DOS', fontsize=18)
fig.savefig('DOS.png', dpi=300, bbox_inches='tight')
def plotDOSandIntegratedDOS(energyBins, energyDOS, tempArrs, noOfTemps, temps, eMax):
"""
Plots DOS and integrated DOS on the same graph.
"""
same = []
labels = []
fig = plt.figure(figsize=(9, 6))
ax1 = fig.add_subplot(1,1,1)
plt.subplots_adjust(left=0.1, bottom=0.11, top=0.95, right=0.90)
ax1.set_ylabel('DOS', fontsize=18)
# plotting DOS
label = "DOS"
series, = ax1.plot(energyBins, energyDOS, c=_colours[0], label=label, linewidth=3.0)
labels.append(label)
same.append(series)
# plotting integrated DOS
ax2 = ax1.twinx()
for i in range(noOfTemps):
label = "%d K" % (temps[i])
series, = ax2.plot(energyBins, tempArrs[:, i], c=_colours[i+1], label=label, linewidth=2.0)
labels.append(label)
same.append(series)
plt.grid()
plt.legend(same, labels, loc=0)
stepSize = roundTo1St(eMax/10)
ax1.xaxis.set_ticks(np.arange(0, eMax, stepSize))
ax1.set_xlabel('Energy (eV)', fontsize=18)
ax2.set_ylabel('Integrated DOS', fontsize=18)
fig.savefig('DOSandIntegratedDOS.png', dpi=300, bbox_inches='tight')
# Lets print the integrated dos values:
print("-" * 33)
print("Temperature | Configurations")
print("-" * 33)
for i in range(noOfTemps):
    print("%11d K | %11f" % (temps[i], tempArrs[len(energyBins)-1, i]))
print("-" * 33)
def plotIntegratedDOS(energyBins, tempArrs, noOfTemps, temps, eMax):
"""
Plots the integrated DOS graph.
"""
series = []
labels = []
fig = plt.figure(figsize=(9, 6))
ax1 = fig.add_subplot(1,1,1)
plt.subplots_adjust(left=0.1, bottom=0.11, top=0.95, right=0.95)
for i in range(noOfTemps):
label = "%d K" % (temps[i])
serie, = ax1.plot(energyBins, tempArrs[:, i], c=_colours[i], label=label, linewidth=2.0)
labels.append(label)
series.append(serie)
plt.grid()
plt.legend(series, labels, loc=0, fontsize=18)
stepSize = roundTo1St(eMax/10)
ax1.xaxis.set_ticks(np.arange(0, eMax, stepSize))
ax1.set_xlabel('Energy (eV)', fontsize=18)
ax1.set_ylabel('Integrated DOS', fontsize=18)
fig.savefig('Integrated_DOS.png', dpi=300, bbox_inches='tight')
def roundTo1St(x):
"""
A function which rounds to the first significant number
"""
return round(x, -int(math.floor(math.log10(abs(x)))))
def runDOS(energies_input, temps):
"""
Calculates and plots DOS
"""
energies = copy.deepcopy(energies_input)
# getting the number of temperatures
noOfTemps = len(temps)
# pushing by eMin
energies -= energies.min()
# getting the unique list of energies
energiesUnique = np.unique(energies)
# get min and max
eMin = energies.min()
eMax = energies.max() + _extraBins*_delta
# number of bins
M = int((eMax - eMin)/_delta)
# creating energy bins
energyBins = np.arange(eMin, np.around([M * _delta], decimals=4), _delta)
# preparing a DOS array
energyDOS = np.zeros(M, dtype=np.float32)
# creating temperature arrays
tempArrs = np.zeros([M, noOfTemps], dtype=np.float32)
# calculating DOS and integrated DOS with respect to the temperatures
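# Each bin holds a Gaussian-smeared density of states,
#   DOS(E_i) = sum_k (1 / (sigma * sqrt(pi))) * exp(-(E_i - E_k)**2 / sigma**2),
# and, for T > 0, the integrated DOS accumulates each Gaussian up to E_i (the
# erf terms below) weighted by the Boltzmann factor exp(-E_k / (kB * T)).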
for i in range(M):
energyDOS[i] = np.sum((1/(_sigma*np.pi**0.5)) * np.exp(-(energyBins[i] - energies)**2 / _sigma**2))
if noOfTemps > 0:
# going through the list of temperatures
for j in range(noOfTemps):
temp = temps[j]
if temp > 0.0:
# calculating the integrated DOS
tempArrs[i][j] = ((1.0/(_sigma*np.pi**0.5)) *
np.sum(((np.pi**0.5/(2*_sigma**-1)) *
(scipy.special.erf(1/_sigma * (energyBins[i] - energiesUnique)) -
scipy.special.erf(-np.infty))) *
np.exp(-(energiesUnique)/(_kB*temp) )))
else:
tempArrs[i][j] = np.nan
# printing DOS graph
plotDOS(energyBins, energyDOS, eMax)
if noOfTemps > 0:
# printing integrated DOS graph
plotIntegratedDOS(energyBins, tempArrs, noOfTemps, temps, eMax)
# printing DOS and integrated DOS
plotDOSandIntegratedDOS(energyBins, energyDOS, tempArrs, noOfTemps, temps, eMax)
if __name__ == "__main__":
options, args = cmdLineArgs()
# getting the temperatures
if options.temps is not None:
temps = getTheListOfTemps(options.temps)
else:
temps = []
# reading the energies from a file
energies = np.loadtxt(args[0])
runDOS(energies, temps)
print("Finished.")
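# Example invocation (hypothetical input file, for illustration only):
#   python DM_DOS.py -t 300,600,900 energies.dat
# where energies.dat holds the configuration energies (in eV) in a format
# readable by np.loadtxt.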
|
gpl-3.0
|
yl565/statsmodels
|
statsmodels/examples/ex_kernel_regression.py
|
34
|
1785
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 09:17:40 2013
Author: Josef Perktold based on test file by George Panterov
"""
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import statsmodels.nonparametric.api as nparam
#import statsmodels.api as sm
#nparam = sm.nonparametric
italy_gdp = \
[8.556, 12.262, 9.587, 8.119, 5.537, 6.796, 8.638,
6.483, 6.212, 5.111, 6.001, 7.027, 4.616, 3.922,
4.688, 3.957, 3.159, 3.763, 3.829, 5.242, 6.275,
8.518, 11.542, 9.348, 8.02, 5.527, 6.865, 8.666,
6.672, 6.289, 5.286, 6.271, 7.94, 4.72, 4.357,
4.672, 3.883, 3.065, 3.489, 3.635, 5.443, 6.302,
9.054, 12.485, 9.896, 8.33, 6.161, 7.055, 8.717,
6.95]
italy_year = \
[1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951,
1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1952,
1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952,
1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1953, 1953,
1953, 1953, 1953, 1953, 1953, 1953]
italy_year = np.asarray(italy_year, float)
model = nparam.KernelReg(endog=[italy_gdp],
exog=[italy_year], reg_type='lc',
var_type='o', bw='cv_ls')
sm_bw = model.bw
R_bw = 0.1390096
sm_mean, sm_mfx = model.fit()
sm_mean2 = sm_mean[0:5]
sm_mfx = sm_mfx[0:5]
R_mean = 6.190486
sm_R2 = model.r_squared()
R_R2 = 0.1435323
npt.assert_allclose(sm_bw, R_bw, atol=1e-2)
npt.assert_allclose(sm_mean2, R_mean, atol=1e-2)
npt.assert_allclose(sm_R2, R_R2, atol=1e-2)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(italy_year, italy_gdp, 'o')
ax.plot(italy_year, sm_mean, '-')
plt.show()
|
bsd-3-clause
|
bthirion/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
28
|
17934
|
import numpy as np
import scipy.sparse as sp
import numbers
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message, assert_no_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.extmath import squared_norm, fast_dot
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
msg = "Invalid beta_loss parameter: got 'spam' instead of one"
assert_raise_message(ValueError, msg, NMF(solver='mu',
beta_loss=name).fit, A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
msg += "beta_loss = 1.0"
assert_raise_message(ValueError, msg, NMF(solver='cd',
beta_loss=1.0).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('cd', 'mu'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'):
model = NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_nmf_fit_close():
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
for solver in ('cd', 'mu'):
pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
max_iter=600)
X = np.abs(rng.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.1)
def test_nmf_transform():
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
for solver in ['cd', 'mu']:
m = NMF(solver=solver, n_components=3, init='random',
random_state=0, tol=1e-5)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
def test_nmf_transform_custom_init():
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = NMF(solver='cd', n_components=n_components, init='custom',
random_state=0)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
def test_nmf_inverse_transform():
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
for solver in ('cd', 'mu'):
m = NMF(solver=solver, n_components=4, init='random', random_state=0,
max_iter=1000)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
def test_nmf_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('cd', 'mu'):
est1 = NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
for solver in ('cd', 'mu'):
model = NMF(solver=solver, random_state=0, n_components=2,
max_iter=400)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
def test_non_negative_factorization_consistency():
# Test that the function is called in the same way, either directly
# or through the NMF class
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('cd', 'mu'):
W_nmf, H, _ = non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameter checking in the public function
nnmf = non_negative_factorization
assert_no_warnings(nnmf, A, A, A, np.int64(1))
msg = ("Number of components must be a positive integer; "
"got (n_components=1.5)")
assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5)
msg = ("Number of components must be a positive integer; "
"got (n_components='2')")
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
msg = "Invalid regularization parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True,
'cd', 2., 1e-4, 200, 0., 0., 'spam')
def _beta_divergence_dense(X, W, H, beta):
"""Compute the beta-divergence of X and W.H for dense array only.
Used as a reference for testing nmf._beta_divergence.
"""
if isinstance(X, numbers.Number):
W = np.array([[W]])
H = np.array([[H]])
X = np.array([[X]])
WH = fast_dot(W, H)
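    # Special cases implemented below: beta == 2 gives half the squared
    # Frobenius norm, beta == 1 the generalized Kullback-Leibler divergence,
    # beta == 0 the Itakura-Saito divergence; any other beta falls through to
    # the general closed-form expression.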
if beta == 2:
return squared_norm(X - WH) / 2
WH_Xnonzero = WH[X != 0]
X_nonzero = X[X != 0]
np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)
if beta == 1:
res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
res += WH.sum() - X.sum()
elif beta == 0:
div = X_nonzero / WH_Xnonzero
res = np.sum(div) - X.size - np.sum(np.log(div))
else:
res = (X_nonzero ** beta).sum()
res += (beta - 1) * (WH ** beta).sum()
res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
res /= beta * (beta - 1)
return res
def test_beta_divergence():
# Compare _beta_divergence with the reference _beta_divergence_dense
n_samples = 20
n_features = 10
n_components = 5
beta_losses = [0., 0.5, 1., 1.5, 2.]
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)
for beta in beta_losses:
ref = _beta_divergence_dense(X, W, H, beta)
loss = nmf._beta_divergence(X, W, H, beta)
loss_csr = nmf._beta_divergence(X_csr, W, H, beta)
assert_almost_equal(ref, loss, decimal=7)
assert_almost_equal(ref, loss_csr, decimal=7)
def test_special_sparse_dot():
# Test the function that computes np.dot(W, H), only where X is non zero.
n_samples = 10
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0.
X_csr = sp.csr_matrix(X)
W = np.abs(rng.randn(n_samples, n_components))
H = np.abs(rng.randn(n_components, n_features))
WH_safe = nmf._special_sparse_dot(W, H, X_csr)
WH = nmf._special_sparse_dot(W, H, X)
# test that both results have same values, in X_csr nonzero elements
ii, jj = X_csr.nonzero()
WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)
# test that WH_safe and X_csr have the same sparse structure
assert_array_equal(WH_safe.indices, X_csr.indices)
assert_array_equal(WH_safe.indptr, X_csr.indptr)
assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
# Compare sparse and dense input in multiplicative update NMF
# Also test continuity of the results with respect to beta_loss parameter
n_samples = 20
n_features = 10
n_components = 5
alpha = 0.1
l1_ratio = 0.5
n_iter = 20
# initialization
rng = np.random.mtrand.RandomState(1337)
X = rng.randn(n_samples, n_features)
X = np.abs(X)
X_csr = sp.csr_matrix(X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
# Reference with dense array X
W, H = W0.copy(), H0.copy()
W1, H1, _ = non_negative_factorization(
X, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
# Compare with sparse X
W, H = W0.copy(), H0.copy()
W2, H2, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W2, decimal=7)
assert_array_almost_equal(H1, H2, decimal=7)
# Compare with almost same beta_loss, since some values have a specific
# behavior, but the results should be continuous w.r.t beta_loss
beta_loss -= 1.e-5
W, H = W0.copy(), H0.copy()
W3, H3, _ = non_negative_factorization(
X_csr, W, H, n_components, init='custom', update_H=True,
solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
l1_ratio=l1_ratio, regularization='both', random_state=42)
assert_array_almost_equal(W1, W3, decimal=4)
assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
# Test that an error is raised if beta_loss < 0 and X contains zeros.
# Test that the output has no NaN values when the input contains zeros.
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
X[X < 0] = 0
X_csr = sp.csr_matrix(X)
def _assert_nmf_no_nan(X, beta_loss):
W, H, _ = non_negative_factorization(
X, n_components=n_components, solver='mu', beta_loss=beta_loss,
random_state=0, max_iter=1000)
assert_false(np.any(np.isnan(W)))
assert_false(np.any(np.isnan(H)))
msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
for beta_loss in (-0.6, 0.):
assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
_assert_nmf_no_nan(X + 1e-9, beta_loss)
for beta_loss in (0.2, 1., 1.2, 2., 2.5):
_assert_nmf_no_nan(X, beta_loss)
_assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
# Test the effect of L1 and L2 regularizations
n_samples = 6
n_features = 5
n_components = 3
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(n_samples, n_features))
# L1 regularization should increase the number of zeros
l1_ratio = 1.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
W_regul_n_zeros = W_regul[W_regul == 0].size
W_model_n_zeros = W_model[W_model == 0].size
H_regul_n_zeros = H_regul[H_regul == 0].size
H_model_n_zeros = H_model[H_model == 0].size
assert_greater(W_regul_n_zeros, W_model_n_zeros)
assert_greater(H_regul_n_zeros, H_model_n_zeros)
# L2 regularization should decrease the mean of the coefficients
l1_ratio = 0.
for solver in ['cd', 'mu']:
regul = nmf.NMF(n_components=n_components, solver=solver,
alpha=0.5, l1_ratio=l1_ratio, random_state=42)
model = nmf.NMF(n_components=n_components, solver=solver,
alpha=0., l1_ratio=l1_ratio, random_state=42)
W_regul = regul.fit_transform(X)
W_model = model.fit_transform(X)
H_regul = regul.components_
H_model = model.components_
assert_greater(W_model.mean(), W_regul.mean())
assert_greater(H_model.mean(), H_regul.mean())
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
for solver in ('cd', 'mu'):
if solver != 'mu' and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X, W, H, beta_loss=beta_loss, init='custom',
n_components=n_components, max_iter=1, alpha=alpha,
solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
regularization='both', random_state=0, update_H=True)
loss = nmf._beta_divergence(X, W, H, beta_loss)
if previous_loss is not None:
assert_greater(previous_loss, loss)
previous_loss = loss
|
bsd-3-clause
|
simonsfoundation/CaImAn
|
caiman/source_extraction/volpy/mrcnn/visualize.py
|
2
|
19666
|
"""
Mask R-CNN
Display and Visualization Functions.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import os
import sys
import random
import itertools
import colorsys
import numpy as np
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
import IPython.display
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from ..mrcnn import utils
############################################################
# Visualization
############################################################
def display_images(images, titles=None, cols=4, cmap=None, norm=None,
interpolation=None):
"""Display the given set of images, optionally with titles.
images: list or array of image tensors in HWC format.
titles: optional. A list of titles to display with each image.
cols: number of images per row
cmap: Optional. Color map to use. For example, "Blues".
norm: Optional. A Normalize instance to map values to colors.
interpolation: Optional. Image interpolation to use for display.
"""
titles = titles if titles is not None else [""] * len(images)
rows = len(images) // cols + 1
plt.figure(figsize=(14, 14 * rows // cols))
i = 1
for image, title in zip(images, titles):
plt.subplot(rows, cols, i)
plt.title(title, fontsize=9)
plt.axis('off')
plt.imshow(image, cmap=cmap,
norm=norm, interpolation=interpolation)
i += 1
plt.show()
def random_colors(N, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def apply_mask(image, mask, color, alpha=0.5):
"""Apply the given mask to the image.
"""
for c in range(3):
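        # Alpha-blend the colour into each RGB channel: where mask == 1 the
        # pixel becomes (1 - alpha) * original + alpha * color[c] * 255;
        # elsewhere it is left unchanged.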
image[:, :, c] = np.where(mask == 1,
image[:, :, c] *
(1 - alpha) + alpha * color[c] * 255,
image[:, :, c])
return image
def display_instances(image, boxes, masks, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None,
show_mask=True,show_bbox=True,
colors=None, captions=None):
"""
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
masks: [height, width, num_instances]
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
title: (optional) Figure title
show_mask, show_bbox: To show masks and bounding boxes or not
figsize: (optional) the size of the image
colors: (optional) An array or colors to use with each object
captions: (optional) A list of strings to use as captions for each object
"""
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
else:
assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
# If no axis is passed, create one and automatically call show()
auto_show = False
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
auto_show = True
# Generate random colors
colors = colors or random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
#image_path = '/home/nel/Code/VolPy/Mask_RCNN/backup/inference.npz'
#image1 = np.load(image_path)['arr_0']
#ax.imshow(image1, cmap='gray', vmax=np.percentile(image1,98))
ax.imshow(image[:,:,0], cmap='gray',vmax=np.percentile(image,99))
masked_image = np.zeros(image.copy().shape)#.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
if show_bbox:
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1,
alpha=1, linestyle=":",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
if not captions:
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
#caption = "{} {:.3f}".format(label, score) if score else label
caption = "{:.2f}".format(score) if score else label
else:
caption = captions[i]
ax.text(x1+6, y1 + 12, caption, alpha=1,
color='r', size=10, backgroundcolor="none")
# Mask
mask = masks[:, :, i]
if show_mask:
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
#ax.imshow(masked_image[:,:,0],cmap='Greys')
#ax.imshow(image[:,:,0])#.astype(np.uint8)
#import matplotlib
#print(matplotlib.rcParams['ps.fonttype'])
#print(matplotlib.rcParams['pdf.fonttype'])
#plt.savefig('/home/nel/Code/VolPy/Mask_RCNN/results/inference_fish1_2.pdf')
if auto_show:
plt.show()
def display_differences(image,
gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
class_names, title="", ax=None,
show_mask=True, show_box=True,
iou_threshold=0.5, score_threshold=0.5):
"""Display ground truth and prediction instances on the same image."""
# Match predictions to ground truth
gt_match, pred_match, overlaps = utils.compute_matches(
gt_box, gt_class_id, gt_mask,
pred_box, pred_class_id, pred_score, pred_mask,
iou_threshold=iou_threshold, score_threshold=score_threshold)
# Ground truth = green. Predictions = red
colors = [(0, 1, 0, .8)] * len(gt_match)\
+ [(1, 0, 0, 1)] * len(pred_match)
# Concatenate GT and predictions
class_ids = np.concatenate([gt_class_id, pred_class_id])
scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])
boxes = np.concatenate([gt_box, pred_box])
masks = np.concatenate([gt_mask, pred_mask], axis=-1)
# Captions per instance show score/IoU
captions = ["" for m in gt_match] + ["{:.2f} / {:.2f}".format(
pred_score[i],
(overlaps[i, int(pred_match[i])]
if pred_match[i] > -1 else overlaps[i].max()))
for i in range(len(pred_match))]
# Set title if not provided
title = title or "Ground Truth and Detections\n GT=green, pred=red, captions: score/IoU"
# Display
display_instances(
image,
boxes, masks, class_ids,
class_names, scores, ax=ax,
show_bbox=show_box, show_mask=show_mask,
colors=colors, captions=captions,
title=title)
def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):
"""
anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
proposals: [n, 4] the same anchors but refined to fit objects better.
"""
masked_image = image.copy()
# Pick random anchors in case there are too many.
ids = np.arange(rois.shape[0], dtype=np.int32)
ids = np.random.choice(
ids, limit, replace=False) if ids.shape[0] > limit else ids
fig, ax = plt.subplots(1, figsize=(12, 12))
if rois.shape[0] > limit:
plt.title("Showing {} random ROIs out of {}".format(
len(ids), rois.shape[0]))
else:
plt.title("{} ROIs".format(len(ids)))
# Show area outside image boundaries.
ax.set_ylim(image.shape[0] + 20, -20)
ax.set_xlim(-50, image.shape[1] + 20)
ax.axis('off')
for i, id in enumerate(ids):
color = np.random.rand(3)
class_id = class_ids[id]
# ROI
y1, x1, y2, x2 = rois[id]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
edgecolor=color if class_id else "gray",
facecolor='none', linestyle="dashed")
ax.add_patch(p)
# Refined ROI
if class_id:
ry1, rx1, ry2, rx2 = refined_rois[id]
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal for easy visualization
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Label
label = class_names[class_id]
ax.text(rx1, ry1 + 8, "{}".format(label),
color='w', size=11, backgroundcolor="none")
# Mask
m = utils.unmold_mask(mask[id], rois[id]
[:4].astype(np.int32), image.shape)
masked_image = apply_mask(masked_image, m, color)
ax.imshow(masked_image)
# Print stats
print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
print("Positive Ratio: {:.2f}".format(
class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
# TODO: Replace with matplotlib equivalent?
def draw_box(image, box, color):
"""Draw 3-pixel width bounding boxes on the given image array.
color: list of 3 int values for RGB.
"""
y1, x1, y2, x2 = box
image[y1:y1 + 2, x1:x2] = color
image[y2:y2 + 2, x1:x2] = color
image[y1:y2, x1:x1 + 2] = color
image[y1:y2, x2:x2 + 2] = color
return image
def display_top_masks(image, mask, class_ids, class_names, limit=4):
"""Display the given image and the top few class masks."""
to_display = []
titles = []
to_display.append(image)
titles.append("H x W={}x{}".format(image.shape[0], image.shape[1]))
# Pick top prominent classes in this image
unique_class_ids = np.unique(class_ids)
mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])
for i in unique_class_ids]
top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),
key=lambda r: r[1], reverse=True) if v[1] > 0]
# Generate images and titles
for i in range(limit):
class_id = top_ids[i] if i < len(top_ids) else -1
# Pull masks of instances belonging to the same class.
m = mask[:, :, np.where(class_ids == class_id)[0]]
m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)
to_display.append(m)
titles.append(class_names[class_id] if class_id != -1 else "-")
display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r")
def plot_precision_recall(AP, precisions, recalls):
"""Draw the precision-recall curve.
AP: Average precision at IoU >= 0.5
precisions: list of precision values
recalls: list of recall values
"""
# Plot the Precision-Recall curve
_, ax = plt.subplots(1)
ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP))
ax.set_ylim(0, 1.1)
ax.set_xlim(0, 1.1)
_ = ax.plot(recalls, precisions)
def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,
overlaps, class_names, threshold=0.5):
"""Draw a grid showing how ground truth objects are classified.
gt_class_ids: [N] int. Ground truth class IDs
pred_class_id: [N] int. Predicted class IDs
pred_scores: [N] float. The probability scores of predicted classes
overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.
class_names: list of all class names in the dataset
threshold: Float. The prediction probability required to predict a class
"""
gt_class_ids = gt_class_ids[gt_class_ids != 0]
pred_class_ids = pred_class_ids[pred_class_ids != 0]
plt.figure(figsize=(12, 10))
plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)
plt.yticks(np.arange(len(pred_class_ids)),
["{} ({:.2f})".format(class_names[int(id)], pred_scores[i])
for i, id in enumerate(pred_class_ids)])
plt.xticks(np.arange(len(gt_class_ids)),
[class_names[int(id)] for id in gt_class_ids], rotation=90)
thresh = overlaps.max() / 2.
for i, j in itertools.product(range(overlaps.shape[0]),
range(overlaps.shape[1])):
text = ""
if overlaps[i, j] > threshold:
text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong"
color = ("white" if overlaps[i, j] > thresh
else "black" if overlaps[i, j] > 0
else "grey")
plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text),
horizontalalignment="center", verticalalignment="center",
fontsize=9, color=color)
plt.tight_layout()
plt.xlabel("Ground Truth")
plt.ylabel("Predictions")
def draw_boxes(image, boxes=None, refined_boxes=None,
masks=None, captions=None, visibilities=None,
title="", ax=None, cmap=None, vmax=None):
"""Draw bounding boxes and segmentation masks with different
customizations.
boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
refined_boxes: Like boxes, but draw with solid lines to show
that they're the result of refining 'boxes'.
masks: [N, height, width]
captions: List of N titles to display on each box
visibilities: (optional) List of values of 0, 1, or 2. Determine how
prominent each bounding box should be.
title: An optional title to show over the image
ax: (optional) Matplotlib axis to draw on.
"""
# Number of boxes
assert boxes is not None or refined_boxes is not None
N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
# Matplotlib Axis
if not ax:
_, ax = plt.subplots(1, figsize=(12, 12))
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
margin = image.shape[0] // 10
ax.set_ylim(image.shape[0] + margin, -margin)
ax.set_xlim(-margin, image.shape[1] + margin)
ax.axis('off')
ax.set_title(title)
ax.imshow(image[:,:,0], cmap=cmap, vmax=vmax)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
# Box visibility
visibility = visibilities[i] if visibilities is not None else 1
if visibility == 0:
color = "gray"
style = "dotted"
alpha = 0.5
elif visibility == 1:
color = colors[i]
style = "dotted"
alpha = 1
elif visibility == 2:
color = colors[i]
style = "solid"
alpha = 1
# Boxes
if boxes is not None:
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=alpha, linestyle=style,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Refined boxes
if refined_boxes is not None and visibility > 0:
ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal
if boxes is not None:
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Captions
if captions is not None:
caption = captions[i]
# If there are refined boxes, display captions on them
if refined_boxes is not None:
y1, x1, y2, x2 = ry1, rx1, ry2, rx2
ax.text(x1, y1, caption, size=11, verticalalignment='top',
color='w', backgroundcolor="none",
bbox={'facecolor': color, 'alpha': 0.5,
'pad': 2, 'edgecolor': 'none'})
# Masks
if masks is not None:
mask = masks[:, :, i]
masked_image = apply_mask(masked_image, mask, color)
# Mask Polygon
# Pad to ensure proper polygons for masks that touch image edges.
padded_mask = np.zeros(
(mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
padded_mask[1:-1, 1:-1] = mask
contours = find_contours(padded_mask, 0.5)
for verts in contours:
# Subtract the padding and flip (y, x) to (x, y)
verts = np.fliplr(verts) - 1
p = Polygon(verts, facecolor="none", edgecolor=color)
ax.add_patch(p)
#ax.imshow(masked_image[:,:,0])#.astype(np.uint8)
def display_table(table):
"""Display values in a table format.
table: an iterable of rows, and each row is an iterable of values.
"""
html = ""
for row in table:
row_html = ""
for col in row:
row_html += "<td>{:40}</td>".format(str(col))
html += "<tr>" + row_html + "</tr>"
html = "<table>" + html + "</table>"
IPython.display.display(IPython.display.HTML(html))
def display_weight_stats(model):
"""Scans all the weights in the model and returns a list of tuples
that contain stats about each weight.
"""
layers = model.get_trainable_layers()
table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]]
for l in layers:
weight_values = l.get_weights() # list of Numpy arrays
weight_tensors = l.weights # list of TF tensors
for i, w in enumerate(weight_values):
weight_name = weight_tensors[i].name
# Detect problematic layers. Exclude biases of conv layers.
alert = ""
if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1):
alert += "<span style='color:red'>*** dead?</span>"
if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:
alert += "<span style='color:red'>*** Overflow?</span>"
# Add row
table.append([
weight_name + alert,
str(w.shape),
"{:+9.4f}".format(w.min()),
"{:+10.4f}".format(w.max()),
"{:+9.4f}".format(w.std()),
])
display_table(table)
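# Hedged usage sketch (not part of the original module): synthetic inputs that
# only illustrate the array shapes display_instances() expects; all values are
# made up.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo_image = rng.rand(128, 128, 3).astype(np.float32)
    demo_boxes = np.array([[20, 20, 60, 60], [70, 40, 110, 90]])  # (y1, x1, y2, x2)
    demo_masks = np.zeros((128, 128, 2), dtype=np.uint8)
    demo_masks[20:60, 20:60, 0] = 1
    demo_masks[70:110, 40:90, 1] = 1
    demo_class_ids = np.array([1, 1])
    display_instances(demo_image, demo_boxes, demo_masks, demo_class_ids,
                      class_names=["BG", "neuron"],
                      scores=np.array([0.95, 0.80]), title="demo")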
|
gpl-2.0
|
lhilt/scipy
|
scipy/signal/wavelets.py
|
4
|
10504
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.special import comb
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
The 2p filter coefficients of the low-pass filter.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
"""
N = len(hk) - 1
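    # Quadrature-mirror relation: g[k] = (-1)**k * h[N - k], i.e. reverse the
    # low-pass coefficients and negate every other one.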
asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
psi : ndarray, optional
The wavelet function ``psi(x)`` at `x`:
``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N.
`psi` is only returned if `gk` is not None.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {'0': v / sm}
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float, optional
Omega0. Default is 5
s : float, optional
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool, optional
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of `w`.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
This version has a correction
term to improve admissibility. For `w` greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to `s`.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where `r` is the sampling rate.
Note: This function was created before `cwt` and is not compatible
with it.
"""
x = np.linspace(-s * 2 * np.pi, s * 2 * np.pi, M)
output = np.exp(1j * w * x)
if complete:
output -= np.exp(-0.5 * (w**2))
output *= np.exp(-0.5 * (x**2)) * np.pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A * (1 - x**2/a**2) * exp(-x**2/(2*a**2))``,
where ``A = 2/(sqrt(3*a) * pi**(1/4))``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(length,width)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
Will have shape of (len(widths), len(data)).
Notes
-----
::
length = min(10 * width[ii], len(data))
cwt[ii,:] = signal.convolve(data, wavelet(length,
width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 200, endpoint=False)
>>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
>>> widths = np.arange(1, 31)
>>> cwtmatr = signal.cwt(sig, signal.ricker, widths)
>>> plt.imshow(cwtmatr, extent=[-1, 1, 31, 1], cmap='PRGn', aspect='auto',
... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
>>> plt.show()
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
|
bsd-3-clause
|
alekz112/statsmodels
|
statsmodels/sandbox/examples/example_crossval.py
|
33
|
2232
|
import numpy as np
from statsmodels.sandbox.tools import cross_val
if __name__ == '__main__':
#A: josef-pktd
import statsmodels.api as sm
from statsmodels.api import OLS
#from statsmodels.datasets.longley import load
from statsmodels.datasets.stackloss import load
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt,
default_latex_fmt, default_html_fmt)
import numpy as np
data = load()
data.exog = sm.tools.add_constant(data.exog, prepend=False)
resols = sm.OLS(data.endog, data.exog).fit()
print('\n OLS leave 1 out')
for inidx, outidx in cross_val.LeaveOneOut(len(data.endog)):
res = sm.OLS(data.endog[inidx], data.exog[inidx,:]).fit()
print(data.endog[outidx], res.model.predict(res.params, data.exog[outidx,:]), end=' ')
print(data.endog[outidx] - res.model.predict(res.params, data.exog[outidx,:]))
print('\n OLS leave 2 out')
resparams = []
for inidx, outidx in cross_val.LeavePOut(len(data.endog), 2):
res = sm.OLS(data.endog[inidx], data.exog[inidx,:]).fit()
#print data.endog[outidx], res.model.predict(data.exog[outidx,:]),
#print ((data.endog[outidx] - res.model.predict(data.exog[outidx,:]))**2).sum()
resparams.append(res.params)
resparams = np.array(resparams)
print(resparams)
doplots = 1
if doplots:
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
plt.figure()
figtitle = 'Leave2out parameter estimates'
t = plt.gcf().text(0.5,
0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
for i in range(resparams.shape[1]):
plt.subplot(4, 2, i+1)
plt.hist(resparams[:,i], bins = 10)
#plt.title("Leave2out parameter estimates")
plt.show()
for inidx, outidx in cross_val.KStepAhead(20,2):
#note the following were broken because KStepAhead returns now a slice by default
print(inidx)
print(np.ones(20)[inidx].sum(), np.arange(20)[inidx][-4:])
print(outidx)
print(np.nonzero(np.ones(20)[outidx])[0][()])
|
bsd-3-clause
|
elijah513/scikit-learn
|
examples/ensemble/plot_adaboost_regression.py
|
311
|
1529
|
"""
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
PhasesResearchLab/ESPEI
|
espei/plot.py
|
1
|
46082
|
"""
Plotting of input data and calculated database quantities
"""
import warnings
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import numpy as np
import tinydb
from sympy import Symbol
from pycalphad import Model, calculate, equilibrium, variables as v
from pycalphad.core.utils import unpack_components
from pycalphad.plot.utils import phase_legend
from pycalphad.plot.eqplot import eqplot, _map_coord_to_variable, unpack_condition
from espei.error_functions.non_equilibrium_thermochemical_error import get_prop_samples
from espei.utils import bib_marker_map
from espei.core_utils import get_prop_data, filter_configurations, filter_temperatures, symmetry_filter, ravel_zpf_values
from espei.parameter_selection.utils import _get_sample_condition_dicts
from espei.sublattice_tools import recursive_tuplify, endmembers_from_interaction
from espei.utils import build_sitefractions
plot_mapping = {
'T': 'Temperature (K)',
'CPM': 'Heat Capacity (J/K-mol-atom)',
'HM': 'Enthalpy (J/mol-atom)',
'SM': 'Entropy (J/K-mol-atom)',
'CPM_FORM': 'Heat Capacity of Formation (J/K-mol-atom)',
'HM_FORM': 'Enthalpy of Formation (J/mol-atom)',
'SM_FORM': 'Entropy of Formation (J/K-mol-atom)',
'CPM_MIX': 'Heat Capacity of Mixing (J/K-mol-atom)',
'HM_MIX': 'Enthalpy of Mixing (J/mol-atom)',
'SM_MIX': 'Entropy of Mixing (J/K-mol-atom)'
}
def plot_parameters(dbf, comps, phase_name, configuration, symmetry, datasets=None, fig=None, require_data=True):
"""
Plot parameters of interest compared with data in subplots of a single figure
Parameters
----------
dbf : Database
pycalphad thermodynamic database containing the relevant parameters.
comps : list
Names of components to consider in the calculation.
phase_name : str
Name of the considered phase
configuration : tuple
Sublattice configuration to plot, such as ('CU', 'CU') or (('CU', 'MG'), 'CU')
symmetry : list
List of lists containing indices of symmetric sublattices e.g. [[0, 1], [2, 3]]
datasets : PickleableTinyDB
ESPEI datasets to compare against. If None, nothing is plotted.
fig : matplotlib.Figure
Figure in which to create the axes as subplots. A new figure is created if none is given.
require_data : bool
If True, only plot parameters that have corresponding data. Defaults to
True. Will raise an error for non-interaction configurations.
Returns
-------
None
Examples
--------
>>> # plot the LAVES_C15 (Cu)(Mg) endmember
>>> plot_parameters(dbf, ['CU', 'MG'], 'LAVES_C15', ('CU', 'MG'), symmetry=None, datasets=datasets) # doctest: +SKIP
>>> # plot the mixing interaction in the first sublattice
>>> plot_parameters(dbf, ['CU', 'MG'], 'LAVES_C15', (('CU', 'MG'), 'MG'), symmetry=None, datasets=datasets) # doctest: +SKIP
"""
deprecation_msg = (
"`espei.plot.plot_parameters` is deprecated and will be removed in ESPEI 0.9. "
"Please use `plot_endmember` or `plot_interaction` instead."
)
warnings.warn(deprecation_msg, category=FutureWarning)
em_plots = [('T', 'CPM'), ('T', 'CPM_FORM'), ('T', 'SM'), ('T', 'SM_FORM'),
('T', 'HM'), ('T', 'HM_FORM')]
mix_plots = [ ('Z', 'HM_MIX'), ('Z', 'SM_MIX')]
comps = sorted(comps)
mod = Model(dbf, comps, phase_name)
mod.models['idmix'] = 0
# This is for computing properties of formation
mod_norefstate = Model(dbf, comps, phase_name, parameters={'GHSER'+(c.upper()*2)[:2]: 0 for c in comps})
# Is this an interaction parameter or endmember?
if any([isinstance(conf, list) or isinstance(conf, tuple) for conf in configuration]):
plots = mix_plots
else:
plots = em_plots
# filter which parameters to plot by the data that exists
if require_data and datasets is not None:
filtered_plots = []
for x_val, y_val in plots:
desired_props = [y_val.split('_')[0]+'_FORM', y_val] if y_val.endswith('_MIX') else [y_val]
solver_qry = (tinydb.where('solver').test(symmetry_filter, configuration, recursive_tuplify(symmetry) if symmetry else symmetry))
data = get_prop_data(comps, phase_name, desired_props, datasets, additional_query=solver_qry)
data = filter_configurations(data, configuration, symmetry)
data = filter_temperatures(data)
if len(data) > 0:
filtered_plots.append((x_val, y_val, data))
elif require_data:
raise ValueError('Plots require datasets, but no datasets were passed.')
elif plots == em_plots and not require_data:
# How we treat temperature dependence is ambiguous when there is no data, so we raise an error
raise ValueError('The "require_data=False" option is not supported for non-mixing configurations.')
elif datasets is not None:
filtered_plots = []
for x_val, y_val in plots:
desired_props = [y_val.split('_')[0]+'_FORM', y_val] if y_val.endswith('_MIX') else [y_val]
solver_qry = (tinydb.where('solver').test(symmetry_filter, configuration, recursive_tuplify(symmetry) if symmetry else symmetry))
data = get_prop_data(comps, phase_name, desired_props, datasets, additional_query=solver_qry)
data = filter_configurations(data, configuration, symmetry)
data = filter_temperatures(data)
filtered_plots.append((x_val, y_val, data))
else:
filtered_plots = [(x_val, y_val, []) for x_val, y_val in plots]
num_plots = len(filtered_plots)
if num_plots == 0:
return
if not fig:
fig = plt.figure(figsize=plt.figaspect(num_plots))
# plot them
for i, (x_val, y_val, data) in enumerate(filtered_plots):
if y_val.endswith('_FORM'):
ax = fig.add_subplot(num_plots, 1, i+1)
ax = _compare_data_to_parameters(dbf, comps, phase_name, data, mod_norefstate, configuration, x_val, y_val, ax=ax)
else:
ax = fig.add_subplot(num_plots, 1, i+1)
ax = _compare_data_to_parameters(dbf, comps, phase_name, data, mod, configuration, x_val, y_val, ax=ax)
def dataplot(comps, phases, conds, datasets, tielines=True, ax=None, plot_kwargs=None, tieline_plot_kwargs=None) -> plt.Axes:
"""
Plot datapoints corresponding to the components, phases, and conditions.
Parameters
----------
comps : list
Names of components to consider in the calculation.
phases : list
Names of phases to consider in the calculation.
conds : dict
Maps StateVariables to values and/or iterables of values.
datasets : PickleableTinyDB
Database of phase equilibria datasets
tielines : bool
If True (default), plot the tie-lines from the data
ax : matplotlib.Axes
Default axes used if not specified.
plot_kwargs : dict
Additional keyword arguments to pass to the matplotlib plot function for points
tieline_plot_kwargs : dict
Additional keyword arguments to pass to the matplotlib plot function for tielines
Returns
-------
matplotlib.Axes
A plot of phase equilibria points as a figure
Examples
--------
>>> from espei.datasets import load_datasets, recursive_glob # doctest: +SKIP
>>> from espei.plot import dataplot # doctest: +SKIP
>>> datasets = load_datasets(recursive_glob('.', '*.json')) # doctest: +SKIP
>>> my_phases = ['BCC_A2', 'CUMG2', 'FCC_A1', 'LAVES_C15', 'LIQUID'] # doctest: +SKIP
>>> my_components = ['CU', 'MG', 'VA'] # doctest: +SKIP
>>> conditions = {v.P: 101325, v.T: (500, 1000, 10), v.X('MG'): (0, 1, 0.01)} # doctest: +SKIP
>>> dataplot(my_components, my_phases, conditions, datasets) # doctest: +SKIP
"""
indep_comps = [key for key, value in conds.items() if isinstance(key, v.X) and len(np.atleast_1d(value)) > 1]
indep_pots = [key for key, value in conds.items() if ((key == v.T) or (key == v.P)) and len(np.atleast_1d(value)) > 1]
plot_kwargs = plot_kwargs or {}
phases = sorted(phases)
# determine what the type of plot will be
if len(indep_comps) == 1 and len(indep_pots) == 1:
projection = None
elif len(indep_comps) == 2 and len(indep_pots) == 0:
projection = 'triangular'
else:
raise ValueError('The eqplot projection is not defined and cannot be autodetected. There are {} independent compositions and {} independent potentials.'.format(len(indep_comps), len(indep_pots)))
if projection is None:
x = indep_comps[0].species.name
y = indep_pots[0]
elif projection == 'triangular':
x = indep_comps[0].species.name
y = indep_comps[1].species.name
# set up plot if not done already
if ax is None:
ax = plt.gca(projection=projection)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.tick_params(axis='both', which='major', labelsize=14)
ax.grid(True)
plot_title = '-'.join([component.title() for component in sorted(comps) if component != 'VA'])
ax.set_title(plot_title, fontsize=20)
ax.set_xlabel('X({})'.format(x), labelpad=15, fontsize=20)
ax.set_xlim((0, 1))
if projection is None:
ax.set_ylabel(plot_mapping.get(str(y), y), fontsize=20)
elif projection == 'triangular':
ax.set_ylabel('X({})'.format(y), labelpad=15, fontsize=20)
ax.set_ylim((0, 1))
ax.yaxis.label.set_rotation(60)
# Here we adjust the x coordinate of the ylabel.
# We make it reasonably comparable to the position of the xlabel from the xaxis
# As the figure size gets very large, the label approaches ~0.55 on the yaxis
# 0.55*cos(60 deg)=0.275, so that is the xcoord we are approaching.
ax.yaxis.label.set_va('baseline')
fig_x_size = ax.figure.get_size_inches()[0]
y_label_offset = 1 / fig_x_size
ax.yaxis.set_label_coords(x=(0.275 - y_label_offset), y=0.5)
output = 'ZPF'
# TODO: used to include VA. Should this be added by default? Can't determine presence of VA in eq.
# Technically, VA should not be present in any phase equilibria.
# For now, don't get datasets that are a subset of the current system because this breaks mass balance assumptions in ravel_zpf_values
desired_data = datasets.search((tinydb.where('output') == output) &
(tinydb.where('components').test(lambda x: (set(x).issubset(comps + ['VA'])) and (len(set(x) - {'VA'}) == (len(indep_comps) + 1)))) &
(tinydb.where('phases').test(lambda x: len(set(phases).intersection(x)) > 0)))
# get all the possible references from the data and create the bibliography map
bib_reference_keys = sorted({entry.get('reference', '') for entry in desired_data})
symbol_map = bib_marker_map(bib_reference_keys)
# The above handled the phases as in the equilibrium, but there may be
# phases that are in the datasets but not in the equilibrium diagram that
# we would like to plot point for (they need color maps).
# To keep consistent colors with the equilibrium diagram, we will append
# the new phases from the datasets to the existing phases in the equilibrium
# calculation.
data_phases = set()
for entry in desired_data:
data_phases.update(set(entry['phases']))
new_phases = sorted(list(data_phases.difference(set(phases))))
phases.extend(new_phases)
legend_handles, phase_color_map = phase_legend(phases)
if projection is None:
# TODO: There are lot of ways this could break in multi-component situations
# plot x vs. T
y = 'T'
# handle plotting kwargs
scatter_kwargs = {'markersize': 6, 'markeredgewidth': 1}
# raise warnings if any of the aliased versions of the default values are used
possible_aliases = [('markersize', 'ms'), ('markeredgewidth', 'mew')]
for actual_arg, aliased_arg in possible_aliases:
if aliased_arg in plot_kwargs:
warnings.warn("'{0}' passed as plotting keyword argument to dataplot, but the alias '{1}' is already set to '{2}'. Use the full version of the keyword argument '{1}' to override the default.".format(aliased_arg, actual_arg, scatter_kwargs.get(actual_arg)))
scatter_kwargs.update(plot_kwargs)
eq_dict = ravel_zpf_values(desired_data, [x])
# two phase
updated_tieline_plot_kwargs = {'linewidth':1, 'color':'k'}
if tieline_plot_kwargs is not None:
updated_tieline_plot_kwargs.update(tieline_plot_kwargs)
equilibria_to_plot = eq_dict.get(1, [])
equilibria_to_plot.extend(eq_dict.get(2, []))
equilibria_to_plot.extend(eq_dict.get(3, []))
for eq in equilibria_to_plot:
# plot the scatter points for the right phases
x_points, y_points = [], []
for phase_name, comp_dict, ref_key in eq:
sym_ref = symbol_map[ref_key]
x_val, y_val = comp_dict[x], comp_dict[y]
if x_val is not None and y_val is not None:
ax.plot(x_val, y_val,
label=sym_ref['formatted'],
fillstyle=sym_ref['markers']['fillstyle'],
marker=sym_ref['markers']['marker'],
linestyle='',
color=phase_color_map[phase_name],
**scatter_kwargs)
x_points.append(x_val)
y_points.append(y_val)
if tielines and len(x_points) > 1:
# plot the tielines
if all([xx is not None and yy is not None for xx, yy in zip(x_points, y_points)]):
ax.plot(x_points, y_points, **updated_tieline_plot_kwargs)
elif projection == 'triangular':
scatter_kwargs = {'markersize': 4, 'markeredgewidth': 0.4}
# raise warnings if any of the aliased versions of the default values are used
possible_aliases = [('markersize', 'ms'), ('markeredgewidth', 'mew')]
for actual_arg, aliased_arg in possible_aliases:
if aliased_arg in plot_kwargs:
warnings.warn("'{0}' passed as plotting keyword argument to dataplot, but the alias '{1}' is already set to '{2}'. Use the full version of the keyword argument '{1}' to override the default.".format(aliased_arg, actual_arg, scatter_kwargs.get(actual_arg)))
scatter_kwargs.update(plot_kwargs)
eq_dict = ravel_zpf_values(desired_data, [x, y], {'T': conds[v.T]})
# two phase
updated_tieline_plot_kwargs = {'linewidth':1, 'color':'k'}
if tieline_plot_kwargs is not None:
updated_tieline_plot_kwargs.update(tieline_plot_kwargs)
equilibria_to_plot = eq_dict.get(1, [])
equilibria_to_plot.extend(eq_dict.get(2, []))
for eq in equilibria_to_plot: # list of things in equilibrium
# plot the scatter points for the right phases
x_points, y_points = [], []
for phase_name, comp_dict, ref_key in eq:
sym_ref = symbol_map[ref_key]
x_val, y_val = comp_dict[x], comp_dict[y]
if x_val is not None and y_val is not None:
ax.plot(x_val, y_val,
label=sym_ref['formatted'],
fillstyle=sym_ref['markers']['fillstyle'],
marker=sym_ref['markers']['marker'],
linestyle='',
color=phase_color_map[phase_name],
**scatter_kwargs)
x_points.append(x_val)
y_points.append(y_val)
if tielines and len(x_points) > 1:
# plot the tielines
if all([xx is not None and yy is not None for xx, yy in zip(x_points, y_points)]):
ax.plot(x_points, y_points, **updated_tieline_plot_kwargs)
# three phase
updated_tieline_plot_kwargs = {'linewidth':1, 'color':'r'}
if tieline_plot_kwargs is not None:
updated_tieline_plot_kwargs.update(tieline_plot_kwargs)
for eq in eq_dict.get(3,[]): # list of things in equilibrium
# plot the scatter points for the right phases
x_points, y_points = [], []
for phase_name, comp_dict, ref_key in eq:
x_val, y_val = comp_dict[x], comp_dict[y]
x_points.append(x_val)
y_points.append(y_val)
# Make sure the triangle completes
x_points.append(x_points[0])
y_points.append(y_points[0])
# plot
# check for None values
if all([xx is not None and yy is not None for xx, yy in zip(x_points, y_points)]):
ax.plot(x_points, y_points, **updated_tieline_plot_kwargs)
# now we will add the symbols for the references to the legend handles
for ref_key in bib_reference_keys:
mark = symbol_map[ref_key]['markers']
# The legend marker edge width appears smaller than in the plot.
# We will add this small hack to increase the width in the legend only.
legend_kwargs = scatter_kwargs.copy()
legend_kwargs['markeredgewidth'] = 1
legend_kwargs['markersize'] = 6
legend_handles.append(mlines.Line2D([], [], linestyle='',
color='black', markeredgecolor='black',
label=symbol_map[ref_key]['formatted'],
fillstyle=mark['fillstyle'],
marker=mark['marker'],
**legend_kwargs))
# finally, add the completed legend
ax.legend(handles=legend_handles, loc='center left', bbox_to_anchor=(1, 0.5))
return ax
def eqdataplot(eq, datasets, ax=None, plot_kwargs=None):
"""
Plot datapoints corresponding to the components and phases in the eq Dataset.
A convenience function for dataplot.
Parameters
----------
eq : xarray.Dataset
Result of equilibrium calculation.
datasets : PickleableTinyDB
Database of phase equilibria datasets
ax : matplotlib.Axes
Default axes used if not specified.
plot_kwargs : dict
Keyword arguments to pass to dataplot
Returns
-------
A plot of phase equilibria points as a figure
Examples
--------
>>> from pycalphad import equilibrium, Database, variables as v # doctest: +SKIP
>>> from pycalphad.plot.eqplot import eqplot # doctest: +SKIP
>>> from espei.datasets import load_datasets, recursive_glob # doctest: +SKIP
>>> datasets = load_datasets(recursive_glob('.', '*.json')) # doctest: +SKIP
>>> dbf = Database('my_databases.tdb') # doctest: +SKIP
>>> my_phases = list(dbf.phases.keys()) # doctest: +SKIP
>>> eq = equilibrium(dbf, ['CU', 'MG', 'VA'], my_phases, {v.P: 101325, v.T: (500, 1000, 10), v.X('MG'): (0, 1, 0.01)}) # doctest: +SKIP
>>> ax = eqplot(eq) # doctest: +SKIP
>>> ax = eqdataplot(eq, datasets, ax=ax) # doctest: +SKIP
"""
deprecation_msg = (
"`espei.plot.eqdataplot` is deprecated and will be removed in ESPEI 0.9. "
"Users depending on plotting from a `pycalphad.equilibrium` result should use "
"`pycalphad.plot.eqplot.eqplot` along with `espei.plot.dataplot` directly. "
"Note that pycalphad's mapping can offer significant reductions in calculation "
"time compared to using `equilibrium` followed by `eqplot`."
)
warnings.warn(deprecation_msg, category=FutureWarning)
# TODO: support reference legend
conds = OrderedDict([(_map_coord_to_variable(key), unpack_condition(np.asarray(value)))
for key, value in sorted(eq.coords.items(), key=str)
if (key == 'T') or (key == 'P') or (key.startswith('X_'))])
phases = list(map(str, sorted(set(np.array(eq.Phase.values.ravel(), dtype='U')) - {''}, key=str)))
comps = list(map(str, sorted(np.array(eq.coords['component'].values, dtype='U'), key=str)))
ax = dataplot(comps, phases, conds, datasets, ax=ax, plot_kwargs=plot_kwargs)
return ax
def multiplot(dbf, comps, phases, conds, datasets, eq_kwargs=None, plot_kwargs=None, data_kwargs=None):
"""
Plot a phase diagram with datapoints described by datasets.
This is a wrapper around pycalphad.equilibrium, pycalphad's eqplot, and dataplot.
Parameters
----------
dbf : Database
pycalphad thermodynamic database containing the relevant parameters.
comps : list
Names of components to consider in the calculation.
phases : list
Names of phases to consider in the calculation.
conds : dict
Maps StateVariables to values and/or iterables of values.
datasets : PickleableTinyDB
Database of phase equilibria datasets
eq_kwargs : dict
Keyword arguments passed to pycalphad equilibrium()
plot_kwargs : dict
Keyword arguments passed to pycalphad eqplot()
data_kwargs : dict
Keyword arguments passed to dataplot()
Returns
-------
A phase diagram with phase equilibria data as a figure
Examples
--------
>>> from pycalphad import Database, variables as v # doctest: +SKIP
>>> from pycalphad.plot.eqplot import eqplot # doctest: +SKIP
>>> from espei.datasets import load_datasets, recursive_glob # doctest: +SKIP
>>> datasets = load_datasets(recursive_glob('.', '*.json')) # doctest: +SKIP
>>> dbf = Database('my_databases.tdb') # doctest: +SKIP
>>> my_phases = list(dbf.phases.keys()) # doctest: +SKIP
>>> multiplot(dbf, ['CU', 'MG', 'VA'], my_phases, {v.P: 101325, v.T: 1000, v.X('MG'): (0, 1, 0.01)}, datasets) # doctest: +SKIP
"""
deprecation_msg = (
"`espei.plot.multiplot` is deprecated and will be removed in ESPEI 0.9. "
"Users depending on `multiplot` should use pycalphad's `binplot` or `ternplot` "
"followed by `espei.plot.dataplot`. Note that pycalphad's mapping can offer "
"significant reductions in calculation time compared to using `multiplot`. See "
"ESPEI's recipes for an example: "
"https://espei.org/en/latest/recipes.html#plot-phase-diagram-with-data"
)
warnings.warn(deprecation_msg, category=FutureWarning)
eq_kwargs = eq_kwargs or dict()
plot_kwargs = plot_kwargs or dict()
data_kwargs = data_kwargs or dict()
eq_result = equilibrium(dbf, comps, phases, conds, **eq_kwargs)
ax = eqplot(eq_result, **plot_kwargs)
ax = eqdataplot(eq_result, datasets, ax=ax, plot_kwargs=data_kwargs)
return ax
def plot_interaction(dbf, comps, phase_name, configuration, output, datasets=None, symmetry=None, ax=None, plot_kwargs=None, dataplot_kwargs=None) -> plt.Axes:
"""
Return one set of plotted Axes with data compared to calculated parameters
Parameters
----------
dbf : Database
pycalphad thermodynamic database containing the relevant parameters.
comps : Sequence[str]
Names of components to consider in the calculation.
phase_name : str
Name of the considered phase
configuration : Tuple[Tuple[str]]
ESPEI-style configuration
output : str
Model property to plot on the y-axis e.g. ``'HM_MIX'``, or ``'SM_MIX'``.
Must be a ``'_MIX'`` property.
datasets : tinydb.TinyDB
symmetry : list
List of lists containing indices of symmetric sublattices e.g. [[0, 1], [2, 3]]
ax : plt.Axes
Default axes used if not specified.
plot_kwargs : Optional[Dict[str, Any]]
Keyword arguments to ``ax.plot`` for the predicted data.
dataplot_kwargs : Optional[Dict[str, Any]]
Keyword arguments to ``ax.plot`` the observed data.
Returns
-------
plt.Axes
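Examples
--------
>>> # A hypothetical usage sketch; dbf and datasets are assumed to exist, as in the plot_parameters examples above
>>> ax = plot_interaction(dbf, ['CU', 'MG'], 'LAVES_C15', (('CU', 'MG'), 'MG'), 'HM_MIX', datasets=datasets)  # doctest: +SKIP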
"""
if not output.endswith('_MIX'):
raise ValueError("`plot_interaction` only supports HM_MIX, SM_MIX, or CPM_MIX outputs.")
if not plot_kwargs:
plot_kwargs = {}
if not dataplot_kwargs:
dataplot_kwargs = {}
if not ax:
ax = plt.gca()
# Plot predicted values from the database
mod = Model(dbf, comps, phase_name)
mod.models['idmix'] = 0 # TODO: better reference state handling
endpoints = endmembers_from_interaction(configuration)
first_endpoint = _translate_endmember_to_array(endpoints[0], mod.ast.atoms(v.SiteFraction))
second_endpoint = _translate_endmember_to_array(endpoints[1], mod.ast.atoms(v.SiteFraction))
grid = np.linspace(0, 1, num=100)
point_matrix = grid[None].T * second_endpoint + (1 - grid)[None].T * first_endpoint
# TODO: Real temperature support
point_matrix = point_matrix[None, None]
predicted_values = calculate(
dbf, comps, [phase_name], output=output,
T=298.15, P=101325, points=point_matrix, model=mod)[output].values.flatten()
plot_kwargs.setdefault('label', 'This work')
plot_kwargs.setdefault('color', 'k')
ax.plot(grid, predicted_values, **plot_kwargs)
# Plot the observed values from the datasets
# TODO: model exclusions handling
# TODO: better reference state handling
mod_srf = Model(dbf, comps, phase_name, parameters={'GHSER'+c.upper(): 0 for c in comps})
mod_srf.models = {'ref': mod_srf.models['ref']}
# _MIX assumption
prop = output.split('_MIX')[0]
desired_props = (f"{prop}_MIX", f"{prop}_FORM")
if datasets is not None:
solver_qry = (tinydb.where('solver').test(symmetry_filter, configuration, recursive_tuplify(symmetry) if symmetry else symmetry))
desired_data = get_prop_data(comps, phase_name, desired_props, datasets, additional_query=solver_qry)
desired_data = filter_configurations(desired_data, configuration, symmetry)
desired_data = filter_temperatures(desired_data)
else:
desired_data = []
species = unpack_components(dbf, comps)
# phase constituents are Species objects, so we need to be doing intersections with those
phase_constituents = dbf.phases[phase_name].constituents
# phase constituents must be filtered to only active
constituents = [[sp.name for sp in sorted(subl_constituents.intersection(species))] for subl_constituents in phase_constituents]
subl_dof = list(map(len, constituents))
calculate_dict = get_prop_samples(desired_data, constituents)
sample_condition_dicts = _get_sample_condition_dicts(calculate_dict, subl_dof)
interacting_subls = [c for c in recursive_tuplify(configuration) if isinstance(c, tuple)]
if (len(set(interacting_subls)) == 1) and (len(interacting_subls[0]) == 2):
# This configuration describes all sublattices with the same two elements interacting
# In general this is a high-dimensional space; just plot the diagonal to see the disordered mixing
endpoints = endmembers_from_interaction(configuration)
endpoints = [endpoints[0], endpoints[-1]]
disordered_config = True
else:
disordered_config = False
bib_reference_keys = sorted({entry.get('reference', '') for entry in desired_data})
symbol_map = bib_marker_map(bib_reference_keys)
for data in desired_data:
indep_var_data = None
response_data = np.zeros_like(data['values'], dtype=np.float_)
if disordered_config:
# Take the second element of the first interacting sublattice as the coordinate
# Because it's disordered all sublattices should be equivalent
# TODO: Fix this to filter because we need to guarantee the plot points are disordered
occ = data['solver']['sublattice_occupancies']
subl_idx = np.nonzero([isinstance(c, (list, tuple)) for c in occ[0]])[0]
if len(subl_idx) > 1:
subl_idx = int(subl_idx[0])
else:
subl_idx = int(subl_idx)
indep_var_data = [c[subl_idx][1] for c in occ]
else:
interactions = np.array([cond_dict[Symbol('YS')] for cond_dict in sample_condition_dicts])
indep_var_data = 1 - (interactions+1)/2
if data['output'].endswith('_FORM'):
# All the _FORM data we have still has the lattice stability contribution
# Need to zero it out to shift formation data to mixing
temps = data['conditions'].get('T', 298.15)
pressures = data['conditions'].get('P', 101325)
points = build_sitefractions(phase_name, data['solver']['sublattice_configurations'],
data['solver']['sublattice_occupancies'])
for point_idx in range(len(points)):
missing_variables = mod_srf.ast.atoms(v.SiteFraction) - set(points[point_idx].keys())
# Set unoccupied values to zero
points[point_idx].update({key: 0 for key in missing_variables})
# Change entry to a sorted array of site fractions
points[point_idx] = list(OrderedDict(sorted(points[point_idx].items(), key=str)).values())
points = np.array(points, dtype=np.float_)
# TODO: Real temperature support
points = points[None, None]
stability = calculate(dbf, comps, [phase_name], output=data['output'][:-5],
T=temps, P=pressures, points=points,
model=mod_srf)
response_data -= stability[data['output'][:-5]].values.squeeze()
response_data += np.array(data['values'], dtype=np.float_)
response_data = response_data.flatten()
ref = data.get('reference', '')
dataplot_kwargs.setdefault('markersize', 8)
dataplot_kwargs.setdefault('linestyle', 'none')
dataplot_kwargs.setdefault('clip_on', False)
# Cannot use setdefault because it won't overwrite previous iterations
dataplot_kwargs['label'] = symbol_map[ref]['formatted']
dataplot_kwargs['marker'] = symbol_map[ref]['markers']['marker']
dataplot_kwargs['fillstyle'] = symbol_map[ref]['markers']['fillstyle']
ax.plot(indep_var_data, response_data, **dataplot_kwargs)
ax.set_xlim((0, 1))
ax.set_xlabel(str(':'.join(endpoints[0])) + ' to ' + str(':'.join(endpoints[1])))
ax.set_ylabel(plot_mapping.get(output, output))
leg = ax.legend(loc=(1.01, 0)) # legend outside
leg.get_frame().set_edgecolor('black')
return ax
def plot_endmember(dbf, comps, phase_name, configuration, output, datasets=None, symmetry=None, x='T', ax=None, plot_kwargs=None, dataplot_kwargs=None) -> plt.Axes:
"""
Return one set of plotted Axes with data compared to calculated parameters
Parameters
----------
dbf : Database
pycalphad thermodynamic database containing the relevant parameters.
comps : Sequence[str]
Names of components to consider in the calculation.
phase_name : str
Name of the considered phase
configuration : Tuple[Tuple[str]]
ESPEI-style configuration
output : str
Model property to plot on the y-axis e.g. ``'HM'``, ``'HM_FORM'``, or ``'SM'``.
Must not be a ``'_MIX'`` property.
datasets : tinydb.TinyDB
symmetry : list
List of lists containing indices of symmetric sublattices e.g. [[0, 1], [2, 3]]
ax : plt.Axes
Default axes used if not specified.
plot_kwargs : Optional[Dict[str, Any]]
Keyword arguments to ``ax.plot`` for the predicted data.
dataplot_kwargs : Optional[Dict[str, Any]]
Keyword arguments to ``ax.plot`` the observed data.
Returns
-------
plt.Axes
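Examples
--------
>>> # A hypothetical usage sketch; dbf and datasets are assumed to exist, as in the plot_parameters examples above
>>> ax = plot_endmember(dbf, ['CU', 'MG'], 'LAVES_C15', ('CU', 'MG'), 'HM_FORM', datasets=datasets)  # doctest: +SKIP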
"""
if output.endswith('_MIX'):
raise ValueError("`plot_endmember` only supports HM, HM_FORM, SM, SM_FORM or CPM, CPM_FORM outputs.")
if x not in ('T',):
raise ValueError(f'`x` passed to `plot_endmember` must be "T" got {x}')
if not plot_kwargs:
plot_kwargs = {}
if not dataplot_kwargs:
dataplot_kwargs = {}
if not ax:
ax = plt.gca()
if datasets is not None:
solver_qry = (tinydb.where('solver').test(symmetry_filter, configuration, recursive_tuplify(symmetry) if symmetry else symmetry))
desired_data = get_prop_data(comps, phase_name, output, datasets, additional_query=solver_qry)
desired_data = filter_configurations(desired_data, configuration, symmetry)
desired_data = filter_temperatures(desired_data)
else:
desired_data = []
# Plot predicted values from the database
endpoints = endmembers_from_interaction(configuration)
if len(endpoints) != 1:
raise ValueError(f"The configuration passed to `plot_endmember` must be an endmember configuration. Got {configuration}")
if output.endswith('_FORM'):
# TODO: better reference state handling
mod = Model(dbf, comps, phase_name, parameters={'GHSER'+(c.upper()*2)[:2]: 0 for c in comps})
prop = output[:-5]
else:
mod = Model(dbf, comps, phase_name)
prop = output
endmember = _translate_endmember_to_array(endpoints[0], mod.ast.atoms(v.SiteFraction))[None, None]
# Set up the domain of the calculation
species = unpack_components(dbf, comps)
# phase constituents are Species objects, so we need to be doing intersections with those
phase_constituents = dbf.phases[phase_name].constituents
# phase constituents must be filtered to only active
constituents = [[sp.name for sp in sorted(subl_constituents.intersection(species))] for subl_constituents in phase_constituents]
calculate_dict = get_prop_samples(desired_data, constituents)
potential_values = np.asarray(calculate_dict[x] if len(calculate_dict[x]) > 0 else 298.15)
potential_grid = np.linspace(max(potential_values.min()-1, 0), potential_values.max()+1, num=100)
predicted_values = calculate(dbf, comps, [phase_name], output=prop, T=potential_grid, P=101325, points=endmember, model=mod)[prop].values.flatten()
ax.plot(potential_grid, predicted_values, **plot_kwargs)
# Plot observed values
# TODO: model exclusions handling
bib_reference_keys = sorted({entry.get('reference', '') for entry in desired_data})
symbol_map = bib_marker_map(bib_reference_keys)
for data in desired_data:
indep_var_data = None
response_data = np.zeros_like(data['values'], dtype=np.float_)
indep_var_data = np.array(data['conditions'][x], dtype=np.float_).flatten()
response_data += np.array(data['values'], dtype=np.float_)
response_data = response_data.flatten()
ref = data.get('reference', '')
dataplot_kwargs.setdefault('markersize', 8)
dataplot_kwargs.setdefault('linestyle', 'none')
dataplot_kwargs.setdefault('clip_on', False)
# Cannot use setdefault because it won't overwrite previous iterations
dataplot_kwargs['label'] = symbol_map[ref]['formatted']
dataplot_kwargs['marker'] = symbol_map[ref]['markers']['marker']
dataplot_kwargs['fillstyle'] = symbol_map[ref]['markers']['fillstyle']
ax.plot(indep_var_data, response_data, **dataplot_kwargs)
ax.set_xlabel(plot_mapping.get(x, x))
ax.set_ylabel(plot_mapping.get(output, output))
leg = ax.legend(loc=(1.01, 0)) # legend outside
leg.get_frame().set_edgecolor('black')
return ax
def _compare_data_to_parameters(dbf, comps, phase_name, desired_data, mod, configuration, x, y, ax=None):
"""
Return one set of plotted Axes with data compared to calculated parameters
Parameters
----------
dbf : Database
pycalphad thermodynamic database containing the relevant parameters.
comps : list
Names of components to consider in the calculation.
phase_name : str
Name of the considered phase
desired_data : list
ESPEI-style datasets of the desired property to compare against.
mod : Model
A pycalphad Model. The Model may or may not have the reference state zeroed out for formation properties.
configuration : tuple
Sublattice configuration to plot, such as ('CU', 'CU') or (('CU', 'MG'), 'CU')
x : str
Model property to plot on the x-axis e.g. 'T', 'HM_MIX', 'SM_FORM'
y : str
Model property to plot on the y-axis e.g. 'T', 'HM_MIX', 'SM_FORM'
ax : matplotlib.Axes
Default axes used if not specified.
Returns
-------
matplotlib.Axes
"""
species = unpack_components(dbf, comps)
# phase constituents are Species objects, so we need to be doing intersections with those
phase_constituents = dbf.phases[phase_name].constituents
# phase constituents must be filtered to only active:
constituents = [[sp.name for sp in sorted(subl_constituents.intersection(species))] for subl_constituents in phase_constituents]
subl_dof = list(map(len, constituents))
calculate_dict = get_prop_samples(desired_data, constituents)
sample_condition_dicts = _get_sample_condition_dicts(calculate_dict, subl_dof)
endpoints = endmembers_from_interaction(configuration)
interacting_subls = [c for c in recursive_tuplify(configuration) if isinstance(c, tuple)]
disordered_config = False
if (len(set(interacting_subls)) == 1) and (len(interacting_subls[0]) == 2):
# This configuration describes all sublattices with the same two elements interacting
# In general this is a high-dimensional space; just plot the diagonal to see the disordered mixing
endpoints = [endpoints[0], endpoints[-1]]
disordered_config = True
if not ax:
fig = plt.figure(figsize=plt.figaspect(1))
ax = fig.gca()
bar_chart = False
bar_labels = []
bar_data = []
if y.endswith('_FORM'):
# We were passed a Model object with zeroed out reference states
yattr = y[:-5]
else:
yattr = y
if len(endpoints) == 1:
# This is an endmember so we can just compute T-dependent stuff
Ts = calculate_dict['T']
temperatures = np.asarray(Ts if len(Ts) > 0 else 298.15)
if temperatures.min() != temperatures.max():
temperatures = np.linspace(temperatures.min(), temperatures.max(), num=100)
else:
# We only have one temperature: let's do a bar chart instead
bar_chart = True
temperatures = temperatures.min()
endmember = _translate_endmember_to_array(endpoints[0], mod.ast.atoms(v.SiteFraction))[None, None]
predicted_quantities = calculate(dbf, comps, [phase_name], output=yattr,
T=temperatures, P=101325, points=endmember, model=mod, mode='numpy')
if y == 'HM' and x == 'T':
# Shift enthalpy data so that value at minimum T is zero
predicted_quantities[yattr] -= predicted_quantities[yattr].sel(T=temperatures[0]).values.flatten()
response_data = predicted_quantities[yattr].values.flatten()
if not bar_chart:
extra_kwargs = {}
if len(response_data) < 10:
extra_kwargs['markersize'] = 20
extra_kwargs['marker'] = '.'
extra_kwargs['linestyle'] = 'none'
extra_kwargs['clip_on'] = False
ax.plot(temperatures, response_data,
label='This work', color='k', **extra_kwargs)
ax.set_xlabel(plot_mapping.get(x, x))
ax.set_ylabel(plot_mapping.get(y, y))
else:
bar_labels.append('This work')
bar_data.append(response_data[0])
elif len(endpoints) == 2:
# Binary interaction parameter
first_endpoint = _translate_endmember_to_array(endpoints[0], mod.ast.atoms(v.SiteFraction))
second_endpoint = _translate_endmember_to_array(endpoints[1], mod.ast.atoms(v.SiteFraction))
point_matrix = np.linspace(0, 1, num=100)[None].T * second_endpoint + \
(1 - np.linspace(0, 1, num=100))[None].T * first_endpoint
# TODO: Real temperature support
point_matrix = point_matrix[None, None]
predicted_quantities = calculate(dbf, comps, [phase_name], output=yattr,
T=300, P=101325, points=point_matrix, model=mod, mode='numpy')
response_data = predicted_quantities[yattr].values.flatten()
if not bar_chart:
extra_kwargs = {}
if len(response_data) < 10:
extra_kwargs['markersize'] = 20
extra_kwargs['marker'] = '.'
extra_kwargs['linestyle'] = 'none'
extra_kwargs['clip_on'] = False
ax.plot(np.linspace(0, 1, num=100), response_data, label='This work', color='k', **extra_kwargs)
ax.set_xlim((0, 1))
ax.set_xlabel(str(':'.join(endpoints[0])) + ' to ' + str(':'.join(endpoints[1])))
ax.set_ylabel(plot_mapping.get(y, y))
else:
bar_labels.append('This work')
bar_data.append(response_data[0])
else:
raise NotImplementedError('No support for plotting configuration {}'.format(configuration))
bib_reference_keys = sorted({entry.get('reference', '') for entry in desired_data})
symbol_map = bib_marker_map(bib_reference_keys)
for data in desired_data:
indep_var_data = None
response_data = np.zeros_like(data['values'], dtype=np.float_)
if x == 'T' or x == 'P':
indep_var_data = np.array(data['conditions'][x], dtype=np.float_).flatten()
elif x == 'Z':
if disordered_config:
# Take the second element of the first interacting sublattice as the coordinate
# Because it's disordered all sublattices should be equivalent
# TODO: Fix this to filter because we need to guarantee the plot points are disordered
occ = data['solver']['sublattice_occupancies']
subl_idx = np.nonzero([isinstance(c, (list, tuple)) for c in occ[0]])[0]
if len(subl_idx) > 1:
subl_idx = int(subl_idx[0])
else:
subl_idx = int(subl_idx)
indep_var_data = [c[subl_idx][1] for c in occ]
else:
interactions = np.array([cond_dict[Symbol('YS')] for cond_dict in sample_condition_dicts])
indep_var_data = 1 - (interactions+1)/2
if y.endswith('_MIX') and data['output'].endswith('_FORM'):
# All the _FORM data we have still has the lattice stability contribution
# Need to zero it out to shift formation data to mixing
mod_latticeonly = Model(dbf, comps, phase_name, parameters={'GHSER'+c.upper(): 0 for c in comps})
mod_latticeonly.models = {key: value for key, value in mod_latticeonly.models.items()
if key == 'ref'}
temps = data['conditions'].get('T', 300)
pressures = data['conditions'].get('P', 101325)
points = build_sitefractions(phase_name, data['solver']['sublattice_configurations'],
data['solver']['sublattice_occupancies'])
for point_idx in range(len(points)):
missing_variables = mod_latticeonly.ast.atoms(v.SiteFraction) - set(points[point_idx].keys())
# Set unoccupied values to zero
points[point_idx].update({key: 0 for key in missing_variables})
# Change entry to a sorted array of site fractions
points[point_idx] = list(OrderedDict(sorted(points[point_idx].items(), key=str)).values())
points = np.array(points, dtype=np.float_)
# TODO: Real temperature support
points = points[None, None]
stability = calculate(dbf, comps, [phase_name], output=data['output'][:-5],
T=temps, P=pressures, points=points,
model=mod_latticeonly, mode='numpy')
response_data -= stability[data['output'][:-5]].values.squeeze()
response_data += np.array(data['values'], dtype=np.float_)
response_data = response_data.flatten()
if not bar_chart:
extra_kwargs = {}
extra_kwargs['markersize'] = 8
extra_kwargs['linestyle'] = 'none'
extra_kwargs['clip_on'] = False
ref = data.get('reference', '')
mark = symbol_map[ref]['markers']
ax.plot(indep_var_data, response_data,
label=symbol_map[ref]['formatted'],
marker=mark['marker'],
fillstyle=mark['fillstyle'],
**extra_kwargs)
else:
bar_labels.append(data.get('reference', None))
bar_data.append(response_data[0])
if bar_chart:
ax.barh(0.02 * np.arange(len(bar_data)), bar_data,
color='k', height=0.01)
endmember_title = ' to '.join([':'.join(i) for i in endpoints])
ax.get_figure().suptitle('{} (T = {} K)'.format(endmember_title, temperatures), fontsize=20)
ax.set_yticks(0.02 * np.arange(len(bar_data)))
ax.set_yticklabels(bar_labels, fontsize=20)
# This bar chart is rotated 90 degrees, so "y" is now x
ax.set_xlabel(plot_mapping.get(y, y))
else:
ax.set_frame_on(False)
leg = ax.legend(loc='best')
leg.get_frame().set_edgecolor('black')
return ax
def _translate_endmember_to_array(endmember, variables):
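"""Return a site fraction array for an endmember: 1.0 at the position of the species occupying each sublattice, with positions following the sorted site-fraction variables."""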
site_fractions = sorted(variables, key=str)
frac_array = np.zeros(len(site_fractions))
for idx, component in enumerate(endmember):
frac_array[site_fractions.index(v.SiteFraction(site_fractions[0].phase_name, idx, component))] = 1
return frac_array
|
mit
|
costypetrisor/scikit-learn
|
sklearn/grid_search.py
|
4
|
34405
|
"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import _check_cv as check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# get complete grid and yield from it
param_grid = list(ParameterGrid(self.param_distributions))
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
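# A hypothetical usage sketch for a single split (X and y are assumed to be an
# existing dataset; SVC, KFold, and the 'accuracy' scorer are only examples):
# from sklearn.svm import SVC
# from sklearn.cross_validation import KFold
# train, test = next(iter(KFold(len(X), n_folds=3)))
# scorer = check_scoring(SVC(), scoring='accuracy')
# score, params, n_test = fit_grid_point(X, y, SVC(), {'C': 1.0},
#                                        train, test, scorer, verbose=0)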
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes; in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance; we avoid that by
# telling the Python interpreter that this subclass uses static __slots__
# instead of dynamic attributes. Furthermore we don't need any additional
# slot in the subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class ChangedBehaviorWarning(UserWarning):
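"""Warning category used to notify users that the behavior of a method or class has changed (see ``BaseSearchCV.score``)."""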
pass
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
# out is a list of 4-tuples: (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
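# For each candidate parameter setting, the n_folds consecutive entries of
# `out` are aggregated: with iid=True the mean score is weighted by the number
# of test samples in each fold, otherwise folds are weighted equally.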
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict are optimized by cross-validation.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameter names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
degree=..., gamma=..., kernel='rbf', max_iter=-1,
probability=False, random_state=None, shrinking=True,
tol=..., verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict are optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
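Examples
--------
A minimal usage sketch (added here for illustration; it is not part of the
original docstring and assumes ``scipy`` is installed for the distributions):
>>> from sklearn import svm, grid_search, datasets
>>> from scipy.stats import expon
>>> iris = datasets.load_iris()
>>> param_dist = {'kernel': ['linear', 'rbf'], 'C': expon(scale=10)}
>>> search = grid_search.RandomizedSearchCV(svm.SVC(), param_dist,
...                                         n_iter=4, random_state=0)
>>> search = search.fit(iris.data, iris.target)
>>> sorted(search.best_params_.keys())
['C', 'kernel']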
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
|
bsd-3-clause
|
nhuntwalker/astroML
|
book_figures/chapter10/fig_arrival_time.py
|
3
|
4743
|
"""
Arrival Time Analysis
---------------------
Figure 10.24
Modeling time-dependent flux based on arrival time data. The top-right panel
shows the rate r(t) = r0[1 + a sin(omega t + phi)], along with the locations
of the 104 detected photons. The remaining panels show the model contours
calculated via MCMC; dotted lines indicate the input parameters. The likelihood
used is from eq. 10.83. Note the strong covariance between phi and omega in
the bottom-right panel.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
# Hack to fix import issue in older versions of pymc
import scipy
import scipy.misc
scipy.derivative = scipy.misc.derivative
import pymc
from astroML.plotting.mcmc import plot_mcmc
from astroML.decorators import pickle_results
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Create some data
np.random.seed(1)
N_expected = 100
# define our rate function
def rate_func(t, r0, a, omega, phi):
return r0 * (1 + a * np.sin(omega * t + phi))
# define the time steps
t = np.linspace(0, 10, 10000)
Dt = t[1] - t[0]
# compute the total rate in each bin
r0_true = N_expected / (t[-1] - t[0])
a_true = 0.8
phi_true = np.pi / 4
omega_true = 4
r = rate_func(t, r0_true, a_true, omega_true, phi_true)
# randomly sample photon arrivals from the rate
x = np.random.random(t.shape)
obs = (x < r * Dt).astype(int)
print("Number of observed photons:", np.sum(obs))
#----------------------------------------------------------------------
# Set up our MCMC model
r0 = pymc.Uniform('r0', 0, 1000, value=10)
a = pymc.Uniform('a', 0, 1, value=0.5)
phi = pymc.Uniform('phi', -np.pi, np.pi, value=0)
log_omega = pymc.Uniform('log_omega', 0, np.log(10), value=np.log(4))
# uniform prior on log(omega)
@pymc.deterministic
def omega(log_omega=log_omega):
return np.exp(log_omega)
@pymc.deterministic
def rate(r0=r0, a=a, omega=omega, phi=phi):
return rate_func(t, r0, a, omega, phi)
def arrival_like(obs, rate, Dt):
"""likelihood for arrival time"""
N = np.sum(obs)
return (N * np.log(Dt)
- np.sum(rate) * Dt
+ np.sum(np.log(rate[obs > 0])))
Arrival = pymc.stochastic_from_dist('arrival',
logp=arrival_like,
dtype=np.float,
mv=True)
obs_dist = Arrival('obs_dist', rate=rate, Dt=Dt, observed=True, value=obs)
model = dict(obs_dist=obs_dist, r0=r0, a=a, phi=phi,
log_omega=log_omega, omega=omega,
rate=rate)
#------------------------------------------------------------
# Compute results (and save to a pickle file)
@pickle_results('arrival_times.pkl')
def compute_model(niter=20000, burn=2000):
S = pymc.MCMC(model)
S.sample(iter=niter, burn=burn)
traces = [S.trace(s)[:] for s in ['r0', 'a', 'phi', 'omega']]
return traces
traces = compute_model()
labels = ['$r_0$', '$a$', r'$\phi$', r'$\omega$']
limits = [(6.5, 13.5), (0.55, 1.1), (-0.3, 1.7), (3.75, 4.25)]
true = [r0_true, a_true, phi_true, omega_true]
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
# This function plots multiple panels with the traces
plot_mcmc(traces, labels=labels, limits=limits, true_values=true, fig=fig,
bins=30, colors='k')
# Plot the model of arrival times
ax = fig.add_axes([0.5, 0.75, 0.45, 0.2])
ax.fill_between(t, 0, rate_func(t, r0_true, a_true, omega_true, phi_true),
facecolor='#DDDDDD', edgecolor='black')
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_xlim(t[0], t[-1])
ax.set_ylim(0, 20)
ax.set_ylabel('$r(t)$')
# Plot the actual data
ax = fig.add_axes([0.5, 0.7, 0.45, 0.04], yticks=[])
t_obs = t[obs > 0]
ax.scatter(t_obs, np.random.RandomState(0).rand(len(t_obs)),
marker='+', color='k')
ax.set_xlim(t[0], t[-1])
ax.set_ylim(-0.3, 1.3)
ax.set_xlabel('$t$')
plt.show()
|
bsd-2-clause
|
mahak/spark
|
python/pyspark/pandas/data_type_ops/categorical_ops.py
|
5
|
2506
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from itertools import chain
from typing import Union
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.pandas._typing import Dtype, IndexOpsLike
from pyspark.pandas.data_type_ops.base import DataTypeOps
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import pandas_on_spark_type
from pyspark.sql import functions as F
class CategoricalOps(DataTypeOps):
"""
The class for binary operations of pandas-on-Spark objects with categorical types.
"""
@property
def pretty_name(self) -> str:
return "categoricals"
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return pd.Categorical.from_codes(
col, categories=self.dtype.categories, ordered=self.dtype.ordered
)
def prepare(self, col: pd.Series) -> pd.Series:
"""Prepare column when from_pandas."""
return col.cat.codes
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
dtype, spark_type = pandas_on_spark_type(dtype)
if isinstance(dtype, CategoricalDtype) and dtype.categories is None:
return index_ops.copy()
categories = index_ops.dtype.categories
if len(categories) == 0:
scol = SF.lit(None)
else:
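# Build a literal Spark map from category code to category value and look
# up each stored code to recover the original categorical values.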
kvs = chain(
*[(SF.lit(code), SF.lit(category)) for code, category in enumerate(categories)]
)
map_scol = F.create_map(*kvs)
scol = map_scol.getItem(index_ops.spark.column)
return index_ops._with_new_scol(
scol.alias(index_ops._internal.data_spark_column_names[0])
).astype(dtype)
|
apache-2.0
|
nhejazi/scikit-learn
|
sklearn/utils/testing.py
|
2
|
31011
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
import unittest
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
from nose.tools import raises
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal", "SkipTest"]
_dummy = unittest.TestCase('__init__')
assert_equal = _dummy.assertEqual
assert_not_equal = _dummy.assertNotEqual
assert_true = _dummy.assertTrue
assert_false = _dummy.assertFalse
assert_raises = _dummy.assertRaises
SkipTest = unittest.case.SkipTest
assert_dict_equal = _dummy.assertDictEqual
assert_in = _dummy.assertIn
assert_not_in = _dummy.assertNotIn
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
assert_less_equal = _dummy.assertLessEqual
assert_greater_equal = _dummy.assertGreaterEqual
try:
assert_raises_regex = _dummy.assertRaisesRegex
except AttributeError:
# Python 2.7
assert_raises_regex = _dummy.assertRaisesRegexp
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object that triggers warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
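Examples
--------
A small illustrative sketch (not part of the original docstring):
>>> def deprecated_func():
...     warnings.warn("use something else", UserWarning)
...     return 42
>>> assert_warns(UserWarning, deprecated_func)
42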
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
"""Test that a certain warning occurs with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object that triggers warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
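Examples
--------
A small illustrative sketch (not part of the original docstring):
>>> def deprecated_func():
...     warnings.warn("deprecated in 0.17", DeprecationWarning)
>>> assert_warns_message(DeprecationWarning, "deprecated", deprecated_func)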
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Check the message of every warning that belongs to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: [%s]"
% (func.__name__,
', '.join(str(warning) for warning in w)))
return result
def ignore_warnings(obj=None, category=Warning):
"""Context manager and decorator to ignore warnings.
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Parameters
----------
category : warning class, defaults to Warning.
The category to filter. If Warning, all categories will be muted.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _IgnoreWarnings(category=category)(obj)
else:
return _IgnoreWarnings(category=category)
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager and decorator.
This class allows ignoring the warnings raised by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
category : tuple of warning class, default to Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter("ignore", self.category)
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions.
Parameters
----------
exceptions : exception or tuple of exception
The exception class (or classes) expected to be raised.
function : callable
Callable object that raises the error.
*args : the positional arguments to `function`.
**kwargs : the keyword arguments to `function`.
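Examples
--------
A small illustrative sketch (not part of the original docstring):
>>> def broken():
...     raise ValueError("wrong shape of X")
>>> assert_raise_message(ValueError, "wrong shape", broken)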
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=''):
"""Assert allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : array-like or sparse matrix
First array to compare.
y : array-like or sparse matrix
Second array to compare.
rtol : float, optional
relative tolerance; see numpy.allclose
atol : float, optional
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
err_msg : string, default=''
Error message to raise.
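Examples
--------
A small illustrative sketch (not part of the original docstring):
>>> import numpy as np
>>> import scipy.sparse as sparse
>>> x_dense = np.eye(2)
>>> x_sparse = sparse.csr_matrix(x_dense)
>>> assert_allclose_dense_sparse(x_dense, x_dense + 1e-12)
>>> assert_allclose_dense_sparse(x_sparse, x_sparse.copy())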
"""
if sp.sparse.issparse(x) and sp.sparse.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
assert_array_equal(x.indices, y.indices, err_msg=err_msg)
assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
elif not sp.sparse.issparse(x) and not sp.sparse.issparse(y):
# both dense
assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
else:
raise ValueError("Can only compare two sparse matrices,"
" not a sparse matrix and an array.")
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data'; keep that in mind in the tests.
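Examples
--------
A small illustrative sketch (not part of the original docstring):
>>> from io import BytesIO
>>> matfile = BytesIO()
>>> fake_mldata({'data': np.arange(6).reshape(3, 2),
...              'label': np.arange(3)}, 'iris', matfile)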
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator",
"MultiOutputRegressor", "MultiOutputClassifier",
"OutputCodeClassifier", "OneVsRestClassifier",
"RFE", "RFECV", "BaseEnsemble", "ClassifierChain"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
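Examples
--------
A small illustrative sketch (not part of the original docstring); the exact
set of estimators returned depends on the scikit-learn version:
>>> estimators = all_estimators(type_filter='classifier')
>>> len(estimators) > 0
True
>>> 'LogisticRegression' in [name for name, cls in estimators]
True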
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if (".tests." in modname):
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator) and
c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
"""
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed."""
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def skip_if_32bit(func):
"""Test decorator that skips tests on 32bit platforms."""
@wraps(func)
def run_test(*args, **kwargs):
bits = 8 * struct.calcsize("P")
if bits == 32:
raise SkipTest('Test skipped on 32bit platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing.
Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with
some implementation of BLAS (or other libraries that manage an internal
posix thread pool) can cause a crash or a freeze of the Python process.
In practice all known packaged distributions (from Linux distros or
Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
this crash under OS X.
Under Python 3.4+ it is possible to use the `forkserver` start method
for multiprocessing to avoid this issue. However it can cause pickling
errors on interactively defined functions. It is therefore not enabled by
default.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin':
raise SkipTest(
"Possible multi-process bug with some BLAS")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
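"""Context manager exposing ``data`` through a joblib memmap stored in a
temporary folder that is cleaned up on exit.
A minimal usage sketch (added for illustration, not in the original file):
>>> import numpy as np
>>> with TempMemmap(np.ones(3)) as data_mm:
...     float(data_mm.mean())
1.0
"""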
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
class _named_check(object):
"""Wraps a check to show a useful description
Parameters
----------
check : function
Must have ``__name__`` and ``__call__``
arg_text : str
A summary of arguments to the check
"""
# Setting the description on the function itself can give incorrect results
# in failing tests
def __init__(self, check, arg_text):
self.check = check
self.description = ("{0[1]}.{0[3]}:{1.__name__}({2})".format(
inspect.stack()[1], check, arg_text))
def __call__(self, *args, **kwargs):
return self.check(*args, **kwargs)
# Utils to test docstrings
def _get_args(function, varargs=False):
"""Helper to get function arguments"""
# NOTE this works only in python >= 3.5
if sys.version_info < (3, 5):
    raise NotImplementedError("_get_args is not available for python < 3.5")
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _get_func_name(func, class_name=None):
"""Get function full name
Parameters
----------
func : callable
The function object.
class_name : string, optional (default: None)
If ``func`` is a class method and the class name is known specify
class_name for the error message.
Returns
-------
name : str
The function name.
"""
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
if class_name is not None:
parts.append(class_name)
elif hasattr(func, 'im_class'):
parts.append(func.im_class.__name__)
parts.append(func.__name__)
return '.'.join(parts)
def check_docstring_parameters(func, doc=None, ignore=None, class_name=None):
"""Helper to check docstring
Parameters
----------
func : callable
The function object to test.
doc : str, optional (default: None)
Docstring if it is passed manually to the test.
ignore : None | list
Parameters to ignore.
class_name : string, optional (default: None)
If ``func`` is a class method and the class name is known specify
class_name for the error message.
Returns
-------
incorrect : list
A list of string describing the incorrect results.
"""
from numpydoc import docscrape
incorrect = []
ignore = [] if ignore is None else ignore
func_name = _get_func_name(func, class_name=class_name)
if (not func_name.startswith('sklearn.') or
func_name.startswith('sklearn.externals')):
return incorrect
# Don't check docstring for property-functions
if inspect.isdatadescriptor(func):
return incorrect
args = list(filter(lambda x: x not in ignore, _get_args(func)))
# drop self
if len(args) > 0 and args[0] == 'self':
args.remove('self')
if doc is None:
with warnings.catch_warnings(record=True) as w:
try:
doc = docscrape.FunctionDoc(func)
except Exception as exp:
incorrect += [func_name + ' parsing error: ' + str(exp)]
return incorrect
if len(w):
raise RuntimeError('Error for %s:\n%s' % (func_name, w[0]))
param_names = []
for name, type_definition, param_doc in doc['Parameters']:
if (type_definition.strip() == "" or
type_definition.strip().startswith(':')):
param_name = name.lstrip()
# If there was no space between name and the colon
# "verbose:" -> len(["verbose", ""][0]) -> 7
# If "verbose:"[7] == ":", then there was no space
if (':' not in param_name or
param_name[len(param_name.split(':')[0].strip())] == ':'):
incorrect += [func_name +
' There was no space between the param name and '
'colon ("%s")' % name]
else:
incorrect += [func_name + ' Incorrect type definition for '
'param: "%s" (type definition was "%s")'
% (name.split(':')[0], type_definition)]
if '*' not in name:
param_names.append(name.split(':')[0].strip('` '))
param_names = list(filter(lambda x: x not in ignore, param_names))
if len(param_names) != len(args):
bad = str(sorted(list(set(param_names) ^ set(args))))
incorrect += [func_name + ' arg mismatch: ' + bad]
else:
for n1, n2 in zip(param_names, args):
if n1 != n2:
incorrect += [func_name + ' ' + n1 + ' != ' + n2]
return incorrect
|
bsd-3-clause
|
kpespinosa/BuildingMachineLearningSystemsWithPython
|
ch09/02_ceps_based_classifier.py
|
24
|
3574
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from collections import defaultdict
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.metrics import auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import confusion_matrix
from utils import plot_roc, plot_confusion_matrix, GENRE_LIST
from ceps import read_ceps
genre_list = GENRE_LIST
def train_model(clf_factory, X, Y, name, plot=False):
labels = np.unique(Y)
cv = ShuffleSplit(
n=len(X), n_iter=1, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = defaultdict(list)
precisions, recalls, thresholds = defaultdict(
list), defaultdict(list), defaultdict(list)
roc_scores = defaultdict(list)
tprs = defaultdict(list)
fprs = defaultdict(list)
clfs = [] # just to later get the median
cms = []
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf = clf_factory()
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
scores.append(test_score)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
y_pred = clf.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
cms.append(cm)
for label in labels:
y_label_test = np.asarray(y_test == label, dtype=int)
proba = clf.predict_proba(X_test)
proba_label = proba[:, label]
precision, recall, pr_thresholds = precision_recall_curve(
y_label_test, proba_label)
pr_scores[label].append(auc(recall, precision))
precisions[label].append(precision)
recalls[label].append(recall)
thresholds[label].append(pr_thresholds)
fpr, tpr, roc_thresholds = roc_curve(y_label_test, proba_label)
roc_scores[label].append(auc(fpr, tpr))
tprs[label].append(tpr)
fprs[label].append(fpr)
if plot:
for label in labels:
print("Plotting %s" % genre_list[label])
scores_to_sort = roc_scores[label]
median = np.argsort(scores_to_sort)[len(scores_to_sort) // 2]
desc = "%s %s" % (name, genre_list[label])
plot_roc(roc_scores[label][median], desc, tprs[label][median],
fprs[label][median], label='%s vs rest' % genre_list[label])
all_pr_scores = np.asarray(list(pr_scores.values())).flatten()
summary = (np.mean(scores), np.std(scores),
np.mean(all_pr_scores), np.std(all_pr_scores))
print("%.3f\t%.3f\t%.3f\t%.3f\t" % summary)
return np.mean(train_errors), np.mean(test_errors), np.asarray(cms)
def create_model():
from sklearn.linear_model.logistic import LogisticRegression
clf = LogisticRegression()
return clf
if __name__ == "__main__":
X, y = read_ceps(genre_list)
train_avg, test_avg, cms = train_model(
create_model, X, y, "Log Reg CEPS", plot=True)
cm_avg = np.mean(cms, axis=0)
cm_norm = cm_avg / np.sum(cm_avg, axis=0)
plot_confusion_matrix(cm_norm, genre_list, "ceps",
"Confusion matrix of a CEPS based classifier")
|
mit
|
benitesf/Skin-Lesion-Analysis-Towards-Melanoma-Detection
|
test/gabor/gabor_fourier_plots.py
|
1
|
3944
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import fftpack
def plot_surface3d(Z):
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.floor(Z.shape[1] / 2).astype(int)
y = np.floor(Z.shape[0] / 2).astype(int)
X = np.arange(-x, x + 1, 1)
Y = np.arange(-y, y + 1, 1)
X, Y = np.meshgrid(X, Y)
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.RdBu, linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.set_facecolor('gray')
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
def plot_surface2d(Z):
#plt.imshow(Z, cmap='Greys')
plt.imshow(Z)
#plt.gca().invert_yaxis()
plt.show()
"""
import pylab as py
py.figure(1)
py.clf()
py.imshow(Z)
py.show()
"""
def plot_gabor_fourier_2d(kernels, fouriers, nrows, ncols, figsize=(14,8)):
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
plt.gray()
fig.suptitle('Filtros de Gabor con sus respectivas transformadas de Fourier', fontsize=12)
merge = [None]*(len(kernels)+len(fouriers))
merge[::2] = kernels
merge[1::2] = fouriers
for val, ax in zip(merge, fig.axes):
ax.imshow(val, interpolation='nearest')
#ax.invert_yaxis()
ax.axis('off')
plt.show()
def plot_sum_gabor_fourier_2d(sum_kernel, sum_fourier):
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6,4))
fig.suptitle('Suma de los filtros de Gabor y Fourier', fontsize=10)
ax = axes[0]
ax.imshow(sum_kernel, cmap='Greys')
ax.axis('off')
ax = axes[1]
ax.imshow(sum_fourier)
ax.axis('off')
plt.show()
def power(image, kernel):
from scipy import ndimage as ndi
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap') ** 2 +
ndi.convolve(image, np.imag(kernel), mode='wrap') ** 2)
def fourier(kernel):
# Fourier Transform
# Take the fourier transform of the image.
F1 = fftpack.fft2(kernel.real)
# Now shift so that low spatial frequencies are in the center.
F2 = fftpack.fftshift(F1)
# the 2D power spectrum is:
psd2D = np.abs(F2)
return psd2D
"""
Generate the filter bank and plot it
----------------------------------------
"""
import sys
fmax = 1/2
ns = 2
nd = 4
v = 2
b = 1.177
from gabor_filter_banks import gabor_bank
gabor_filter_bank = gabor_bank(fmax, ns, nd)
kernels = []
fouriers = []
# Collect all the kernels from the filter bank
for gabor in gabor_filter_bank:
kernels.append(gabor.kernel)
# Compute the Fourier transform of each kernel
for kernel in kernels:
fouriers.append(fourier(kernel))
kernels_real = []
# Collect the real components of the kernels
for kernel in kernels:
kernels_real.append(kernel.real)
#plot_gabor_fourier_2d(kernels_real, fouriers, ns, nd*2)
"""
----------------------------------------
"""
"""
Show the sum of all the Gabor filters and their Fourier transforms
-------------------------------------------------------
"""
from skimage.transform import resize
fourier_resize = []
for f in fouriers:
fourier_resize.append(resize(f, (100,100)))
sum_fourier = np.zeros((100,100))
for val in fourier_resize:
sum_fourier += val
kernel_resize = []
for k in kernels_real:
kernel_resize.append(resize(k, (100,100)))
sum_kernel = np.zeros((100,100))
for val in kernel_resize:
sum_kernel += val
plot_surface2d(sum_fourier)
#plot_sum_gabor_fourier_2d(sum_kernel, sum_fourier)
"""
-------------------------------------------------------
"""
|
mit
|
chairmanmeow50/Brainspawn
|
brainspawn/plots/plot.py
|
1
|
2326
|
""" Module for plots. Plots with one matplotlib subplot should extend from
this class. Otherwise if multiple plots are needed, must extend from actual
BasePlot.
"""
import gtk
from abc import ABCMeta, abstractmethod
from plots.base_plot import BasePlot
from plots.configuration import Configuration
import settings
class Plot(BasePlot):
"""Plot class.
In order to add plots to the visualizer, you will want to
inherit from this class.
Note that subclasses must call the base class constructor.
"""
__metaclass__ = ABCMeta
def __init__(self, main_controller, nengo_obj, capability):
""" Plot constructor.
Initializes default config values for all plots, and sets up the plot
view.
Args:
main_controller (VisualizerController): The top-level controller
of the visualizer.
nengo_obj (Nengo): The nengo object this plot is visualizing.
capability (Capability): The capability of the object that this
graph is visualizing.
"""
super(Plot, self).__init__(main_controller, nengo_obj, capability)
self.axes = self.figure.add_subplot(111)
self.axes.patch.set_alpha(0.0)
self.init_default_config()
def init_default_config(self):
""" Sets default config values for all plots.
The values contained in this dictionary are used to configure the plot.
For convenience in title string formatting,
we set 'TARGET' and 'DATA' to default values of the
target object, and represented data, respectively.
"""
super(Plot, self).init_default_config()
self.config['title'] = Configuration(
configurable=True, display_name="Title", data_type='text',
value='{TARGET} - {DATA}', function=self.set_title)
def set_title(self, title):
""" Returns the title.
"""
self.axes.set_title(self.title)
def set_default_xlim(self, end_time, x_width):
""" Sets x axes to be a constant width, meaning it won't change in
scale.
"""
if end_time > x_width:
self.axes.set_xlim([end_time - x_width, end_time])
else:
self.axes.set_xlim([0, x_width])
|
bsd-3-clause
|
IshankGulati/scikit-learn
|
examples/linear_model/plot_ols_3d.py
|
350
|
2040
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
|
bsd-3-clause
|
sonnyhu/scikit-learn
|
examples/feature_selection/plot_rfe_with_cross_validation.py
|
161
|
1380
|
"""
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
|
bsd-3-clause
|
abelfunctions/abelfunctions
|
abelfunctions/differentials.py
|
1
|
23366
|
r"""Differentials :mod:`abelfunctions.differentials`
================================================
This module contains functions for computing a basis of holomorphic
differentials of a Riemann surface given by a complex plane algebraic curve
:math:`f \in \mathbb{C}[x,y]`. A differential :math:`\omega = h(x,y)dx` defined
on a Riemann surface :math:`X` is holomorphic on :math:`X` if it is holomorphic
at every point on :math:`X`.
The function :func:`differentials` computes the basis of holomorphic
differentials from an input algebraic curve :math:`f = f(x,y)`. The
differentials themselves are encapsulated in a :class:`Differential` Cython
class.
Classes
-------
.. autosummary::
Differential
Functions
---------
.. autosummary::
differentials
mnuk_conditions
References
----------
.. [Mnuk] M. Mnuk, "An algebraic approach to computing adjoint curves", Journal
of Symbolic Computation, vol. 23 (2-3), pp. 229-40, 1997.
Examples
--------
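A short sketch of intended use (added for illustration; assumes a Sage
session in which abelfunctions is importable)::
    sage: R.<x,y> = QQ[]
    sage: f = y**3 + 2*x**3*y - x**7
    sage: from abelfunctions.differentials import differentials_numerators
    sage: differentials_numerators(f)  # numerators of a holomorphic basis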
Contents
--------
"""
from abelfunctions.divisor import Divisor
from abelfunctions.integralbasis import integral_basis
from abelfunctions.puiseux import puiseux
from abelfunctions.singularities import singularities, _transform
from sage.all import infinity, CC, fast_callable
from sage.rings.polynomial.all import PolynomialRing
from sage.rings.rational_field import QQ
from sage.rings.qqbar import QQbar
import numpy
def mnuk_conditions(g, b, generic_adjoint):
"""Determine the Mnuk conditions on the coefficients of :math:`P`.
Determine the conditions on the coefficients `c` of `P` at the integral
basis element `b` modulo the curve `g = g(u,v)`. See [Mnuk] for details.
Parameters
----------
g : curve
An algebraic curve.
b : integral basis function
An element of the basis of the integral closure of the coordinate
ring of `g`. See :func:`abelfunctions.integralbasis.integral_basis`.
generic_adjoint : polynomial
A generic adjoint polynomial as provided by :func:`differentials`. Only
one instance is created for caching and performance purposes.
Returns
-------
conditions : list
A list of expressions from which a system of equations is build to
determine the differentials.
"""
# extract rings. the generic adjoint should be a member of R[*c][u,v] where
# *c is a vector of the indeterminants. we will need to convert it to a
# polynomial in R[u,v,*c] and then back (see below)
R = g.parent()
S = generic_adjoint.parent()
B = S.base_ring()
c = B.gens()
T = QQbar[R.variable_names() + B.variable_names()]
# compute b_num(x,y) * P(x,y) and reduce modulo the defining polynomial g.
# we do this by casting the polynomial into the ring QQbar(x,*c)[y]. (the
# coefficients of y in g need to be units)
B = PolynomialRing(QQbar, [R.variable_names()[0]] + list(B.variable_names()))
Q = B.fraction_field()[R.variable_names()[1]]
u,v = map(Q,R.gens())
numer = b.numerator()
denom = b.denominator()
expr = numer(u,v) * generic_adjoint(u,v)
modulus = g(u,v)
r_reduced_mod_g = expr % modulus
# now mod out by the denominator to get the remaining component, R(x,y). we
# need to cast into the ring QQbar[y,*c][x] in order to do so. (note that
# we don't need a base fraction field since the denominator is univariate
# and therefore the leading coefficient is always a unit)
u,v = map(T, R.gens())
r = r_reduced_mod_g(v).numerator()
r_reduced_mod_denom = r.polynomial(u) % T(denom).polynomial(u)
# finally, coerce the result to QQbar[*c][x,y] in order to obtain the
# coefficients as linear combinations of the c_ij's.
r = T(r_reduced_mod_denom(u)) # first need to coerce to "largest" ring, T
u, v = map(S, R.gens())
c = [S(z) for z in c]
args = [u, v] + c
r = r(*args)
conditions = r.coefficients()
return conditions
def recenter_curve(g, singular_point):
r"""Returns a curve centered at a given singular point.
Given a singular point :math:`(x : y : z) = (\alpha : \beta : \gamma)` on a
Riemann surface, :func:`recenter_curve` returns an affine curve :math:`h =
h(u,v)` such that the singularity occurs at :math:`u = 0` where
* :math:`u,v = x,y` if :math:`\gamma = 1`
* :math:`u,v = x,z` if :math:`\gamma = 0`
:func:`recenter_curve` is written in such a way to preserve the base ring
of the original curve in the case when it's a polynomial ring. For example,
if :math:`g \in R[c][x,y]` then :math:`h \in R[c][u,v]`.
See Also
--------
abelfunctions.singularities._transform : recenters a given curve at the
singular point such that the singularity occurs at :math:`u = u0`
"""
# recenter the curve and adjoint polynomial at the singular point: find
# the affine plane u,v such that the singularity occurs at u=0
gsing,u0,v0 = _transform(g,singular_point)
R = gsing.parent()
u,v = R.gens()
h = gsing(u+u0,v)
return h
def differentials_numerators(f):
"""Return the numerators of a basis of holomorphic differentials on a Riemann
surface.
Parameters
----------
f : plane algebraic curve
Returns
-------
differentials : list
A list of :class:`Differential`s representing *a* basis of Abelian
differentials of the first kind.
"""
# homogenize and compute total degree
R = f.parent().change_ring(QQbar)
x,y = R.gens()
d = f.total_degree()
# construct the generalized adjoint polynomial. we want to think of it as
# an element of B[*c][x,y] where B is the base ring of f and *c are the
# indeterminates
cvars = ['c_%d_%d'%(i,j) for i in range(d-2) for j in range(d-2)]
vars = list(R.variable_names()) + cvars
C = PolynomialRing(QQbar, cvars)
S = PolynomialRing(C, [x,y])
T = PolynomialRing(QQbar, vars)
c = S.base_ring().gens()
x,y = S(x),S(y)
P = sum(c[j+(d-2)*i] * x**i * y**j
for i in range(d-2) for j in range(d-2)
if i+j <= d-3)
# for each singular point [x:y:z] = [alpha:beta:gamma], map f onto the
# "most convenient and appropriate" affine subspace, (u,v), and center at
# u=0. determine the conditions on P
singular_points = singularities(f)
conditions = []
for singular_point, _ in singular_points:
# recenter the curve and adjoint polynomial at the singular point: find
# the affine plane u,v such that the singularity occurs at u=0
g = recenter_curve(f, singular_point)
Ptilde = recenter_curve(P, singular_point)
# compute the integral basis at the recentered singular point
# and determine the Mnuk conditions of the adjoint polynomial
b = integral_basis(g)
for bi in b:
conditions_bi = mnuk_conditions(g, bi, Ptilde)
conditions.extend(conditions_bi)
# reduce the general adjoint modulo the ideal generated by the integral
# basis conditions. the coefficients of the remaining c_ij's form the
# numerators of a basis of abelian differentials of the first kind.
#
# additionally, we try to coerce the conditions over QQ for speed. it's
# questionable in this situation whether there is a noticeable performance
# gain but it does suppress the "slow toy implementation" warning.
try:
T = T.change_ring(QQ)
ideal = T.ideal(conditions)
basis = ideal.groebner_basis()
except:
pass
ideal = T.ideal(conditions)
basis = ideal.groebner_basis()
P_reduced = P(T(x), T(y))
if basis != [0]:
P_reduced = P_reduced.reduce(basis)
U = R[S.base_ring().variable_names()]
args = [U(x),U(y)] + [U(ci) for ci in c]
Pc = P_reduced(*args)
numerators = Pc.coefficients()
return numerators
def differentials(RS):
r"""Returns a basis for the space of Abelian differentials of the first kind on
the Riemann surface obtained from the curve `f`.
Parameters
----------
f : curve
A plane algebraic curve.
Returns
-------
diffs : list
A holomorphic differentials basis.
"""
f = RS.f.change_ring(QQbar)
R = f.parent()
x,y = R.gens()
dfdy = f.derivative(y)
numers = differentials_numerators(f)
diffs = [AbelianDifferentialFirstKind(RS, numer, dfdy) for numer in numers]
return diffs
def validate_differentials(differential_list, genus):
"""Confirm that the proposed differentials have correct properties.
Parameters
----------
diff_list: list
A list of Differentials whose properties are to be validated
genus: int
Genus of the Riemann surface
Returns
-------
is_valid: bool
A bool indicating whether the differentials are valid
Notes
-----
The present conditions are very general. More detailed tests will likely
be added in the future.
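Examples
--------
A minimal sketch (``X`` is a hypothetical Riemann surface object of the
kind accepted by :func:`differentials`; not executed here):

>>> omegas = differentials(X)                      # doctest: +SKIP
>>> validate_differentials(omegas, X.genus())      # doctest: +SKIP
True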
"""
is_valid = True
try:
# Check types
assert(all(isinstance(diff, Differential) for diff in differential_list))
# Check that the number of differentials matches the genus
assert(len(differential_list) == genus)
# Check that they are all defined on the same surface
if len(differential_list) > 0:
riemann_surface = differential_list[0].RS
assert(all(diff.RS is riemann_surface for diff in differential_list))
except AssertionError:
is_valid = False
return is_valid
class Differential:
"""A differential one-form which can be defined on a Riemann surface.
Attributes
----------
numer, denom : MultivariatePolynomial
Fast multivariate polynomial objects representing the numerator
and denominator of the differential.
Methods
-------
eval
as_numer_denom
as_sympy_expr
Notes
-----
To reduce the number of discriminant points to check for computing the
valuation divisor we keep separate the numerator and denominator of the
Differential. This behavior may change after implementing different types
of differentials.
"""
def __init__(self, RS, *args):
"""Create a differential on the Riemann surface `RS`.
"""
if (len(args) < 1) or (len(args) > 2):
raise ValueError('Instantiate Differential with Sympy expression '
'or numerator/denominator pair.')
# determine the numerator and denominator of the differentials
if len(args) == 1:
self.numer = args[0].numerator()
self.denom = args[0].denominator()
elif len(args) == 2:
self.numer = args[0]
self.denom = args[1]
x,y = RS.f.parent().gens()
self.RS = RS
self.differential = self.numer / self.denom
self.numer_n = fast_callable(self.numer.change_ring(CC), vars=[x,y],
domain=numpy.complex)
self.denom_n = fast_callable(self.denom.change_ring(CC), vars=[x,y],
domain=numpy.complex)
def __repr__(self):
return str(self.differential)
def __call__(self, *args, **kwds):
return self.eval(*args, **kwds)
def eval(self, *args, **kwds):
r"""Evaluate the differential at the complex point :math:`(x,y)`.
"""
val = self.numer_n(*args, **kwds) / self.denom_n(*args, **kwds)
return numpy.complex(val)
def centered_at_place(self, P, order=None):
r"""Rewrite the differential in terms of the local coordinates at `P`.
If `P` is a regular place, then returns `self` as a sympy
expression. Otherwise, if `P` is a discriminant place
:math:`P(t) = \{x(t), y(t)\}` then returns
.. math::
\omega |_P = q(x(t),y(t)) x'(t) / \partial_y f(x(t),y(t)).
Parameters
----------
P : Place
order : int, optional
Passed to :meth:`PuiseuxTSeries.eval_y`.
Returns
-------
sympy.Expr
"""
# by default, non-discriminant places do not store Puiseux series
# expansions. this might change in the future
if P.is_discriminant():
p = P.puiseux_series
else:
p = puiseux(self.RS.f, P.x, P.y)[0]
p.extend(order)
# substitute Puiseux series expansion into the differential and expand
# as a Laurent series
xt = p.xpart
yt = p.ypart.add_bigoh(p.order)
dxdt = xt.derivative()
omega = self.numer(xt,yt) * dxdt / self.denom(xt,yt)
return omega
def localize(self, *args, **kwds):
r"""Same as :meth:`centered_at_place`."""
return self.centered_at_place(*args, **kwds)
def evaluate(self, gamma, t):
r"""Evaluates `omega` along the path at `N` uniform points.
.. note::
Note: right now it doesn't matter what the values in `t`
are. This function will simply turn `t` into a bunch of
uniformly distributed points between 0 and 1.
Parameters
----------
omega : Differential
t : double[:]
An array of `t` between 0 and 1.
Returns
-------
complex[:]
The differential omega evaluated along the path at `N` points.
"""
return gamma.evaluate(self, t)
def _find_necessary_xvalues(self):
r"""Returns a list of x-points over which the places appearing in the
valuation divisor are found.
:py:meth:`valuation_divisor` requires a necessary list of x-values from
which to compute places which may appear in the valuation divisor of
the differential.
In the case when `self.denom` is equal to :math:`\partial_y f` we
simply use the discriminant points of the curve.
Parameters
----------
none
Returns
-------
list
"""
# we need to work over QQbar anyway
f = self.RS.f.change_ring(QQbar)
R = f.parent()
x,y = R.gens()
# get possible x-values from the numerator by computing the roots of
# the resolvent with the curve f
numer = self.numer
res = f.resultant(numer,y).univariate_polynomial()
numer_roots = res.roots(ring=QQbar, multiplicities=False)
# get possible x-values from the denominator. in the case when the
# denominator is dfdy these are simply the discriminant points
denom = self.differential.denominator()
if denom == f.derivative(y):
denom_roots = self.RS.discriminant_points
else:
res = f.resultant(denom,y).univariate_polynomial()
denom_roots = res.roots(ring=QQbar, multiplicities=False)
# finally, the possible x-points contributed by dx are the discriminant
# points of the curve
discriminant_points = self.RS.discriminant_points
# form the set of x-values over which to compute places. reorder
# entries such that x=0 and x=oo appear first because differential
# numerators tend to be monomial, resulting in better performance.
xvalues = []
roots = set([]).union(numer_roots)
roots = roots.union(denom_roots)
roots = roots.union(discriminant_points)
if 0 in roots:
xvalues.append(0)
roots.discard(0)
xvalues.append(infinity) # account for all places at infinity
xvalues.extend(roots)
return xvalues
def valuation_divisor(self, **kwds):
r"""Returns the valuation divisor of the differential.
This is a generic algorithm for computing valuation divisors and should
only be used if nothing is known about the differential in question.
If the differential is Abelian of the first kind (holomorphic) then
create an instance of :class:`AbelianDifferentialFirstKind`. Similarly,
if the differential is Abelian of the second kind then create an
instance of :class:`AbelianDifferentialSecondKind`. These implement
versions of :meth:`valuation_divisor` that use properties of the
differential to save on time.
Parameters
----------
none
Returns
-------
Divisor
"""
xvalues = self._find_necessary_xvalues()
# for each xvalue, compute the places above it and determine the
# valuation of the differential over each of these places
D = Divisor(self.RS,0)
genus = self.RS.genus()
for alpha in xvalues:
places_above_alpha = self.RS(alpha)
for P in places_above_alpha:
n = P.valuation(self)
D += n*P
# the valuation divisor of a generic meromorphic differential is still
# canonical. check the degree condition
target_genus = 2*genus - 2
if D.degree != target_genus:
raise ValueError(
'Could not compute valuation divisor of %s: '
'did not reach genus requirement.'%self)
return D
def plot(self, gamma, N=256, grid=False, **kwds):
r"""Plot the differential along the RiemannSurfacePath `gamma`.
Parameters
----------
gamma : RiemannSurfacePath
A path along which to evaluate the differential.
N : int
Number of interpolating points to use when plotting the
value of the differential along the path `gamma`
grid : boolean
(Default: `False`) If true, draw gridlines at each "segment"
of the parameterized RiemannSurfacePath. See the
`RiemannSurfacePath` documentation for more information.
Returns
-------
matplotlib.Figure
"""
import matplotlib.pyplot as plt # XXX switch to Sage plotting
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.hold(True)
nseg = len(gamma.segments)
t = numpy.linspace(0, 1, N // nseg)  # integer number of points per segment
for k in range(nseg):
segment = gamma.segments[k]
osegment = numpy.array(self.evaluate(segment,t),dtype=complex)
tsegment = (t + k) / nseg
ax.plot(tsegment, osegment.real, 'b')
ax.plot(tsegment, osegment.imag, 'r--')
# plot gridlines at the interface between each set of segments
if grid:
ticks = numpy.linspace(0,1,len(gamma.segments)+1)
ax.xaxis.set_ticks(ticks)
ax.grid(True, which='major')
return fig
def as_numer_denom(self):
"""Returns the differential as a numerator, denominator pair.
Returns
-------
list, sympy.Expr
Note
----
Artifact syntax from Sympy implementation days.
"""
return self.numer, self.denom
def as_expression(self):
"""Returns the differential as a Sympy expression.
Returns
-------
sympy.Expr
"""
return self.differential
class AbelianDifferentialFirstKind(Differential):
def valuation_divisor(self, proof=False, **kwds):
r"""Returns the valuation divisor of the Abelian differential of the first kind.
Because Abelian differentials of the first kind are holomorphic on the
Riemann surface, the valuation divisor is of the form
.. math::
(\omega)_{val} = p_1 P_1 + \cdots + p_m P_m
where :math:`\omega` has a zero of multiplicity :math:`p_k` at the
place :math:`P_k`.
Parameters
----------
proof : bool
If set to `True`, will provably return the valuation divisor by
computing the valuation at every necessary place on `X`. Slow.
Notes
-----
This valuation divisor overload takes advantage of the fact that the
differential admits no poles. Therefore, as places on the Riemann
surface are checked, the degree of the valuation divisor is
non-decreasing. We can terminate the search the moment the degree
reaches :math:`2g-2`. If `proof=True` then ignore this trick and
compute over every possible place.
"""
xvalues = self._find_necessary_xvalues()
# for each xvalue, compute the places above it and determine valuation
# of the differential over each of these places.
D = Divisor(self.RS,0)
genus = self.RS.genus()
target_genus = 2*genus - 2
for alpha in xvalues:
places_above_alpha = self.RS(alpha)
for P in places_above_alpha:
n = P.valuation(self)
D += n*P
# abelian differentials of the first kind should have no poles
if n < 0:
raise ValueError(
'Could not compute valuation divisor of %s: '
'found a pole of differential of first kind.'%self)
# break out if the target degree is met
if (D.degree == target_genus) and (not proof):
return D
if D.degree != target_genus:
raise ValueError('Could not compute valuation divisor of %s: '
'did not reach genus requirement.'%self)
return D
class AbelianDifferentialSecondKind(Differential):
r"""Defines an Abelian Differential of the second kind.
An Abelian differential of the second kind is one constructed in the
following way: given a place :math:`P \in X` and a positive integer
:math:`m` an Abelian differential of second kind is a meromorphic
differential with a pole only at :math:`P` of order :math:`m+1`.
"""
def valuation_divisor(self, **kwds):
r"""Returns the valuation divisor of the Abelian differential of the second
kind.
Parameters
----------
none
Returns
-------
Divisor
"""
xvalues = self._find_necessary_xvalues()
# for each xvalue, compute the places above it and determine valuation
# of the differential over each of these places.
D = Divisor(self.RS,0)
genus = self.RS.genus()
target_genus = 2*genus - 2
num_poles = 0
for alpha in xvalues:
places_above_alpha = self.RS(alpha)
for P in places_above_alpha:
n = P.valuation(self)
D += n*P
# differentials of the second kind should have a single
# pole. raise an error if more are found
if n < 0: num_poles += 1
if num_poles > 1:
raise ValueError(
'Could not compute valuation divisor of %s: '
'found more than one pole.'%self)
# break out if (a) the degree requirement is met and (b) the
# pole was found.
if (D.degree == target_genus) and (num_poles):
return D
if D.degree != target_genus:
raise ValueError('Could not compute valuation divisor of %s: '
'did not reach genus requirement.'%self)
return D
|
mit
|
3324fr/spinalcordtoolbox
|
dev/tamag/old/msct_get_centerline_from_labels.py
|
1
|
10205
|
#!/usr/bin/env python
import numpy as np
import commands, sys
# Get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# Append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
sys.path.append('/home/tamag/code')
from msct_image import Image
from msct_parser import Parser
import nibabel
import os
import time
import sct_utils as sct
from sct_orientation import get_orientation, set_orientation
from sct_process_segmentation import b_spline_centerline
from scipy import interpolate, ndimage
from msct_nurbs import NURBS
class ExtractCenterline :
def __init__(self):
self.list_image = []
self.list_file = []
self.centerline = []
self.dimension = [0, 0, 0, 0, 0, 0, 0, 0]
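# Typical use (hypothetical file names): add one or more label or
# segmentation volumes of identical dimensions with addfiles(), then
# extract the centerline with getCenterline() or writeCenterline(), e.g.
#   extracter = ExtractCenterline()
#   extracter.addfiles('labels.nii.gz')
#   extracter.writeCenterline('centerline.txt')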
def addfiles(self, file):
path_data, file_data, ext_data = sct.extract_fname(file)
#check that files are same size
if len(self.list_file) > 0 :
self.dimension = sct.get_dimension(self.list_file[0])
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(file)
#if self.dimension != (nx, ny, nz, nt, px, py, pz, pt) :
if self.dimension[0:3] != (nx, ny, nz) or self.dimension[4:7] != (px, py, pz) :
# Return error and exit programm if not same size
print('\nError: Files are not of the same size.')
sys.exit()
# Add file if same size
self.list_file.append(file)
image_input = Image(file)
self.list_image.append(image_input)
print('\nFile', file_data+ext_data,' added to the list.')
def compute(self):
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(self.list_file[0])
# Define output image (size matters)
image_concatenation = self.list_image[0].copy()
image_concatenation.data *= 0
image_output = self.list_image[0].copy()
image_output.data *= 0
# Concatenate all files by addition
for i in range(0, len(self.list_image)):
for s in range(0, nz) :
image_concatenation.data[:,:,s] = image_concatenation.data[:,:,s] + self.list_image[i].data[:,:,s] #* (1/len(self.list_image))
# get center of mass of the centerline/segmentation
sct.printv('\nGet center of mass of the concatenate file...')
z_centerline = [iz for iz in range(0, nz, 1) if image_concatenation.data[:, :, iz].any()]
nz_nonz = len(z_centerline)
x_centerline = [0 for iz in range(0, nz_nonz, 1)]
y_centerline = [0 for iz in range(0, nz_nonz, 1)]
# Calculate centerline coordinates and create image of the centerline
for iz in range(0, nz_nonz, 1):
x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(image_concatenation.data[:, :, z_centerline[iz]])
points = [[x_centerline[n],y_centerline[n], z_centerline[n]] for n in range(len(z_centerline))]
nurbs = NURBS(3, 1000, points)
P = nurbs.getCourbe3D()
x_centerline_fit = P[0]
y_centerline_fit = P[1]
z_centerline_fit = P[2]
#x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
for iz in range(0, z_centerline_fit.shape[0], 1):
image_output.data[x_centerline_fit[iz], y_centerline_fit[iz], z_centerline_fit[iz]] = 1
return image_output
def getCenterline(self, type='', output_file_name=None, verbose=0):
# Compute the centerline and save it into a image file of type "type"
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(self.list_file[0])
# Define output image (size matters)
image_concatenation = self.list_image[0].copy()
image_concatenation.data *= 0
image_output = self.list_image[0].copy()
image_output.data *= 0
# Concatenate all files by addition
for i in range(0, len(self.list_image)):
for s in range(0, nz) :
image_concatenation.data[:,:,s] = image_concatenation.data[:,:,s] + self.list_image[i].data[:,:,s] #* (1/len(self.list_image))
# print image_concatenation.data[:, :, 414]  # leftover debug output
# get center of mass of the centerline/segmentation
sct.printv('\nGet center of mass of the concatenate file...')
z_centerline = [iz for iz in range(0, nz, 1) if image_concatenation.data[:, :, iz].any()]
nz_nonz = len(z_centerline)
x_centerline = [0 for iz in range(0, nz_nonz, 1)]
y_centerline = [0 for iz in range(0, nz_nonz, 1)]
# Calculate centerline coordinates and create image of the centerline
for iz in range(0, nz_nonz, 1):
x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(image_concatenation.data[:, :, z_centerline[iz]])
#x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
points = [[x_centerline[n], y_centerline[n], z_centerline[n]] for n in range(nz_nonz)]
nurbs = NURBS(3, 1000, points, nbControl=None)
P = nurbs.getCourbe3D()
x_centerline_fit = P[0]
y_centerline_fit = P[1]
z_centerline_fit = P[2]
if verbose==1 :
import matplotlib.pyplot as plt
#Creation of a vector x that takes into account the distance between the labels
x_display = [0 for i in range(x_centerline_fit.shape[0])]
y_display = [0 for i in range(y_centerline_fit.shape[0])]
for i in range(0, nz_nonz, 1):
x_display[z_centerline[i]-z_centerline[0]] = x_centerline[i]
y_display[z_centerline[i]-z_centerline[0]] = y_centerline[i]
plt.figure(1)
plt.subplot(2,1,1)
#plt.plot(z_centerline,x_centerline, 'ro')
plt.plot(z_centerline_fit, x_display, 'ro')
plt.plot(z_centerline_fit, x_centerline_fit)
plt.xlabel("Z")
plt.ylabel("X")
plt.title("x and x_fit coordinates")
plt.subplot(2,1,2)
#plt.plot(z_centerline,y_centerline, 'ro')
plt.plot(z_centerline_fit, y_display, 'ro')
plt.plot(z_centerline_fit, y_centerline_fit)
plt.xlabel("Z")
plt.ylabel("Y")
plt.title("y and y_fit coordinates")
plt.show()
for iz in range(0, z_centerline_fit.shape[0], 1):
image_output.data[int(round(x_centerline_fit[iz])), int(round(y_centerline_fit[iz])), z_centerline_fit[iz]] = 1
#image_output.save(type)
file_load = nibabel.load(self.list_file[0])
data = file_load.get_data()
hdr = file_load.get_header()
print '\nWrite NIFTI volumes...'
img = nibabel.Nifti1Image(image_output.data, None, hdr)
if output_file_name != None :
file_name = output_file_name
else: file_name = 'generated_centerline.nii.gz'
nibabel.save(img,file_name)
# to view results
print '\nDone !'
print '\nTo view results, type:'
print 'fslview '+file_name+' &\n'
def writeCenterline(self, output_file_name=None):
# Compute the centerline and write the float coordinates into a txt file
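# Each output line has the form "<z (int)> <x (float)> <y (float)>",
# one line per fitted centerline point (see the write loop below).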
nx, ny, nz, nt, px, py, pz, pt = sct.get_dimension(self.list_file[0])
# Define output image (size matters)
image_concatenation = self.list_image[0].copy()
image_concatenation.data *= 0
image_output = self.list_image[0].copy()
image_output.data *= 0
# Concatenate all files by addition
for i in range(0, len(self.list_image)):
for s in range(0, nz) :
image_concatenation.data[:,:,s] = image_concatenation.data[:,:,s] + self.list_image[i].data[:,:,s] #* (1/len(self.list_image))
# get center of mass of the centerline/segmentation
sct.printv('\nGet center of mass of the concatenate file...')
z_centerline = [iz for iz in range(0, nz, 1) if image_concatenation.data[:, :, iz].any()]
nz_nonz = len(z_centerline)
x_centerline = [0 for iz in range(0, nz_nonz, 1)]
y_centerline = [0 for iz in range(0, nz_nonz, 1)]
# Calculate centerline coordinates and create image of the centerline
for iz in range(0, nz_nonz, 1):
x_centerline[iz], y_centerline[iz] = ndimage.measurements.center_of_mass(image_concatenation.data[:, :, z_centerline[iz]])
#x_centerline_fit, y_centerline_fit,x_centerline_deriv,y_centerline_deriv,z_centerline_deriv = b_spline_centerline(x_centerline,y_centerline,z_centerline)
points = [[x_centerline[n], y_centerline[n], z_centerline[n]] for n in range(nz_nonz)]
nurbs = NURBS(3, 1000, points)
P = nurbs.getCourbe3D()
x_centerline_fit = P[0]
y_centerline_fit = P[1]
z_centerline_fit = P[2]
# Create output text file
if output_file_name != None :
file_name = output_file_name
else: file_name = 'generated_centerline.txt'
sct.printv('\nWrite text file...')
#file_results = open("../"+file_name, 'w')
file_results = open(file_name, 'w')
for i in range(0, z_centerline_fit.shape[0], 1):
file_results.write(str(int(z_centerline_fit[i])) + ' ' + str(x_centerline_fit[i]) + ' ' + str(y_centerline_fit[i]) + '\n')
file_results.close()
#return file_name
# =======================================================================================================================
# Start program
#=======================================================================================================================
if __name__ == "__main__":
parser = Parser(__file__)
parser.usage.set_description('Class to process centerline extraction from labels.')
parser.add_option()
arguments = parser.parse(sys.argv[1:])
image = Image(arguments["-i"])
image.changeType('minimize')
|
mit
|
ryfeus/lambda-packs
|
LightGBM_sklearn_scipy_numpy/source/sklearn/neighbors/regression.py
|
8
|
10967
|
"""Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
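# distance-weighted average per output column:
#   y_pred[:, j] = sum_i(w_i * y_ij) / sum_i(w_i)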
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated with the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of float, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
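# per-query weighted average of the targets of all neighbors found
# within the radius (the weight vector differs for each query point)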
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
|
mit
|
bchappet/dnfpy
|
src/dnfpyUtils/stats/clusterMap1.py
|
1
|
1863
|
from dnfpy.core.map2D import Map2D
import numpy as np
from sklearn.cluster import DBSCAN
import scipy.spatial.distance as dist
from dnfpyUtils.stats.clusteMap import ClusterMap
class ClusterMap1(ClusterMap):
"""
For a single bubble: the cluster is computed simply as the barycenter.
Params:
    "continuity" : float. If different from 0.0, we assume that the cluster is continuous.
        A continuous cluster tolerates a loss of activity for up to `continuity` seconds;
        otherwise the cluster is deleted.
        We add the last cluster to the current coordinates,
        then deduce which label the new cluster receives.
        The first iteration determines the labels for the next ones.
    "threshold" : activity threshold above which values are considered
"expectedNumberOfCluster" : 1
Results:
_data = np array (2) with cluster barycenter coords X,Y:
"""
def __init__(self,name,size=0,dt=0.1,threshold=0.4,
sizeNpArr=1,continuity=1.0,
**kwargs):
super().__init__(name=name,size=size,dt=dt,threshold=threshold,
clustSize=None, min_samples=None, sizeNpArr=sizeNpArr,
continuity=continuity,expectedNumberOfCluster=1,
**kwargs)
def _compute(self,size,np_arr,threshold,clustSize_,continuity,dt):
maxArr = np.max(np_arr)
coords = np.where(np_arr > maxArr/1.2)
self.nbActivation = len(coords[0])
#if nbActivation > 0 and nbActivation < np_arr.shape[0]*1.6:
#print("threshold : ",nbActMax)
if self.nbActivation > 0 :
self._data= np.mean(coords,axis=0)
else:
self._data=[np.nan,np.nan]
def _onParamsUpdate(self,clustSize,sizeNpArr):
clustSize_ = clustSize * sizeNpArr
return dict(clustSize_=clustSize_)
|
gpl-2.0
|
kevalds51/sympy
|
sympy/plotting/plot.py
|
55
|
64797
|
"""Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
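A short sketch of the ``_backend`` workflow described above (assuming the
matplotlib backend is available and that ``plot`` accepts a ``show``
keyword, as in recent SymPy versions; not executed here):

>>> from sympy import symbols                     # doctest: +SKIP
>>> from sympy.plotting import plot               # doctest: +SKIP
>>> x = symbols('x')                              # doctest: +SKIP
>>> p = plot(x**2, show=False)                    # doctest: +SKIP
>>> p.show()                                      # doctest: +SKIP
>>> fig = p._backend.fig                          # doctest: +SKIP
>>> ax = p._backend.ax                            # doctest: +SKIP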
"""
from __future__ import print_function, division
from inspect import getargspec
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.core.compatibility import range
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
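A minimal usage sketch (assuming the default matplotlib backend; the
series class used here is the one defined later in this module):

>>> from sympy import symbols                                    # doctest: +SKIP
>>> x = symbols('x')                                             # doctest: +SKIP
>>> series = LineOver1DRangeSeries(x**2, (x, -5, 5))             # doctest: +SKIP
>>> p = Plot(series, title='parabola', xlabel='x', ylabel='y')   # doctest: +SKIP
>>> p.show()                                                     # doctest: +SKIP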
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
#Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif ((p[0] is None and q[1] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
sample(param_array[i], param_array[i + 1], point_a,
point_b, depth + 1)
#Sample further if one of the end points is None (i.e. a complex
#value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and two ranges."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly a repetition of SurfaceOver2DRangeSeries.
#XXX: Presently not used in any of those functions.
#XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor=s.line_color, edgecolor='None')
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", s.line_color])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
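# Illustrative sketch (assumption: interactive use with numpy imported as np):
# ``centers_of_segments`` simply averages consecutive samples, e.g.
# >>> centers_of_segments(np.array([0., 1., 2.]))
# array([ 0.5,  1.5])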
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
# Workaround plotting piecewise (#8577):
# workaround for `lambdify` in `.experimental_lambdify` fails
# to return numerical values in some cases. Lower-level fix
# in `lambdify` is possible.
vector_a = (x - y).astype(np.float)
vector_b = (z - y).astype(np.float)
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
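# Illustrative sketch (assumption: interactive use with numpy imported as np):
# three collinear points give cos(theta) == -1, so ``flat`` returns True,
# while a bent configuration does not.
# >>> flat(np.array([0., 0.]), np.array([1., 1.]), np.array([2., 2.]))
# True
# >>> flat(np.array([0., 0.]), np.array([1., 1.]), np.array([2., 0.]))
# False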
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
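# Illustrative sketch (the interval objects used by the implicit plotting code
# expose ``start`` and ``end``; a namedtuple is used here only as a
# hypothetical stand-in): each rectangle becomes four corners plus a None
# separator so matplotlib's ``fill`` draws disjoint patches.
# >>> from collections import namedtuple
# >>> I = namedtuple('I', 'start end')
# >>> _matplotlib_list([(I(0, 1), I(0, 2))])
# ([0, 0, 1, 1, None], [0, 2, 2, 0, None])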
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the function. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence, repeated
plots of the same expression can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of a single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when ``adaptive`` is set to ``False``. The function
is uniformly sampled at ``nb_of_points`` points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set them.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
The plotting uses an adaptive algorithm which samples recursively to
accurately plot the functions. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence, repeated
plots of the same expressions can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when ``adaptive`` is set to ``False``. The
function is uniformly sampled at ``nb_of_points`` points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of the two variables ``x`` and ``y``.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
``nb_of_points_u`` points.
``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
``nb_of_points_v`` points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set().union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
# Cannot handle the case where the expression length is 3, as it is
# not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set().union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
|
bsd-3-clause
|
ysig/BioClassSim
|
source/classify/classifier.py
|
1
|
1697
|
import numpy as np
from sklearn import svm
def kernelization(X,t=0):
# for t in 1..3 the input matrix X is assumed to be symmetric
if(t==1):
#spectrum clip
e,v = np.linalg.eig(X)
ep = np.maximum.reduce([e,np.zeros(e.shape[0])])
# reconstruct with the clipped spectrum: S = V diag(ep) V^T
S = np.dot(v,np.dot(np.diag(ep),v.T))
elif(t==2):
#spectrum flip
e,v = np.linalg.eig(X)
ep = np.abs(e)
# reconstruct with the flipped spectrum: S = V diag(|e|) V^T
S = np.dot(v,np.dot(np.diag(ep),v.T))
elif(t==3):
#spectrum shift
e,v = np.linalg.eig(X)
minS = np.min(e)
minS = min(minS,0)
if (minS==0):
S = X
else:
# shift the whole spectrum up by |minS| so the smallest eigenvalue becomes 0
S = np.dot(v,np.dot(np.diag(e - minS),v.T))
elif(t==4):
#spectrum square
S = np.dot(X,X.T)
else:
#leave as is
S = X
return S
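# Illustrative sketch (assumption: interactive use): spectrum clip (t=1) turns
# an indefinite symmetric similarity matrix into a positive semi-definite one
# that can safely be used as an SVM kernel.
# >>> X = np.array([[1.0, 0.9], [0.9, -0.2]])   # symmetric, one negative eigenvalue
# >>> np.all(np.linalg.eigvals(kernelization(X, t=1)) >= -1e-10)
# True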
class classifier:
def __init__(self):
pass
def learn(self):
pass
def classify(self):
pass
class SVM(classifier):
def __init__(self):
self._clf = None
def learn_mat(self,X,labels,probability=False):
# input is in the form of a valid (precomputed) kernel matrix
# the probability parameter determines whether the fitted SVM
# can later return class membership probabilities (predict_prob)
self._clf = svm.SVC(kernel='precomputed',probability = probability)
self._clf.fit(X,labels)
def classify(self,X_test):
return self._clf.predict(X_test)
def predict_prob(self,X_test):
return self._clf.predict_proba(X_test)
def decision_function(self,X_test):
return self._clf.decision_function(X_test)
def getClassifier(self):
return self._clf
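# Illustrative, hedged usage sketch (not part of the original module): build a
# positive semi-definite kernel with ``kernelization`` and train the SVM
# wrapper on it. The toy similarity matrix and labels below are made up purely
# for illustration.
if __name__ == "__main__":
    sim = np.array([[1.0, 0.8, 0.1, 0.0],
                    [0.8, 1.0, 0.2, 0.1],
                    [0.1, 0.2, 1.0, 0.7],
                    [0.0, 0.1, 0.7, 1.0]])   # pairwise similarities, 4 samples
    labels = [0, 0, 1, 1]
    K = kernelization(sim, t=1)               # spectrum clip -> valid kernel
    clf = SVM()
    clf.learn_mat(K, labels)
    # at prediction time the rows are test-vs-train kernel values; reusing K
    # here simply classifies the training samples themselves
    print(clf.classify(K))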
|
apache-2.0
|
ucbtrans/sumo-project
|
examples/10_cars/runner-update_6_9_16.py
|
1
|
18135
|
#!/usr/bin/env python
#@file runner.py
import os
import sys
import optparse
import subprocess
import random
import pdb
import matplotlib.pyplot as plt
import math
import numpy, scipy.io
sys.path.append(os.path.join('..', '..', 'utils'))
# import python modules from $SUMO_HOME/tools directory
try:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(
__file__)), '..', "tools"))
sys.path.append(os.path.join(os.environ.get("SUMO_HOME", os.path.join(
os.path.dirname(os.path.realpath(
__file__)), "..")), "tools"))
from sumolib import checkBinary
except ImportError:
sys.exit("please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')")
from getTrajectory import *
import traci
PORT = 8873 # the port used for communicating with your sumo instance
# designates the phases definitions, one letter for each direction and turn type, this is for intersection 13
NSGREEN = "GGGgrrrrGGGrrrr"
NSYELLOW = "yyygrrrryyyrrrr"
TURN1 = "rrrGrrrrrrrrrrr" # the phase for cars turning
CLEAR1 = "rrryrrrrrrrrrrr"
WEGREEN = "rrrrGGGgrrrGGGg"
WEYELLOW = "rrrryyygrrryyyg"
TURN2 = "rrrrrrrGrrrrrrG" # the second phase for cars turning
CLEAR2 = "rrrrrrryrrrrrry"
# An example of a potential cycle for the traffic signal, 1 second each step
# NS pass goes during i=0-9 and WE pass goes during i=16-33
NS_END = 29; NS_START = 23; WE_END = 101; WE_START = 95
PROGRAM = [NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, #10
NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, #10
NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, #11
NSYELLOW, NSYELLOW, NSYELLOW, NSYELLOW, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, CLEAR1, CLEAR1, # 17 # change number of TURN1 to change turning duration
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #10
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #8
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #10
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #8
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #10
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #8
WEYELLOW, WEYELLOW, WEYELLOW, WEYELLOW, WEYELLOW, WEYELLOW, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, CLEAR2, CLEAR2] #18
#PROGRAM WITH NO ACCIDENTS
'''
PROGRAM = [NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, #10
NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, #10
NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, NSGREEN, #11
NSYELLOW, NSYELLOW, NSYELLOW, NSYELLOW, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, TURN1, CLEAR1, CLEAR1, # 17 # change number of TURN1 to change turning duration
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #10
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #8
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #10
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #8
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #10
WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, WEGREEN, #8
WEYELLOW, WEYELLOW, WEYELLOW, WEYELLOW, WEYELLOW, WEYELLOW, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, TURN2, CLEAR2, CLEAR2] #18
'''
print len(PROGRAM)
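# Illustrative note (sketch, assuming the 0.1 s --step-length configured at the
# bottom of this file): run() advances the signal with
#     programPointer = (step/10) % len(PROGRAM)
# so each PROGRAM entry is held for 10 simulation steps (1 s) and the cycle
# repeats every len(PROGRAM) seconds; e.g. step 450 maps to PROGRAM[45], one of
# the TURN1 phases.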
# Runs the simulation, and allows you to change traffic phase
def run():
## execute the TraCI control loop
traci.init(PORT)
programPointer = 0 # initiates at start # len(PROGRAM) - 1 # initiates at end
step = 0
# Keeps track of current queue length in each direction
queue_east = 0
queue_north = 0
queue_west = 0
queue_south = 0
# Flow counters, currently double counts cars
flow_east = 0
flow_north = 0
flow_west = 0
flow_south = 0
# Counters for soft reset at 30 minutes
flow_east_be = 0
flow_north_be = 0
flow_west_be = 0
flow_south_be = 0
# Keeps track of the last car through each sensor
last_east1 = ""
last_north1 = ""
last_west1 = ""
last_south1 = ""
last_east2 = ""
last_north2 = ""
last_west2 = ""
last_south2 = ""
last_east3 = ""
last_east_t1 = ""
last_north_t1 = ""
last_west_t1 = ""
last_south_t1 = ""
last_east_t2 = ""
last_north_t2 = ""
last_west_t2 = ""
last_south_t2 = ""
while traci.simulation.getMinExpectedNumber() > 0 and step <= 60*30: # 0.1 hours
traci.simulationStep() # advance a simulation step
# sets next phase in the program cycle
#programPointer = (int(programPointer + 1) % len(PROGRAM))
programPointer = (step/10)%len(PROGRAM)
#print programPointer
# gets number of vehicles in the induction area in the last step, this is currently not being used
# numPriorityVehicles = traci.inductionloop.getLastStepVehicleNumber("south_inner1")
###################################### SOUTH ######################################
structure1 = traci.inductionloop.getVehicleData("south_inner1")
structure2 = traci.inductionloop.getVehicleData("south_inner2")
structure3 = traci.inductionloop.getVehicleData("south_outer1")
structure4 = traci.inductionloop.getVehicleData("south_outer2")
structure5 = traci.inductionloop.getVehicleData("south_check1")
structure6 = traci.inductionloop.getVehicleData("south_check2")
# Detecting a full queue using method 1
if (structure3 and structure3[0][0] == last_south_t1 and structure3[0][3] == -1) or (structure5 and structure5[0][0] == last_south_t1 and structure5[0][3] == -1): # in case we detect the back is still
if (structure4 and structure4[0][0] == last_south_t2 and structure4[0][3] == -1) or (structure6 and structure6[0][0] == last_south_t2 and structure6[0][3] == -1):
if structure1 and structure2 and structure1[0][0] == last_south1 and structure2[0][0] == last_south2: # in case we detect the front is still
# use getLastStepMeanSpeed instead?
if (structure1[0][3] == -1) and (structure2[0][3] == -1): # all four cars are still
#if queue_south > 24: # we are already almost full (one car didn't get detected), method 2
#print "South Queue Full"
queue_south = 26
for car in (structure1):
if structure1 and car[0] != last_south1 and car[0] != last_south2:
last_south1 = car[0]
queue_south -= 1
flow_south += 1
for car in (structure2):
if structure2 and car[0] != last_south1 and car[0] != last_south2:
last_south2 = car[0]
queue_south -= 1
flow_south += 1
for car in (structure3):
if structure3 and car[0] != last_south_t1 and car[0] != last_south_t2:
last_south_t1 = car[0]
queue_south += 1
flow_south += 1
for car in (structure4):
if structure4 and car[0] != last_south_t1 and car[0] != last_south_t2:
last_south_t2 = car[0]
queue_south += 1
flow_south += 1
if queue_south < 0:
queue_south = 0
###################################### WEST ######################################
structure1 = traci.inductionloop.getVehicleData("west_inner1")
structure2 = traci.inductionloop.getVehicleData("west_inner2")
structure3 = traci.inductionloop.getVehicleData("west_outer1")
structure4 = traci.inductionloop.getVehicleData("west_outer2")
structure5 = traci.inductionloop.getVehicleData("west_check1")
structure6 = traci.inductionloop.getVehicleData("west_check2")
# Detecting a full queue using method 1
if (structure3 and structure3[0][0] == last_west_t1 and structure3[0][3] == -1) or (structure5 and structure5[0][0] == last_west_t1 and structure5[0][3] == -1): # in case we detect the back is still
if (structure4 and structure4[0][0] == last_west_t2 and structure4[0][3] == -1) or (structure6 and structure6[0][0] == last_west_t2 and structure6[0][3] == -1):
if structure1 and structure2 and structure1[0][0] == last_west1 and structure2[0][0] == last_west2: # in case we detect the front is still
if (structure1[0][3] == -1) and (structure2[0][3] == -1): # all four cars are still
#if queue_west > 24: # we are already almost full (one car didn't get detected), method 2
#print "West Queue Full"
queue_west = 26
for car in (structure1):
if structure1 and car[0] != last_west1 and car[0] != last_west2:
last_west1 = car[0]
queue_west -= 1
flow_west += 1
for car in (structure2):
if structure2 and car[0] != last_west1 and car[0] != last_west2:
last_west2 = car[0]
queue_west -= 1
flow_west += 1
for car in (structure3):
if structure3 and car[0] != last_west_t1 and car[0] != last_west_t2:
last_west_t1 = car[0]
queue_west += 1
flow_west += 1
for car in (structure4):
if structure4 and car[0] != last_west_t1 and car[0] != last_west_t2:
last_west_t2 = car[0]
queue_west += 1
flow_west += 1
if queue_west < 0:
queue_west = 0
###################################### NORTH ######################################
structure1 = traci.inductionloop.getVehicleData("north_inner1")
structure2 = traci.inductionloop.getVehicleData("north_inner2")
structure3 = traci.inductionloop.getVehicleData("north_outer1")
structure4 = traci.inductionloop.getVehicleData("north_outer2")
if structure1 and structure1[0][0] != last_north1:
last_north1 = structure1[0][0]
queue_north -= 1
flow_north += 1
if structure2 and structure2[0][0] != last_north2:
last_north2 = structure2[0][0]
queue_north -= 1
flow_north += 1
if structure3 and structure3[0][0] != last_north_t1:
last_north_t1 = structure3[0][0]
queue_north += 1
flow_north += 1
if structure4 and structure4[0][0] != last_north_t2:
last_north_t2 = structure4[0][0]
queue_north += 1
flow_north += 1
if queue_north < 0:
queue_north = 0
###################################### EAST ######################################
structure1 = traci.inductionloop.getVehicleData("east_inner1")
structure2 = traci.inductionloop.getVehicleData("east_inner2")
structure3 = traci.inductionloop.getVehicleData("east_outer1")
structure4 = traci.inductionloop.getVehicleData("east_outer2")
structure5 = traci.inductionloop.getVehicleData("east_branch")
for car in (structure1):
if structure1 and car[0] != last_east1 and car[0] != last_east2:
last_east1 = car[0]
queue_east -= 1
flow_east += 1
for car in (structure2):
if structure2 and car[0] != last_east1 and car[0] != last_east2:
last_east2 = car[0]
queue_east -= 1
flow_east += 1
for car in (structure3):
if structure3 and car[0] != last_east_t1:
last_east_t1 = car[0]
queue_east += 1
flow_east += 1
for car in (structure4):
if structure4 and car[0] != last_east_t2:
last_east_t2 = car[0]
queue_east += 1
flow_east += 1
for car in (structure5):
if structure5 and car[0] != last_east3:
last_east3 = car[0] # branch
queue_east -= 1
flow_east += 1
if queue_east < 0:
queue_east = 0
###################################### LIGHT CONTROL ######################################
light_control = False
if light_control:
if (queue_east + queue_west) < (queue_north + queue_south): # if the vertical pressure is higher
if programPointer == NS_END:
# print "restarting NS"
# NS is currently ending, go back
programPointer = NS_START
# elif programPointer > WE_START:
# # WE is currently active, skip to end of phase
# programPointer = max(WE_END, programPointer)
elif (queue_east + queue_west) > (queue_north + queue_south): # then horizontal pressure is higher
if programPointer == WE_END:
# print "restarting WE"
# WE is currently ending, restart
programPointer = WE_START
# elif programPointer < NS_END:
# # NS is currently active, skip to end of phase
# programPointer = NS_END
if step == 60 * 30:
flow_east_be = flow_east
flow_west_be = flow_west
flow_north_be = flow_north
flow_south_be = flow_south
if step == 60 * 60 * 1.5: # (step % (60*30) == 0) and (step > 0):
print "----"
# print(str(flow_east) + " " + str(flow_west) + " " + str(flow_north) + " " + str(flow_south))
print (flow_east - flow_east_be)
print (flow_west - flow_west_be)
print (flow_north - flow_north_be)
print (flow_south - flow_south_be)
print "----"
################################# WEST PLATOONING #################################
# If the signal is about to open, create platoons
platooning = True
if platooning:
if (programPointer >= 45 and programPointer <= 49):
cars = traci.lane.getLastStepVehicleIDs("G5_0") #previously G5_0
print "here"
#pdb.set_trace()
# iterate through cars in order of closest to light
#print "-------------------------------------"+str(step)
for car in reversed(cars):
print reversed(cars)
# if traci.vehicle.getPosition(car): # potential check to add to see if car is past a certain point; not necessary
# print traci.vehicle.getRoute(car)
# check if the vehicle is automatic
type = traci.vehicle.getTypeID(car)
if type == "CarA":
# If the car is automatic, add to platoon here
# print ("Reducing minimum gap and increasing speed")
# print traci.vehicle.getPosition(car)
traci.vehicle.setMinGap(car, 0.1) # temporarily set its minimum gap, or other property
traci.vehicle.setTau(car,0.5)
#traci.vehicle.setSpeed(car, 70) # set its speed
traci.vehicle.setColor(car,(0,0,255,0))
# if it is manual, stop making the platoon, since no cars behind can accelerate anyways
#continue
#pdb.set_trace()
#pdb.set_trace()
# sets traffic light at intersection 13 at the phase indicated
traci.trafficlights.setRedYellowGreenState("13", PROGRAM[programPointer])
step += 1
#print str(step)
traci.close()
sys.stdout.flush()
#get_options function for SUMO
def get_options():
optParser = optparse.OptionParser()
optParser.add_option("--nogui", action="store_true",
default=True, help="run the commandline version of sumo")
options, args = optParser.parse_args()
return options
# this is the main entry point of this script
if __name__ == "__main__":
options = get_options()
# this script has been called from the command line. It will start sumo as a
# server, then connect and run
if (options.nogui):
sumoBinary = checkBinary('sumo')
else:
sumoBinary = checkBinary('sumo-gui')
# this is the normal way of using traci. sumo is started as a
# subprocess and then the python script connects and runs
sumoProcess = subprocess.Popen([sumoBinary, "-c", "../../networks/huntington_colorado/huntcol.sumocfg","--step-length", "0.1", "--tripinfo-output",
"tripinfo.xml", "--netstate-dump","test.xml", "--fcd-output","fcd.xml", "--remote-port", str(PORT)], stdout=sys.stdout, stderr=sys.stderr)
run()
sumoProcess.wait()
file_name = 'fcd.xml'
veh_id = inputnumber() #do vehicles 22,31 for 100% automated, vehicles 2,8 for all manual
#veh_id = str([66,72,74,87,90,108,114,120]) #100%automated vehicles
t,dist = trajectoryData(file_name,veh_id)
for i in range(len(dist)):
plt.plot(t[i][:len(t[i])-1],dist[i])
plt.xlabel('Time (s)')
plt.ylabel('Distance Travelled')
plt.title('Trajectory')
plt.axis([0, 80, 0, 350])
#plt.legend(['Veh ' + str for str in veh_id])
plt.show()
'''
plt.plot(t[0][:80],abs(dist[0][:80]-dist[1][:80]))
plt.xlabel('Time (s)')
plt.ylabel('Distance Travelled')
plt.title('Trajectory')
plt.legend(['Veh ' + str for str in veh_id])
plt.show()
'''
|
bsd-2-clause
|
mne-tools/mne-python
|
logo/generate_mne_logos.py
|
13
|
7174
|
# -*- coding: utf-8 -*-
"""
===============================================================================
Script 'mne logo'
===============================================================================
This script makes the logo for MNE.
"""
# @author: drmccloy
# Created on Mon Jul 20 11:28:16 2015
# License: BSD (3-clause)
import numpy as np
import os.path as op
import matplotlib.pyplot as plt
from matplotlib import rcParams
from scipy.stats import multivariate_normal
from matplotlib.path import Path
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch, Ellipse
from matplotlib.colors import LinearSegmentedColormap
# manually set values
dpi = 300.
center_fudge = np.array([15, 0]) # compensate for font bounding box padding
tagline_scale_fudge = 0.97 # to get justification right
tagline_offset_fudge = np.array([0, -100.])
# font, etc
rcp = {'font.sans-serif': ['Primetime'], 'font.style': 'normal',
'font.weight': 'black', 'font.variant': 'normal', 'figure.dpi': dpi,
'savefig.dpi': dpi, 'contour.negative_linestyle': 'solid'}
plt.rcdefaults()
rcParams.update(rcp)
# initialize figure (no axes, margins, etc)
fig = plt.figure(1, figsize=(5, 3), frameon=False, dpi=dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# fake field data
delta = 0.01
x = np.arange(-8.0, 8.0, delta)
y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
xy = np.array([X, Y]).transpose(1, 2, 0)
Z1 = multivariate_normal.pdf(xy, mean=[-5.0, 0.9],
cov=np.array([[8.0, 1.0], [1.0, 7.0]]) ** 2)
Z2 = multivariate_normal.pdf(xy, mean=[2.6, -2.5],
cov=np.array([[15.0, 2.5], [2.5, 2.5]]) ** 2)
Z = Z2 - 0.7 * Z1
# color map: field gradient (yellow-red-gray-blue-cyan)
# yrtbc = {
# 'red': ((0, 1, 1), (0.4, 1, 1), (0.5, 0.5, 0.5), (0.6, 0, 0), (1, 0, 0)),
# 'blue': ((0, 0, 0), (0.4, 0, 0), (0.5, 0.5, 0.5), (0.6, 1, 1), (1, 1, 1)), # noqa
# 'green': ((0, 1, 1), (0.4, 0, 0), (0.5, 0.5, 0.5), (0.6, 0, 0), (1, 1, 1)), # noqa
# }
yrtbc = {'red': ((0.0, 1.0, 1.0), (0.5, 1.0, 0.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0), (0.5, 0.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)),
'alpha': ((0.0, 1.0, 1.0), (0.4, 0.8, 0.8), (0.5, 0.2, 0.2),
(0.6, 0.8, 0.8), (1.0, 1.0, 1.0))}
# color map: field lines (red | blue)
redbl = {'red': ((0., 1., 1.), (0.5, 1., 0.), (1., 0., 0.)),
'blue': ((0., 0., 0.), (0.5, 0., 1.), (1., 1., 1.)),
'green': ((0., 0., 0.), (1., 0., 0.)),
'alpha': ((0., 0.4, 0.4), (1., 0.4, 0.4))}
mne_field_grad_cols = LinearSegmentedColormap('mne_grad', yrtbc)
mne_field_line_cols = LinearSegmentedColormap('mne_line', redbl)
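# Illustrative note (sketch): each channel in the segment dicts above is a list
# of (x, value_below, value_above) anchors, and sampling the colormap linearly
# interpolates between them, e.g. (values approximate):
# >>> mne_field_line_cols(0.0)   # pure red, alpha 0.4
# (1.0, 0.0, 0.0, 0.4)
# >>> mne_field_line_cols(1.0)   # pure blue, alpha 0.4
# (0.0, 0.0, 1.0, 0.4)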
# plot gradient and contour lines
im = plt.imshow(Z, cmap=mne_field_grad_cols, aspect='equal')
cs = plt.contour(Z, 9, cmap=mne_field_line_cols, linewidths=1)
plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())]
# create MNE clipping mask
mne_path = TextPath((0, 0), 'MNE')
dims = mne_path.vertices.max(0) - mne_path.vertices.min(0)
vert = mne_path.vertices - dims / 2.
mult = (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted (origin at top left)
offset = plot_dims / 2. - center_fudge
mne_clip = Path(offset + vert * mult, mne_path.codes)
ax.add_patch(PathPatch(mne_clip, color='w', zorder=0, linewidth=0))
# apply clipping mask to field gradient and lines
im.set_clip_path(mne_clip, transform=im.get_transform())
for coll in cs.collections:
coll.set_clip_path(mne_clip, transform=im.get_transform())
# get final position of clipping mask
mne_corners = mne_clip.get_extents().corners()
# add tagline
rcParams.update({'font.sans-serif': ['Cooper Hewitt'], 'font.weight': 'light'})
tag_path = TextPath((0, 0), 'MEG + EEG ANALYSIS & VISUALIZATION')
dims = tag_path.vertices.max(0) - tag_path.vertices.min(0)
vert = tag_path.vertices - dims / 2.
mult = tagline_scale_fudge * (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted
offset = mne_corners[-1] - np.array([mne_clip.get_extents().size[0] / 2.,
-dims[1]]) - tagline_offset_fudge
tag_clip = Path(offset + vert * mult, tag_path.codes)
tag_patch = PathPatch(tag_clip, facecolor='k', edgecolor='none', zorder=10)
ax.add_patch(tag_patch)
yl = ax.get_ylim()
yy = np.max([tag_clip.vertices.max(0)[-1],
tag_clip.vertices.min(0)[-1]])
ax.set_ylim(np.ceil(yy), yl[-1])
# only save actual image extent plus a bit of padding
plt.draw()
static_dir = op.join(op.dirname(__file__), '..', 'doc', '_static')
assert op.isdir(static_dir)
plt.savefig(op.join(static_dir, 'mne_logo.svg'), transparent=True)
# modify to make an icon
data_dir = op.join(op.dirname(__file__), '..', 'mne', 'icons')
ax.patches.pop(-1) # no tag line for our icon
ax.collections[:] = []
bounds = np.array([
[mne_path.vertices[:, ii].min(), mne_path.vertices[:, ii].max()]
for ii in range(2)])
bounds *= (plot_dims / dims)
xy = np.mean(bounds, axis=1) - [100, 0]
r = np.diff(bounds, axis=1).max() * 1.2
ax.add_patch(Ellipse(xy, r, r, clip_on=False, zorder=-1, fc='k'))
ax.set_ylim(xy[1] + r / 1.9, xy[1] - r / 1.9)
fig.set_size_inches((256 / dpi, 256 / dpi))
# Qt does not support clip paths in SVG rendering so we have to use PNG here
# then use "optipng -o7" on it afterward (14% reduction in file size)
plt.savefig(op.join(data_dir, 'mne-circle-black.png'), transparent=True)
plt.close()
# 92x22 image
w_px = 92
h_px = 22
center_fudge = np.array([20, 0])
scale_fudge = 2.1
rcParams.update({'font.sans-serif': ['Primetime'], 'font.weight': 'black'})
x = np.linspace(-1., 1., w_px // 2)
y = np.linspace(-1., 1., h_px // 2)
X, Y = np.meshgrid(x, y)
# initialize figure (no axes, margins, etc)
fig = plt.figure(1, figsize=(w_px / dpi, h_px / dpi), frameon=False, dpi=dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# plot rainbow
ax.imshow(X, cmap=mne_field_grad_cols, aspect='equal', zorder=1)
ax.imshow(np.ones_like(X) * 0.5, cmap='Greys', aspect='equal', zorder=0,
clim=[0, 1])
plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())]
# MNE text in white
mne_path = TextPath((0, 0), 'MNE')
dims = mne_path.vertices.max(0) - mne_path.vertices.min(0)
vert = mne_path.vertices - dims / 2.
mult = scale_fudge * (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted (origin at top left)
offset = np.array([scale_fudge, 1.]) * \
np.array([-dims[0], plot_dims[-1]]) / 2. - center_fudge
mne_clip = Path(offset + vert * mult, mne_path.codes)
mne_patch = PathPatch(mne_clip, facecolor='w', edgecolor='none', zorder=10)
ax.add_patch(mne_patch)
# adjust xlim and ylim
mne_corners = mne_clip.get_extents().corners()
xmin, ymin = np.min(mne_corners, axis=0)
xmax, ymax = np.max(mne_corners, axis=0)
xl = ax.get_xlim()
yl = ax.get_ylim()
xpad = np.abs(np.diff([xmin, xl[1]])) / 20.
ypad = np.abs(np.diff([ymax, ymin])) / 20.
ax.set_xlim(xmin - xpad, xl[1] + xpad)
ax.set_ylim(ymax + ypad, ymin - ypad)
plt.draw()
plt.savefig(op.join(static_dir, 'mne_logo_small.svg'), transparent=True)
plt.close()
|
bsd-3-clause
|
Achuth17/scikit-learn
|
sklearn/linear_model/__init__.py
|
270
|
3096
|
"""
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
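# Illustrative usage sketch (not part of the original module): the estimators
# exported above follow the common scikit-learn fit/predict API, e.g.
# >>> from sklearn.linear_model import LinearRegression
# >>> LinearRegression().fit([[0], [1], [2]], [0, 1, 2]).predict([[3]])
# array([ 3.])   # exact repr formatting depends on the numpy version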
|
bsd-3-clause
|
sinkap/trappy
|
tests/test_baretrace.py
|
2
|
3406
|
# Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import trappy
import unittest
class TestBareTrace(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBareTrace, self).__init__(*args, **kwargs)
dfr0 = pd.DataFrame({"l1_misses": [24, 535, 41],
"l2_misses": [155, 11, 200],
"cpu": [ 0, 1, 0]},
index=pd.Series([1.020, 1.342, 1.451], name="Time"))
dfr1 = pd.DataFrame({"load": [ 35, 16, 21, 28],
"util": [279, 831, 554, 843]},
index=pd.Series([1.279, 1.718, 2.243, 2.465], name="Time"))
self.dfr = [dfr0, dfr1]
def test_bare_trace_accepts_name(self):
"""The BareTrace() accepts a name parameter"""
trace = trappy.BareTrace(name="foo")
self.assertEquals(trace.name, "foo")
def test_bare_trace_can_add_parsed_event(self):
"""The BareTrace() class can add parsed events to its collection of trace events"""
trace = trappy.BareTrace()
trace.add_parsed_event("pmu_counters", self.dfr[0])
self.assertEquals(len(trace.pmu_counters.data_frame), 3)
self.assertEquals(trace.pmu_counters.data_frame["l1_misses"].iloc[0], 24)
trace.add_parsed_event("pivoted_counters", self.dfr[0], pivot="cpu")
self.assertEquals(trace.pivoted_counters.pivot, "cpu")
def test_bare_trace_get_duration(self):
"""BareTrace.get_duration() works for a simple case"""
trace = trappy.BareTrace()
trace.add_parsed_event("pmu_counter", self.dfr[0])
trace.add_parsed_event("load_event", self.dfr[1])
self.assertEquals(trace.get_duration(), self.dfr[1].index[-1])
def test_bare_trace_get_duration_normalized(self):
"""BareTrace.get_duration() works if the trace has been normalized"""
trace = trappy.BareTrace()
trace.add_parsed_event("pmu_counter", self.dfr[0].copy())
trace.add_parsed_event("load_event", self.dfr[1].copy())
basetime = self.dfr[0].index[0]
trace.normalize_time(basetime)
expected_duration = self.dfr[1].index[-1] - basetime
self.assertEquals(trace.get_duration(), expected_duration)
def test_bare_trace_normalize_time_accepts_basetime(self):
"""BareTrace().normalize_time() accepts an arbitrary basetime"""
trace = trappy.BareTrace()
trace.add_parsed_event("pmu_counter", self.dfr[0].copy())
prev_first_time = trace.pmu_counter.data_frame.index[0]
basetime = 3
trace.normalize_time(basetime)
self.assertEquals(trace.basetime, basetime)
exp_first_time = prev_first_time - basetime
self.assertEquals(round(trace.pmu_counter.data_frame.index[0] - exp_first_time, 7), 0)
|
apache-2.0
|
NunoEdgarGub1/scikit-learn
|
examples/ensemble/plot_ensemble_oob.py
|
259
|
3265
|
"""
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt
from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
# Author: Kian Ho <hui.kian.ho@gmail.com>
# Gilles Louppe <g.louppe@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause
print(__doc__)
RANDOM_STATE = 123
# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
n_clusters_per_class=1, n_informative=15,
random_state=RANDOM_STATE)
# NOTE: Setting the `warm_start` construction parameter to `True` disables
# support for parallelised ensembles but is necessary for tracking the OOB
# error trajectory during training.
ensemble_clfs = [
("RandomForestClassifier, max_features='sqrt'",
RandomForestClassifier(warm_start=True, oob_score=True,
max_features="sqrt",
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features='log2'",
RandomForestClassifier(warm_start=True, max_features='log2',
oob_score=True,
random_state=RANDOM_STATE)),
("RandomForestClassifier, max_features=None",
RandomForestClassifier(warm_start=True, max_features=None,
oob_score=True,
random_state=RANDOM_STATE))
]
# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)
# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175
for label, clf in ensemble_clfs:
for i in range(min_estimators, max_estimators + 1):
clf.set_params(n_estimators=i)
clf.fit(X, y)
# Record the OOB error for each `n_estimators=i` setting.
oob_error = 1 - clf.oob_score_
error_rate[label].append((i, oob_error))
# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, clf_err in error_rate.items():
xs, ys = zip(*clf_err)
plt.plot(xs, ys, label=label)
plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
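# A small follow-up sketch: estimate, for each classifier, the smallest
# ``n_estimators`` whose OOB error comes within a tolerance of the best error
# observed. The 0.005 tolerance is a hypothetical choice for illustration only.
TOLERANCE = 0.005
for label, clf_err in error_rate.items():
    best_error = min(err for _, err in clf_err)
    n_stable = next(n for n, err in clf_err if err <= best_error + TOLERANCE)
    print("%s: OOB error stabilises around n_estimators=%d" % (label, n_stable))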
|
bsd-3-clause
|
herow/planning_qgis
|
python/plugins/processing/algs/qgis/PolarPlot.py
|
5
|
3040
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
    PolarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from matplotlib.pyplot import figure
import numpy as np
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class PolarPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def defineCharacteristics(self):
self.name = 'Polar plot'
self.group = 'Graphics'
self.addParameter(ParameterTable(self.INPUT,
self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'), self.INPUT))
self.addParameter(ParameterTableField(self.VALUE_FIELD,
self.tr('Value field'), self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Output')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, valuefieldname)
plt.close()
fig = figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
N = len(values[valuefieldname])
theta = np.arange(0.0, 2 * np.pi, 2 * np.pi / N)
radii = values[valuefieldname]
width = 2 * np.pi / N
ax.bar(theta, radii, width=width, bottom=0.0)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
|
gpl-2.0
|
vermouthmjl/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
glennq/scikit-learn
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bsd-3-clause
|
Erotemic/plottool
|
plottool_ibeis/__MPL_INIT__.py
|
1
|
8661
|
# -*- coding: utf-8 -*-
"""
Notes:
    To use various backends certain packages are required
PyQt
...
Tk
pip install
sudo apt-get install tk
sudo apt-get install tk-dev
Wx
pip install wxPython
GTK
pip install PyGTK
        pip install pygobject
Cairo
pip install pycairo
pip install py2cairo
pip install cairocffi
sudo apt-get install libcairo2-dev
CommandLine:
python -m plottool_ibeis.draw_func2 --exec-imshow --show --mplbe=GTKAgg
python -m plottool_ibeis.draw_func2 --exec-imshow --show --mplbe=TkAgg
python -m plottool_ibeis.draw_func2 --exec-imshow --show --mplbe=WxAgg
python -m plottool_ibeis.draw_func2 --exec-imshow --show --mplbe=WebAgg
python -m plottool_ibeis.draw_func2 --exec-imshow --show --mplbe=gdk
python -m plottool_ibeis.draw_func2 --exec-imshow --show --mplbe=cairo
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os
import utool as ut
from six.moves import builtins
ut.noinject(__name__, '[plottool_ibeis.__MPL_INIT__]')
try:
profile = getattr(builtins, 'profile')
except AttributeError:
def profile(func):
return func
__IS_INITIALIZED__ = False
__WHO_INITIALIZED__ = None
VERBOSE_MPLINIT = ut.get_argflag(('--verb-mpl', '--verbose'))
TARGET_BACKEND = ut.get_argval(('--mpl-backend', '--mplbe'), type_=str, default=os.environ.get('MPL_BACKEND', None))
FALLBACK_BACKEND = ut.get_argval(('--mpl-fallback-backend', '--mplfbbe'), type_=str, default='agg')
def print_all_backends():
import matplotlib.rcsetup as rcsetup
print(rcsetup.all_backends)
valid_backends = [u'GTK', u'GTKAgg', u'GTKCairo', u'MacOSX', u'Qt4Agg',
u'Qt5Agg', u'TkAgg', u'WX', u'WXAgg', u'CocoaAgg',
u'GTK3Cairo', u'GTK3Agg', u'WebAgg', u'nbAgg', u'agg',
u'cairo', u'emf', u'gdk', u'pdf', u'pgf', u'ps', u'svg',
u'template']
del valid_backends
def get_pyqt():
have_guitool_ibeis = ut.check_module_installed('guitool_ibeis')
try:
if have_guitool_ibeis:
from guitool_ibeis import __PYQT__ as PyQt
pyqt_version = PyQt._internal.GUITOOL_PYQT_VERSION
else:
try:
import PyQt5 as PyQt
pyqt_version = 5
except ImportError:
import PyQt4 as PyQt
pyqt_version = 4
except ImportError:
PyQt = None
pyqt_version = None
return PyQt, pyqt_version
def get_target_backend():
if (not sys.platform.startswith('win32') and
not sys.platform.startswith('darwin') and
os.environ.get('DISPLAY', None) is None):
# Write to files if we cannot display
# target_backend = 'PDF'
target_backend = FALLBACK_BACKEND
else:
target_backend = TARGET_BACKEND
if target_backend is None:
PyQt, pyqt_version = get_pyqt()
if pyqt_version is None:
            print('[!plottool_ibeis] WARNING backend fallback to %s' % (FALLBACK_BACKEND, ))
target_backend = FALLBACK_BACKEND
elif pyqt_version == 4:
target_backend = 'Qt4Agg'
elif pyqt_version == 5:
target_backend = 'Qt5Agg'
else:
raise ValueError('Unknown pyqt version %r' % (pyqt_version,))
return target_backend
def _init_mpl_rcparams():
import matplotlib as mpl
from matplotlib import style
#http://matplotlib.org/users/style_sheets.html
nogg = ut.get_argflag('--nogg')
if not nogg:
style.use('ggplot')
#style.use(['ggplot'])
#print('style.available = %r' % (style.available,))
#style.use(['bmh'])
#style.use(['classic'])
#import utool
#utool.embed()
#style.use(['ggplot', 'dark_background'])
if ut.get_argflag('--notoolbar'):
toolbar = 'None'
else:
toolbar = 'toolbar2'
mpl.rcParams['toolbar'] = toolbar
#mpl.rc('text', usetex=False)
if ut.get_argflag('--usetex'):
#mpl.rc('text', usetex=True)
mpl.rcParams['text.usetex'] = True
#matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
mpl.rcParams['text.latex.unicode'] = True
mpl_keypress_shortcuts = [key for key in mpl.rcParams.keys() if key.find('keymap') == 0]
for key in mpl_keypress_shortcuts:
mpl.rcParams[key] = ''
CUSTOM_GGPLOT = 1
if CUSTOM_GGPLOT and not nogg:
ggplot_style = style.library['ggplot'] # NOQA
# print('ggplot_style = %r' % (ggplot_style,))
custom_gg = {
'axes.axisbelow': True,
#'axes.edgecolor': 'white',
'axes.facecolor': '#E5E5E5',
'axes.edgecolor': 'none',
#'axes.facecolor': 'white',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': '0.50',
'figure.facecolor': 'white',
'font.size': 10.0,
'grid.color': 'white',
'grid.linestyle': '-',
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'xtick.color': '#555555',
'xtick.direction': 'out',
'ytick.color': '#555555',
'ytick.direction': 'out',
'axes.prop_cycle': mpl.cycler('color',
['#E24A33', '#348ABD', '#988ED5',
'#777777', '#FBC15E', '#8EBA42',
'#FFB5B8']),
}
mpl.rcParams.update(custom_gg)
NICE_DARK_BG = False
if NICE_DARK_BG:
dark_style = {
'axes.edgecolor': 'white',
'axes.facecolor': 'black',
'axes.labelcolor': 'white',
'figure.edgecolor': 'black',
'figure.facecolor': 'black',
'grid.color': 'white',
'lines.color': 'white',
'patch.edgecolor': 'white',
'savefig.edgecolor': 'black',
'savefig.facecolor': 'black',
'text.color': 'white',
'xtick.color': 'white',
'ytick.color': 'white'
}
mpl.rcParams.update(dark_style)
mpl.rcParams['figure.subplot.top'] = .8
#mpl.rcParams['text'].usetex = False
#for key in mpl_keypress_shortcuts:
# print('%s = %s' % (key, mpl.rcParams[key]))
# Disable mpl shortcuts
# mpl.rcParams['toolbar'] = 'None'
# mpl.rcParams['interactive'] = True
# import matplotlib.pyplot as plt
# plt.xkcd()
def _mpl_set_backend(target_backend):
import matplotlib as mpl
if ut.get_argflag('--leave-mpl-backend-alone'):
print('[pt] LEAVE THE BACKEND ALONE !!! was specified')
print('[pt] not changing mpl backend')
else:
#mpl.use(target_backend, warn=True, force=True)
mpl.use(target_backend, warn=True, force=False)
#mpl.use(target_backend, warn=False, force=False)
current_backend = mpl.get_backend()
if not ut.QUIET and ut.VERBOSE:
print('[pt] current backend is: %r' % current_backend)
def _init_mpl_mainprocess(verbose=VERBOSE_MPLINIT):
global __IS_INITIALIZED__
global __WHO_INITIALIZED__
import matplotlib as mpl
#mpl.interactive(True)
current_backend = mpl.get_backend()
target_backend = get_target_backend()
if __IS_INITIALIZED__ is True:
if verbose:
print('[!plottool_ibeis] matplotlib has already been initialized. backend=%r' % current_backend)
print('[!plottool_ibeis] Initially initialized by %r' % __WHO_INITIALIZED__)
print('[!plottool_ibeis] Trying to be init by %r' % (ut.get_caller_name(N=range(0, 5))))
return False
__IS_INITIALIZED__ = True
if verbose:
print('[plottool_ibeis] matplotlib initialized by %r' % __WHO_INITIALIZED__)
__WHO_INITIALIZED__ = ut.get_caller_name(N=range(0, 5))
if verbose:
print('--- INIT MPL---')
print('[pt] current backend is: %r' % current_backend)
print('[pt] mpl.use(%r)' % target_backend)
if current_backend != target_backend:
_mpl_set_backend(target_backend)
_init_mpl_rcparams()
@profile
def init_matplotlib(verbose=VERBOSE_MPLINIT):
if ut.in_main_process():
PyQt, pyqt_version = get_pyqt()
return _init_mpl_mainprocess(verbose=verbose)
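# A minimal usage sketch, assuming utool and matplotlib are installed; it only
# runs when this module is executed directly. The call order suggested by the
# code above is to initialise the backend before pyplot is first imported.
if __name__ == '__main__':
    init_matplotlib(verbose=True)
    import matplotlib as mpl
    print('active backend: %r' % (mpl.get_backend(),))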
|
apache-2.0
|
Aasmi/scikit-learn
|
sklearn/externals/joblib/__init__.py
|
36
|
4795
|
""" Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computation-heavy jobs (as in
  scientific development), but hand-crafted solutions to alleviate this
  issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
  good for resuming an application's status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b2'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
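# A minimal persistence sketch (doctest-style; assumes numpy is available and
# is not executed on import), illustrating the ``dump``/``load`` feature
# described in the module docstring:
#
#   >>> import os, tempfile
#   >>> import numpy as np
#   >>> from sklearn.externals.joblib import dump, load
#   >>> path = os.path.join(tempfile.mkdtemp(), 'array.pkl')
#   >>> _ = dump(np.arange(10), path)
#   >>> (load(path) == np.arange(10)).all()
#   True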
|
bsd-3-clause
|
jswanljung/iris
|
docs/iris/src/userguide/plotting_examples/1d_with_legend.py
|
12
|
1235
|
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import matplotlib.pyplot as plt
import iris
import iris.plot as iplt
fname = iris.sample_data_path('air_temp.pp')
# Load exactly one cube from the given file
temperature = iris.load_cube(fname)
# We are only interested in a small number of latitudes (the four starting
# at index 5), so index them out
temperature = temperature[5:9, :]
for cube in temperature.slices('longitude'):
# Create a string label to identify this cube (i.e. latitude: value)
cube_label = 'latitude: %s' % cube.coord('latitude').points[0]
# Plot the cube, and associate it with a label
iplt.plot(cube, label=cube_label)
# Match the longitude range to global
max_lon = temperature.coord('longitude').points.max()
min_lon = temperature.coord('longitude').points.min()
plt.xlim(min_lon, max_lon)
# Add the legend with 2 columns
plt.legend(ncol=2)
# Put a grid on the plot
plt.grid(True)
# Provide some axis labels
plt.ylabel('Temperature / kelvin')
plt.xlabel('Longitude / degrees')
# And a sensible title
plt.suptitle('Air Temperature', fontsize=20, y=0.9)
# Finally, show it.
plt.show()
|
lgpl-3.0
|
danstowell/markovrenewal
|
experiments/chiffchaff.py
|
1
|
35302
|
#!/usr/bin/env python
# script to analyse mixtures of chiffchaff audios
# by Dan Stowell, summer 2012
from glob import glob
from subprocess import call
import os.path
import csv
from math import log, exp, pi, sqrt, ceil, floor
from numpy import array, mean, cov, linalg, dot, median, std
import numpy as np
import tempfile
import shutil
from operator import itemgetter
from copy import copy, deepcopy
from sklearn.mixture import GMM
import gc
from random import uniform, shuffle
import time, datetime
from markovrenewal import mrp_autochunk_and_getclusters # MRPGraph
from evaluators import *
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from colorsys import hsv_to_rgb
import random
import os, sys
#################################################################################################
# USER SETTINGS
#analysistype = "chrm" # Ring-modulation chirplet analysis, facilitated by "chiffchaffch.py" and the other files in my chirpletringmod folder
#analysistype = "chmp" # MPTK chirplet analysis, facilitated by "likechrm.py" and the other files in my py4mptk folder
analysistype = "xcor" # Spectrotemporal cross-correlation template-matching, carried out by "xcordetect.py"
#analysistype = "psts" # Uses another MRP underneath, to stitch peaks into tweets, then models each with a polynomial fit
usetrimmed = True
datadir = os.path.expanduser("~/birdsong/xenocanto_chiffchaff")
if usetrimmed: datadir += "/trimmed"
mp3dir = datadir+"/mp3"
wavdir = datadir+"/wav"
annotdir = datadir+"/"+analysistype
csvdir = datadir+"/csv"
maxtestsetsize = 5
boutgapsecs = 1 #2 # chiffchaff bouts are typically at least 3 sec separated. this value is used to segment bouts. (since syll gap is typically 0.35 sec, this allows one or even two dropped sylls to remain in-bout.)
maxsyllgapsecs = 1 # this is passed to the MRP as the longest it needs to consider
fastdev = False # set this to True to miss out lots of tests, for rapid-tweaking purposes (doesn't make same plots)
if fastdev:
froz_nmix = [2,4]
froz_permute = [0,10,15]
else:
froz_nmix = [1,2,3,4,5]
froz_permute = None
fewerruntypes = fastdev # For the paper with Sash, we don't need the extra run types, so hard code this to True
#######FOR DEVELOPMENT ONLY: use a frozen pre-made MPTK analysis
#frozpath = os.path.expanduser("~/birdsong/xenocanto_chiffchaff/chmp/frozen_24_tmpgOtHD__chiffchaff")
frozpath = None
#################################################################################################
# loading modules depending on user settings (inc localpath ones)
if analysistype=='xcor':
import xcordetect as xcor
specgrammode = xcor.default_specgrammode
if specgrammode==None:
specgrammodecode = ''
else:
specgrammodecode = '_%s' % specgrammode
print "Building xcor template"
xcor_gridinfo = xcor.get_gridinfoGMM_cacheable(datadir, specgrammode)
if analysistype[:3]=='pst':
import peakstitcher
if analysistype=='chrm':
cmd_folder = os.path.realpath(os.path.abspath("../chirpletringmod/"))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import fileutils as chf
if analysistype=='chmp':
cmd_folder = os.path.realpath(os.path.abspath("../py4mptk/"))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
import likechrm
starttime = time.time()
#################################################################################################
# PREPROCESSING
wavpaths = glob(wavdir + "/XC*.wav")
# Check the WAVs have been made from the MP3s
if len(wavpaths) == 0:
raise ValueError("Found no WAVs - please run the script to automatically convert the MP3s (chiffchaff_mp3wav.bash)")
if len(wavpaths) <= (maxtestsetsize * 2):
raise ValueError("Only found %i WAVs, whereas test set is expected to be of size %i and we need a training set too" % (len(wavpaths), maxtestsetsize))
# Load the data:
gtboutindex = 0
def makeItemObject(wavpath):
"Defines a simple struct data layout, also checking that the annotations exist and loading them, chunked into bouts."
global gtboutindex
basename = os.path.splitext(os.path.split(wavpath)[1])[0]
annotpath = "%s/%s%s.csv" % (annotdir, basename, specgrammodecode)
annotpath_d = "%s/%s%s_noise.csv" % (annotdir, basename, specgrammodecode)
if not os.path.exists(annotpath):
raise ValueError("Failed to find expected annotation file %s -- you may need to rerun %s" % (annotpath, { \
'chrm':'../chirpletringmod/chiffchaffch.py', \
'xcor':'xcordetect.py',\
'psts':'peakstitcher.py',\
}[analysistype]))
# load the annotations; and SEPARATE THE DATA INTO BOUTS using boutgapsecs
rdr = csv.DictReader(open(annotpath, 'rb'))
csvdata = [{key:float(row[key]) for key in row} for row in rdr]
if analysistype=='xcor':
xcor.add_fwisebin_data_to_csvdata(csvdata, "%s/%s%s.fwisebins.csv" % (annotdir, basename, specgrammodecode))
csvdata.sort(key=itemgetter('timepos')) # TBH I think it's usually sorted already, but belt-and-braces is OK
bouts = [[]]
prevrow = None
gtboutindex += 1 # ensure consecutive files don't share index
for row in csvdata:
if ('amp' in row) and not ('mag' in row): # harmonise terminology
row['mag'] = row['amp']
del row['amp']
if prevrow != None:
if (row['timepos'] - prevrow['timepos']) > boutgapsecs:
if len(bouts[-1])==0:
print wavpath
raise ValueError(str(row))
bouts.append([]) # begin a new bout
gtboutindex += 1
row['gtboutindex'] = gtboutindex
row['fromto'] = (row['from'], row['to']) # bleh
bouts[-1].append(row)
prevrow = row
# Now load the noise. This is simpler because we don't care about bouts or gaps.
rdr = csv.DictReader(open(annotpath_d, 'rb'))
noise = []
for row in rdr:
row = {key:float(row[key]) for key in row}
if ('amp' in row) and not ('mag' in row): # harmonise terminology
row['mag'] = row['amp']
del row['amp']
row['fromto'] = (row['from'], row['to']) # bleh
noise.append(row)
if analysistype=='xcor':
xcor.add_fwisebin_data_to_csvdata(noise, "%s/%s%s_noise.fwisebins.csv" % (annotdir, basename, specgrammodecode))
return {'annotpath':annotpath, 'basename':basename, 'wavpath':wavpath, 'bouts': bouts, 'noise': noise}
items = map(makeItemObject, wavpaths)
#################################################################################################
# TRAINING
def fitGmm(anarray):
"This ALSO normalises and returns the normalisation vectors"
themean = mean(anarray, 0)
theinvstd = std(anarray, 0)
for i,val in enumerate(theinvstd):
if val == 0.0:
theinvstd[i] = 1.0
else:
theinvstd[i] = 1.0 / val
print "theinvstd: ", theinvstd
thegmm = GMM(n_components=10, cvtype='full')
# DEACTIVATED STD STANDARDISATION
theinvstd = array([1.0 for _ in xrange(len(anarray[0]))])
if len(anarray)<10:
anarray = np.vstack((anarray, anarray)) # because scipy refuses if <10
thegmm.fit((anarray - themean) * theinvstd) # with standn
return {'gmm':thegmm, 'mean':themean, 'invstd':theinvstd}
def unigramvectorfromsyll(syll, vecextramode=None):
"Returns a list of unigram data (to be concatenated if making bigram data)"
# NB do not return numpy array, return standard list (because of concatenation!)
if vecextramode=='fwise':
# NOTE: this xcor function deliberately limits the number of frames used, for manageable data size
return map(log, xcor.fwisebindata_as_vector(syll['fwise']))
else:
return map(log, [syll['fromto'][0], syll['fromto'][1]])
def bigramvectorfromsyll(frm, too, vecextramode=None):
timedelta = too['timepos'] - frm['timepos']
magratio = too['mag'] / frm['mag']
return unigramvectorfromsyll(frm, vecextramode) + map(log, [timedelta, magratio]) + unigramvectorfromsyll(too, vecextramode)
def trainModel(items, plot=False, vecextramode=None):
"Supply some subset of 'items' and this will train a Gaussian from the log-time-deltas and log-freqs and log-mag-ratios"
trainingdata = [] # pairwise
marginaltrainingdata = [] # unigramwise
noisetrainingdata = [] # unigramwise
for item in items:
for bout in item['bouts']:
for i in xrange(len(bout)-1):
timedelta = bout[i+1]['timepos'] - bout[i]['timepos']
magratio = bout[i+1]['mag'] / bout[i]['mag']
vector = bigramvectorfromsyll(bout[i], bout[i+1], vecextramode)
trainingdata.append(vector)
for datum in bout:
vector = unigramvectorfromsyll(datum, vecextramode)
marginaltrainingdata.append(vector)
for datum in item['noise']:
vector = unigramvectorfromsyll(datum, vecextramode)
noisetrainingdata.append(vector)
trainingdata = array(trainingdata)
avgpathlen = mean([mean([len(bout) for bout in item['bouts']]) for item in items])
thegmm = fitGmm(trainingdata) # p(X & Y)
marginalgmm = fitGmm(array(marginaltrainingdata)) # p(X)
# noise model is similar to the marginal, in that it's just 2D [from, to]
    while len(noisetrainingdata) < 10: noisetrainingdata += noisetrainingdata # a bit hacky - duplicate the data because GMM refuses to fit to very few datapoints; assumes at least one noise datum, not crucial here
noisegmm = fitGmm(array(noisetrainingdata))
model = {'gmm':thegmm, 'avgpathlen':avgpathlen, 'marginalgmm': marginalgmm, 'noisegmm': noisegmm}
return model
def plottimedeltas(items):
"Plots a histo of the timedeltas for each separate training file"
fig = plt.figure()
deltas = []
mintime=0
maxtime=0
for item in items:
deltas.append({'label': item['basename'], 'vals':[]})
for bout in item['bouts']:
for i in xrange(len(bout)-1):
timedelta = bout[i+1]['timepos'] - bout[i]['timepos']
if timedelta < mintime:
mintime = timedelta
if timedelta > maxtime:
maxtime = timedelta
deltas[-1]['vals'].append(timedelta)
for whichplot, anitem in enumerate(deltas):
ax = fig.add_subplot(len(deltas), 1, 1 + whichplot)
if whichplot==0:
plt.title("Time deltas in each training file")
plt.hist(anitem['vals'], 40, range=(mintime, maxtime))
plt.yticks([0], [anitem['label']], fontsize='xx-small')
if whichplot != len(deltas)-1:
plt.xticks([])
plt.savefig("%s/pdf/plot_timedeltahistos.pdf" % annotdir, papertype='A4', format='pdf')
plt.clf() # must do this - helps prevent memory leaks with thousands of Bboxes
def standardise_vector(vector, submodel):
mean = submodel['mean']
invstd = submodel['invstd']
return [(x - mean[i]) * invstd[i] for i,x in enumerate(vector)]
def likelihood_signal_model(model, frm, too, vecextramode=None):
"Evaluates the conditional likelihood of a transition represented by 'vector' under model 'model'"
fullprob = model['gmm'] ['gmm'].eval([ standardise_vector(bigramvectorfromsyll(frm, too, vecextramode=vecextramode) , model['gmm' ]) ])[0][0] # log(P(A AND B))
fromprob = model['marginalgmm']['gmm'].eval([ standardise_vector(unigramvectorfromsyll(frm, vecextramode=vecextramode), model['marginalgmm']) ])[0][0] # log(P(A))
return exp(fullprob - fromprob) # P(B | A)
def likelihood_marginal_model(model, syll, vecextramode=None):
"Evaluates the likelihood of a single datum represented by 'vector' under model 'model', IGNORING TRANSITIONS ETC"
aprob = model['marginalgmm']['gmm'].eval([ standardise_vector(unigramvectorfromsyll(syll, vecextramode=vecextramode), model['marginalgmm']) ])[0][0] # log(P(A))
return exp(aprob) # P(A)
def likelihood_noise_model(model, syll, vecextramode=None):
"Evaluates the likelihood of a datum represented by 'vector' under the NOISE model in 'model'"
aprob = model['noisegmm']['gmm'].eval([ standardise_vector(unigramvectorfromsyll(syll, vecextramode=vecextramode), model['noisegmm']) ])[0][0] # log(P(N))
return exp(aprob)
def sequenceloglikelihood(sequence, model, vecextramode=None):
"For a single cluster, finds the log-likelihood of the entire transition sequence (WITHOUT including birth/death)"
ll = 0.0
for i in xrange(len(sequence)-1):
a = sequence[i]
b = sequence[i+1]
prob = likelihood_signal_model(model, a, b, vecextramode=vecextramode)
ll += log(prob)
return ll
def chmodelMRPGraph_andgetclusters(data, model, estsnr=200, greedynotfull=False, vecextramode=None):
"""Factory method for using MRPGraph with chiffchaff signal model and fixed probys derived from estimated lengths and SNRs.
'expectedpathlen' is the expected number of emissions in a MRP sequence - if deathprob is fixed then it's exponential decay over sequence index.
'estsnr' is an estimate of signal-to-noise ratio (NOT in dB, as a ratio) - e.g. "2" means you estimate two-thirds of the datapoints to be non-clutter.
"""
deathprob = (1./model['avgpathlen']) # exponential decay relationship
birthprob = estsnr / ((1. + estsnr) * model['avgpathlen'])
clutterprob = 1. / (1.+estsnr)
print "Probabilities derived: birth %g, death %g, clutter %g." % (birthprob, deathprob, clutterprob)
def transprobcallback(a,b):
#prob = likelihood_signal_model(model, [a['fromto'][0], a['fromto'][1], b['timepos']-a['timepos'], b['mag']/a['mag'], b['fromto'][0], b['fromto'][1]])
prob = likelihood_signal_model(model, a, b, vecextramode=vecextramode)
# sparsify the graph to make inference more efficient - when prob==0, arcs are not created
if prob < 1e-22:
return 0.
else:
return prob
birthprobcallback = lambda a: birthprob
deathprobcallback = lambda a: deathprob
def clutterprobcallback(a):
return clutterprob * likelihood_noise_model(model, a, vecextramode=vecextramode)
cl = mrp_autochunk_and_getclusters(
# MRP ctor args:
data, transprobcallback, birthprobcallback, deathprobcallback, clutterprobcallback, maxtimedelta=maxsyllgapsecs,
# cluster-getting args:
numcutoff=90, greedynotfull=greedynotfull
)
return cl
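# Worked example of the probability derivation above (hypothetical numbers,
# added only for clarity): with model['avgpathlen'] = 10 and estsnr = 1,
#   deathprob   = 1 / 10               = 0.1
#   birthprob   = 1 / ((1 + 1) * 10)   = 0.05
#   clutterprob = 1 / (1 + 1)          = 0.5
# i.e. sequences are expected to be about 10 syllables long and, with an SNR
# estimate of 1, roughly half of the detected datapoints are treated as clutter.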
def chmodel_baseline_andgetclusters(data, model, vecextramode=None):
"""Baseline system - simple signal/noise likelihood test for each datum, and clustering
based on a time-gap threshold. Does not use any transition information.
returns, like the MRP_agc does, {'other': [theclutter], 'clusters': [[val, [thecluster]], ...]}"""
data.sort(key=itemgetter('timepos'))
clusters = []
noise = []
prevtimepos = -9e99
for datum in data:
sig_like = likelihood_marginal_model(model, datum, vecextramode=vecextramode)
noise_like = likelihood_noise_model( model, datum, vecextramode=vecextramode)
if sig_like >= noise_like:
if (datum['timepos'] - prevtimepos) > 0.7:
# start a new cluster
clusters.append([0., []])
clusters[-1][0] += log(sig_like / noise_like)
clusters[-1][1].append(datum)
prevtimepos = datum['timepos']
else:
noise.append(datum)
return {'clusters':clusters, 'other':noise}
trainModel(items, True)
plottimedeltas(items)
#################################################################################################
# TESTING
def analysemixedwav(mixedwav, frozpath=None):
"'frozpath' is, for development purposes, a path to a 'frozen' copy of the analysis data, so 'chmp' mode won't run..."
unframed = []
if analysistype == "chrm":
framesize = 1024 #4096 ### Make sure this matches what chiffchaffch is using
hopsize = 0.5 # 0.125 ### Make sure this matches what chiffchaffch is using
chfanalysis = chf.analysefile(mixedwav, numtop=1, framesize=framesize, hopsize=hopsize)
hopsecs = float(chfanalysis['framesize'] * chfanalysis['hopsize']) / chfanalysis['srate']
framesizesecs = float(chfanalysis['framesize']) / chfanalysis['srate']
# we liberate peaks from their frames
for framepos, frame in enumerate(chfanalysis['frames']):
for peak in frame['peaks']:
peak['timepos'] = framepos * hopsecs
if peak['salience'] > 0: # filter to just downwards
unframed.append(peak)
elif analysistype == "chmp":
skipprocessing = (frozpath != None)
pass_in_tmpdir = frozpath or tmpdir
(rawdata, _) = likechrm.mptk_wav2csv_likechrm_one(mixedwav, tmpdir=pass_in_tmpdir, \
filtermingap=False, filteramp=True, ampthresh=0.001, snr=16, skipprocessing=skipprocessing)
hopsecs = likechrm.hopsecs
framesizesecs = likechrm.framesizesecs
for peak in rawdata:
if peak['salience'] > 0: # filter to just downwards
unframed.append(peak)
elif analysistype == "xcor":
# ampthreshes here are done to match what's done in the orig analysis
ampthresh = {None: 0.8, 'sash01': 0.2, 'sash02': 0.05}[specgrammode]
(unframed, _) = xcor.xcor_likechrm_one(xcor_gridinfo, datadir, mixedwav, filteramp=True, filtermingap=False, plot=True, ampthresh=ampthresh,
specgrammode=specgrammode,
plotlimitsecs=6
)
framesizesecs = float(xcor.framelen)/xcor.fs
hopsecs = framesizesecs * xcor.hop
elif analysistype[:3] == "pst":
print "* * * peakstitcher * * *"
(unframed, _) = peakstitcher.pst_likechrm_one(datadir, mixedwav, analysistype[3], filteramp=True, filtermingap=False, plot=True, ampthresh=0.1)
for datum in unframed: datum['salience'] = datum['mag']
framesizesecs, hopsecs = peakstitcher.framesizesecs_hopsecs()
else:
raise ValueError("Unknown analysis type %s" % analysistype)
print "---------------chfanalysis unframed----------------------"
saliences = [x['salience'] for x in unframed]
print "%i saliences: min, mean, median, max: %g, %g, %g, %g" % (len(saliences), min(saliences), mean(saliences), median(saliences), max(saliences))
# Filter out the weak ones:
thresh = max(saliences) * 0.1
unframed = filter(lambda x: x['salience'] > thresh, unframed)
saliences = [x['salience'] for x in unframed]
unframed.sort(key=itemgetter('timepos'))
print "%i saliences: min, mean, median, max: %g, %g, %g, %g" % (len(saliences), min(saliences), mean(saliences), median(saliences), max(saliences))
return (hopsecs, framesizesecs, unframed)
def plotactivitycurve(curve, label="0"):
pdfpath = "%s/pdf/activity_%s.pdf" % (annotdir, str(label))
#print "Will write to %s" % pdfpath
sortedtimes = sorted(curve.keys())
sortedvals = [curve[key] for key in sortedtimes]
plotfontsize = "xx-small"
fig = plt.figure()
plt.title(label, fontsize=plotfontsize)
plt.plot(sortedtimes, sortedvals, drawstyle='steps-post')
plt.ylabel("Num active bouts", fontsize=plotfontsize)
plt.xticks(fontsize=plotfontsize)
plt.yticks(fontsize=plotfontsize)
plt.xlabel("Time (s)", fontsize=plotfontsize)
plt.savefig(pdfpath, papertype='A4', format='pdf')
plt.clf() # must do this - helps prevent memory leaks with thousands of Bboxes
def plotunframed(framesizesecs, unframed, label="0", numsplit=4, colormapper=None):
pdfpath = "%s/pdf/mixture_%s.pdf" % (annotdir, str(label))
#print "Will write %i peaks to %s" % (len(unframed), pdfpath)
if len(unframed)==0:
print "Warning: plotunframed() got zero peaks; won't plot %s" % pdfpath
return
maxmag = max([x['mag'] for x in unframed])
plotfontsize = "xx-small"
fig = plt.figure()
chunkdur = (max([x['timepos'] for x in unframed]) + 0.01) / float(numsplit)
chunkclumps = [[] for _ in xrange(numsplit)]
for peak in unframed:
chunkclumps[int(floor((peak['timepos']/chunkdur)))].append(peak)
peaksplotted = 0
for whichsplit, chunk in enumerate(chunkclumps):
ax = fig.add_subplot(numsplit,1,whichsplit+1)
if len(chunk)!=0 and 'nn_gtsourceindex' in chunk[0]:
# sort so that clutter is plotted first and goes to the bottom
chunk.sort(key=itemgetter('nn_gtsourceindex'))
if whichsplit == 0:
plt.title(label, fontsize=plotfontsize)
for peak in chunk:
alpha = 1 - (peak['mag'] / maxmag)
if colormapper==None:
col = [0,0,0]
else:
col = colormapper(peak)
plt.plot([peak['timepos'], peak['timepos'] + framesizesecs], \
[peak['fromto'][0], peak['fromto'][1]], \
color=col, alpha=alpha)
plt.xlim( xmin=chunkdur * (whichsplit), xmax=chunkdur * (whichsplit+1))
plt.xticks(range(int(ceil(chunkdur * (whichsplit))), int(ceil(chunkdur * (whichsplit+1)))), fontsize=plotfontsize)
plt.ylim(2000, 9000)
peaksplotted += 1
plt.ylabel("Freq (Hz)", fontsize=plotfontsize)
plt.yticks(fontsize=plotfontsize)
print "plotunframed(%s) plotted %i peaks" % (pdfpath, peaksplotted)
plt.xlabel("Time (s)", fontsize=plotfontsize)
plt.savefig(pdfpath, papertype='A4', format='pdf')
plt.clf() # must do this - helps prevent memory leaks with thousands of Bboxes
def plot_gtindex_vs_time(clusteredindicesforplot, label="0"):
"data supplied is clustered lists of items in the form {timepos, gtboutindex_f, mag} with _f meaning mildly fuzzed"
pdfpath = "%s/pdf/gtindex_%s.pdf" % (annotdir, str(label))
if sum(len(cl) for cl in clusteredindicesforplot)==0:
print "Warning: plot_gtindex_vs_time() got zero peaks; won't plot %s" % pdfpath
return
maxmag = max([max([peak['mag'] for peak in cl]) for cl in clusteredindicesforplot])
plotfontsize = "xx-small"
fig = plt.figure()
for cl in clusteredindicesforplot:
xdata = [peak['timepos'] for peak in cl]
ydata = [peak['gtboutindex_f'] for peak in cl]
alpha = [1 - (peak['mag'] / maxmag) for peak in cl]
plt.plot(xdata, ydata, 'x-', alpha=sum(alpha) / float(len(alpha)))
plt.title(label, fontsize=plotfontsize)
plt.ylabel("gt cluster index", fontsize=plotfontsize)
plt.xticks(fontsize=plotfontsize)
plt.yticks(fontsize=plotfontsize)
plt.xlabel("Time (s)", fontsize=plotfontsize)
plt.savefig(pdfpath, papertype='A4', format='pdf')
plt.clf() # must do this - helps prevent memory leaks with thousands of Bboxes
def rainbowcolour(index, length):
return hsv_to_rgb(float(index)/length, 1.0, 0.9)
def plottimelimit(data):
"Handy data truncation for scrying plots"
return filter(lambda x: x['timepos']< 309999, data) # 12
def plotunframed_rainbow(clusters, framesizesecs, label):
mrpcolours = {-1: (0.85,0.85,0.85,0.5)}
for clustindex in xrange(len(clusters['clusters'])):
mrpcolours[clustindex] = rainbowcolour(clustindex, len(clusters['clusters']))
plotunframed(framesizesecs, plottimelimit(decluster_remember_indices(clusters)), label=label, numsplit=4, \
colormapper=lambda x: mrpcolours[x['clustid']])
def decluster_remember_indices(clusters):
declustered = []
for clid, cl in enumerate(clusters['clusters']):
for x in cl[1]:
x['clustid'] = clid
declustered.extend(cl[1])
for x in clusters['other']:
x['clustid'] = -1
declustered.extend(clusters['other'])
return declustered
if frozpath == None:
tmpdir = tempfile.mkdtemp('_chiffchaff')
else:
tmpdir = "NONE"
print "================================================================"
print "Testing phase. (tmpdir is %s)" % tmpdir
mixestotest = froz_nmix or range(2, maxtestsetsize+1)
# NB: "runtypes" list actually determines which tests will be run (...and so how long it will take...)
if fewerruntypes:
runtypes = ['ba', 'af', 'a', 'i']
else:
runtypes = ['ba', 'ag', 'af', 'a', 'is', 'i', 'ip']
results = {nmix:{runtype:[] for runtype in runtypes} for nmix in mixestotest}
firstrun = True
for permuteoffset in (froz_permute or xrange(0, len(items), maxtestsetsize)):
indices = range(len(items))
permutedlist = indices[permuteoffset:] + indices[:permuteoffset]
trainindices = permutedlist[maxtestsetsize:]
testindices = permutedlist[:maxtestsetsize]
print "trainindices: ", trainindices
print "testindices: ", testindices
trainset = [items[i] for i in trainindices]
testset = [items[i] for i in testindices ]
model = trainModel(trainset)
model_fwise = trainModel(trainset, vecextramode='fwise')
for nmix in mixestotest:
mixset = testset[:nmix]
mixedwav = "%s/mixedfile_%i_%i.wav" % (annotdir, permuteoffset, nmix) # NB mixedwav used to be written to tempdir - now in annotdir so is longterm cached
# calling a subprocess seems to inflate memory usage - avoid it in the sashmodes where the results aren't used
# http://stackoverflow.com/questions/1367373/python-subprocess-popen-oserror-errno-12-cannot-allocate-memory
if (frozpath == None) and (specgrammode != 'sash01') and (specgrammode != 'sash02'):
if os.path.exists(mixedwav):
print("Loading existing %s" % mixedwav)
else:
# sum together the audio from $nmix different wavs (different lengths no problem; though should we randomly offset?)
if nmix==1:
soxcmd = ['sox', '-m'] + [x['wavpath'] for x in mixset] + [x['wavpath'] for x in mixset] + [mixedwav]
else:
soxcmd = ['sox', '-m'] + [x['wavpath'] for x in mixset] + [mixedwav]
print soxcmd
call(soxcmd)
truebouts = []
for item in mixset:
print "item %s -- bout lengths are %s -- timespans %s" % (item['basename'], \
', '.join([str(len(bout)) for bout in item['bouts']]), \
', '.join([ "[%.1f--%.1f]" % ((min(peak['timepos'] for peak in bout),
max(peak['timepos'] for peak in bout))) for bout in item['bouts']]))
truebouts.extend(item['bouts'])
activitycurve_t = calcactivitycurve(truebouts)
if firstrun: plotactivitycurve(activitycurve_t, label="true_mixedfile_%i_%i" % (permuteoffset, nmix))
# run chirplet analysis on the mixture
(hopsecs, framesizesecs, unframed) = analysemixedwav(mixedwav, frozpath)
numpeaksfromoriganalysis = len(unframed)
if firstrun: plotunframed(framesizesecs, unframed, "mixedfile_%i_%i" % (permuteoffset, nmix))
maxtimepos = max(map(lambda x: x['timepos'], unframed))
# use all-NN to assign chirplets to their "ground-truth" source file
# -- note that this relies on an assumption about how similar the recovered chirps are in the mix and the orig CSVs
print ">allNN"
candidateNNs = []
sourcecolours = {-1: (0.85,0.85,0.85,0.5)} # for diagnostic plotting
for sourceindex, item in enumerate(mixset):
sourcecolours[sourceindex] = rainbowcolour(sourceindex, len(mixset))
for about in item['bouts']:
for peak in about:
peak['sourceindex'] = sourceindex
candidateNNs.extend(about)
for datum in unframed:
datum['nn_dist'] = 9e99
for candindex, cand in enumerate(candidateNNs):
# NB scaling is manually estimated:
# small diff in timepos is on the order of 0.05
# small diff in freq is on the order of 100
# freq is double-counted because there are 2 freqs, so if anything we'd prefer to over-weight the time
dist = (((cand['from'] - datum['fromto'][0])/100.)**2) \
+ (((cand['to'] - datum['fromto'][1])/100.)**2) \
+ (((cand['timepos']- datum['timepos'] )/0.001)**2)
if dist < datum['nn_dist']:
datum['nn_dist'] = dist
datum['nn_gtboutindex'] = cand['gtboutindex']
datum['nn_gtsourceindex'] = cand['sourceindex']
datum['datumindex'] = candindex
# now each datum should have ['nn_gtboutindex'] which we can use for evaluation
# but some of them might be noise - we assume the nearest to any particular GT is the true, and the others are noise...
nearest_dist_to_each_cand = {}
for datum in unframed:
if (datum['datumindex'] not in nearest_dist_to_each_cand) or \
(nearest_dist_to_each_cand[datum['datumindex']] > datum['nn_dist']):
nearest_dist_to_each_cand[datum['datumindex']] = datum['nn_dist']
# having found the nearest distances for each candidate, we can now kick out any who are further
for datum in unframed:
if datum['nn_dist'] > nearest_dist_to_each_cand[datum['datumindex']]:
datum['datumindex'] = -1
datum['nn_gtboutindex'] = -1
datum['nn_gtsourceindex'] = -1
print "<allNN"
thedistances = [sqrt(datum['nn_dist']) for datum in unframed]
print "allNN distances: range [%g, %g], mean %g" % (min(thedistances), max(thedistances), float(sum(thedistances))/len(thedistances))
# This little iteration may seem weird - storing a 'datumindex' inside data that actually ARE the groundtruth.
# The reason is so we can treat groundtruth and audio-analysed symmetrically when we do evaluation.
for gtindex, gtdatum in enumerate(candidateNNs):
gtdatum['datumindex'] = gtindex
if firstrun: plotunframed(framesizesecs, candidateNNs, "mixedfile_groundtruth_%i_%i" % (permuteoffset, nmix),
colormapper=lambda x: sourcecolours[x['sourceindex']])
#############################################################################
# run MRP inference on the output
actualsnr = float(len(candidateNNs))/max(1, numpeaksfromoriganalysis - len(candidateNNs))
print "actual SNR is %g (gt has %i peaks, analysis has %i peaks)" % (actualsnr, len(candidateNNs), numpeaksfromoriganalysis)
clusters = chmodelMRPGraph_andgetclusters(unframed, model, 1) # snr estimate fixed at reasonable default
mrpcolours = {-1: (0.85,0.85,0.85,0.5)} # for diagnostic plotting
for clustindex in xrange(len(clusters['clusters'])):
mrpcolours[clustindex] = rainbowcolour(clustindex, len(clusters['clusters']))
activitycurve_e = calcactivitycurve([cl[1] for cl in clusters['clusters']])
# plot, coloured in by FILE of origin -- i.e. groundtruth -- and also by estimated clustering
declustered = decluster_remember_indices(clusters)
if firstrun:
print "Plotting sourcecoloured"
plotunframed(framesizesecs, plottimelimit(declustered), label="sourcecolouredall", numsplit=4, \
colormapper=lambda x: sourcecolours[x['nn_gtsourceindex']])
plotunframed(framesizesecs, plottimelimit(filter(lambda x: x['clustid']!=-1, declustered)), label="sourcecoloured", numsplit=4, \
colormapper=lambda x: sourcecolours[x['nn_gtsourceindex']])
plotunframed(framesizesecs, plottimelimit(declustered), label="mrpcolouredall", numsplit=4, \
colormapper=lambda x: mrpcolours[x['clustid']])
plotactivitycurve(activitycurve_e, label="est_mixedfile_%i_%i" % (permuteoffset, nmix))
print "Numbers of peaks: in unframed %i, in declustered %i, in clutter %i" % (len(unframed), len(declustered), len(clusters['other']))
# compare the results of inference against the groundtruth
print "Groundtruth has %i bouts (mean len %g), %i items" % \
(len(truebouts), mean([len(x) for x in truebouts]), len(candidateNNs))
print "Recovered set has %i clusters (mean len %g), %i items (plus %i clutter)" % \
(len(clusters['clusters']), mean([len(x) for x in clusters['clusters']]), sum([len(ci) for ci in clusters['clusters']]), len(clusters['other']))
print "num peaks from orig analysis: %i" % numpeaksfromoriganalysis
# plot connected lines of clusters, on a gtboutindex-vs-time plot
if firstrun:
clusteredindicesforplot = [[{'timepos': hit['timepos'], 'gtboutindex_f': hit['nn_gtboutindex'] + ((clindex * 0.04) % 0.5), 'mag': hit['mag']} \
for hit in cl[1]] for clindex, cl in enumerate(clusters['clusters'])]
plot_gtindex_vs_time(clusteredindicesforplot, "mixedfile_%i_%i" % (permuteoffset, nmix))
# Add the results to our collections
results[nmix]['a' ].append(cluster_many_eval_stats(mixset, clusters, activitycurve_t, printindices=True))
######################################################################################################
# Now let's try other setups than the standard audio-analysis one (ideal-recovery case, baseline, etc)
if 'af' in runtypes:
print "======================================================"
print "Checking fwise case..."
clusters_af = chmodelMRPGraph_andgetclusters(unframed, model_fwise, 1, vecextramode='fwise')
results[nmix]['af'].append(cluster_many_eval_stats(mixset, clusters_af, activitycurve_t, printindices=False))
if 'ag' in runtypes:
print "======================================================"
print "Checking greedy case..."
clusters_ag = chmodelMRPGraph_andgetclusters(unframed, model, 1, greedynotfull=True)
results[nmix]['ag'].append(cluster_many_eval_stats(mixset, clusters_ag, activitycurve_t, printindices=False))
if 'ba' in runtypes:
print "======================================================"
print "Checking baseline audio case..."
clusters_ba = chmodel_baseline_andgetclusters(unframed, model)
results[nmix]['ba'].append(cluster_many_eval_stats(mixset, clusters_ba, activitycurve_t, printindices=False))
if ('ip' in runtypes) or ('i' in runtypes) or ('is' in runtypes):
# this is needed for all 'i*' run types
idealcasepeaks = []
newboutindex = 0
for source in mixset:
for bout in source['bouts']:
newboutindex += 1
for peak in bout:
peak = copy(peak)
peak['nn_gtboutindex'] = newboutindex
idealcasepeaks.append(peak)
idealcasepeaks.sort(key=itemgetter('timepos'))
if 'ip' in runtypes:
# ideal-case analysis: use the "mixset"'s precalculated chirps rather than reanalysing audio - should upper-bound real performance
print "======================================================"
print "Checking ideal-recovery-and-peeking-training case..."
peekingmodel = trainModel(mixset)
clusters_ip = chmodelMRPGraph_andgetclusters(idealcasepeaks, peekingmodel, 200)
results[nmix]['ip'].append(cluster_many_eval_stats(mixset, clusters_ip, activitycurve_t, printindices=False))
if 'i' in runtypes:
print "======================================================"
print "Checking ideal-recovery case..."
clusters_i = chmodelMRPGraph_andgetclusters(idealcasepeaks, model, 200)
results[nmix]['i' ].append(cluster_many_eval_stats(mixset, clusters_i , activitycurve_t, printindices=False))
if 'is' in runtypes:
print "======================================================"
print "Checking ideal-recovery-scramblenoise case..."
# Construct "ideal-plus-scramble" dataset - ideal, plus a duplicate marked as clutter and with timeposses shuffled
scrambled_ideal_peaks = deepcopy(idealcasepeaks)
timerange = (scrambled_ideal_peaks[0]['timepos'], scrambled_ideal_peaks[-1]['timepos'])
for i, peak in enumerate(scrambled_ideal_peaks):
peak['nn_gtboutindex'] = -1
peak['timepos'] = uniform(*timerange)
peaks_is = idealcasepeaks + scrambled_ideal_peaks
shuffle(peaks_is) # ensure order-of-presentation cannot bias results
peaks_is.sort(key=itemgetter('timepos'))
clusters_is = chmodelMRPGraph_andgetclusters(peaks_is, model, 1)
results[nmix]['is'].append(cluster_many_eval_stats(mixset, clusters_is, activitycurve_t, printindices=False))
if firstrun:
if 'i' in runtypes:
plotunframed_rainbow(clusters_i , framesizesecs, "mrpcoloured_ideal")
if 'ip' in runtypes:
plotunframed_rainbow(clusters_ip, framesizesecs, "mrpcoloured_idealpeek")
if 'is' in runtypes:
plotunframed_rainbow(clusters_is, framesizesecs, "mrpcoloured_idealscramble")
firstrun = False
plt.close('all') # does this help prevent memleaks with thousands of Bbox etc objects kept?
print("gc...")
print gc.collect()
statsfile = open("%s/chchstats%s.csv" % (annotdir, specgrammodecode), 'w')
statsfile.write('nmix,runtype,whichstat')
for i in xrange(len(results[mixestotest[0]]['a'])):
statsfile.write(',val%i' % i)
statsfile.write("\n")
statstolist = ["Fsn", "Ftrans", "Fsigtrans"]
for nmix in mixestotest:
print "-------------------------------------------"
print "Overall results for nmix %d (%d-fold xv, %s)" % (nmix, len(results[nmix]['a']), analysistype)
for runtype in runtypes:
print "[%2s] " % (runtype),
# results[nmix][runtype] is a list of dictionaries
for whichstat in statstolist:
alist = [adict[whichstat] for adict in results[nmix][runtype]]
print "%s: %-6s " % (whichstat, "%.3g" % mean(alist)),
statsfile.write("%i,%s,%s,%s\n" % (nmix, runtype, whichstat, ','.join(map(str, alist)) ))
print
statsfile.close()
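# Illustrative layout of the per-run stats CSV written above (the header matches the
# write calls; the runtype/stat rows and numeric values are made-up examples):
#   nmix,runtype,whichstat,val0,val1,...
#   2,a,Fsn,0.812,0.799,...
#   2,a,Ftrans,0.641,0.655,...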
#shutil.rmtree(tmpdir)
endtime = time.time()
timetaken = datetime.timedelta(seconds=endtime-starttime)
print("Finished. Time taken: %s" % (str(timetaken)))
|
gpl-2.0
|
acaciawater/spaarwater
|
spaarwater/management/commands/dump_resprobes.py
|
1
|
1958
|
'''
Created on Mar 15, 2018
@author: theo
'''
'''
Created on Feb 13, 2014
@author: theo
'''
from django.core.management.base import BaseCommand
from acacia.data.models import Series
import os,logging
import pandas as pd
logger = logging.getLogger('acacia.data')
resprobes = (502,687)
class Command(BaseCommand):
args = ''
help = 'Dumps all series as csv files'
def add_arguments(self,parser):
parser.add_argument('-d', '--dest',
action='store',
dest = 'dest',
default = '.',
help = 'destination folder')
parser.add_argument('-p', '--pk',
action='store',
type = int,
dest = 'pk',
default = None,
help = 'dump single series')
def handle1(self, *args, **options):
dest = options.get('dest', '.')
pk = options.get('pk', None)
if pk is None:
series = Series.objects.filter(pk__range=resprobes)
else:
series = Series.objects.filter(pk=pk)
data = [s.to_pandas() for s in series]
names = [s.pk for s in series]
series_data = dict(zip(names,data))
df = pd.DataFrame(series_data)
df.to_csv('resprobes.csv')
def handle(self, *args, **options):
dest = options.get('dest', '.')
if not os.path.exists(dest):
os.makedirs(dest)
pk = options.get('pk', None)
if pk is None:
series = Series.objects.filter(pk__range=resprobes)
else:
series = Series.objects.filter(pk=pk)
for s in series:
print s.id, s
filename = os.path.join(dest,'{}.csv'.format(s.pk))
with open(filename,'w') as f:
for p in s.datapoints.order_by('date'):
f.write('{},{},{},{}\n'.format(p.id,p.series_id,p.date.strftime('%d/%m/%Y %H:%M'),p.value))
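# Hypothetical invocation sketch (illustrative only, not part of the original
# command; Django derives the command name from this file's name):
#   python manage.py dump_resprobes --dest ./out          # dump the resprobe pk range
#   python manage.py dump_resprobes --dest ./out --pk 502 # dump a single series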
|
apache-2.0
|
gamahead/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkcairo.py
|
69
|
2207
|
"""
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
import gtk
if gtk.pygtk_version < (2,7,0):
import cairo.gtk
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
backend_version = 'PyGTK(%d.%d.%d) ' % gtk.pygtk_version + \
'Pycairo(%s)' % backend_cairo.backend_version
_debug = False
#_debug = True
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if _debug: print 'backend_gtkcairo.%s()' % fn_name()
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKCairo(thisFig)
return FigureManagerGTK(canvas, num)
class RendererGTKCairo (backend_cairo.RendererCairo):
if gtk.pygtk_version >= (2,7,0):
def set_pixmap (self, pixmap):
self.ctx = pixmap.cairo_create()
self.ctx.save() # restore, save - when call new_gc()
else:
def set_pixmap (self, pixmap):
self.ctx = cairo.gtk.gdk_cairo_create (pixmap)
self.ctx.save() # restore, save - when call new_gc()
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
if _debug: print '%s.%s()' % (self.__class__.__name__, _fn_name())
self._renderer = RendererGTKCairo (self.figure.dpi)
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
class NavigationToolbar2Cairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
|
gpl-3.0
|
jreback/pandas
|
pandas/tests/frame/methods/test_compare.py
|
8
|
6158
|
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = 4.0
result = df.compare(df2, align_axis=align_axis)
if align_axis in (1, "columns"):
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, 4.0]],
index=indices,
columns=columns,
)
else:
indices = pd.MultiIndex.from_product([[0, 2], ["self", "other"]])
columns = pd.Index(["col1", "col3"])
expected = pd.DataFrame(
[["a", np.nan], ["c", np.nan], [np.nan, 3.0], [np.nan, 4.0]],
index=indices,
columns=columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"keep_shape, keep_equal",
[
(True, False),
(False, True),
(True, True),
# False, False case is already covered in test_compare_axis
],
)
def test_compare_various_formats(keep_shape, keep_equal):
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = 4.0
result = df.compare(df2, keep_shape=keep_shape, keep_equal=keep_equal)
if keep_shape:
indices = pd.Index([0, 1, 2])
columns = pd.MultiIndex.from_product(
[["col1", "col2", "col3"], ["self", "other"]]
)
if keep_equal:
expected = pd.DataFrame(
[
["a", "c", 1.0, 1.0, 1.0, 1.0],
["b", "b", 2.0, 2.0, 2.0, 2.0],
["c", "c", np.nan, np.nan, 3.0, 4.0],
],
index=indices,
columns=columns,
)
else:
expected = pd.DataFrame(
[
["a", "c", np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 3.0, 4.0],
],
index=indices,
columns=columns,
)
else:
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", 1.0, 1.0], ["c", "c", 3.0, 4.0]], index=indices, columns=columns
)
tm.assert_frame_equal(result, expected)
def test_compare_with_equal_nulls():
# We want to make sure two NaNs are considered the same
# and dropped where applicable
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
result = df.compare(df2)
indices = pd.Index([0])
columns = pd.MultiIndex.from_product([["col1"], ["self", "other"]])
expected = pd.DataFrame([["a", "c"]], index=indices, columns=columns)
tm.assert_frame_equal(result, expected)
def test_compare_with_non_equal_nulls():
# We want to make sure the relevant NaNs do not get dropped
# even if the entire row or column are NaNs
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df2.loc[2, "col3"] = np.nan
result = df.compare(df2)
indices = pd.Index([0, 2])
columns = pd.MultiIndex.from_product([["col1", "col3"], ["self", "other"]])
expected = pd.DataFrame(
[["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, np.nan]],
index=indices,
columns=columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("align_axis", [0, 1])
def test_compare_multi_index(align_axis):
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}
)
df.columns = pd.MultiIndex.from_arrays([["a", "a", "b"], ["col1", "col2", "col3"]])
df.index = pd.MultiIndex.from_arrays([["x", "x", "y"], [0, 1, 2]])
df2 = df.copy()
df2.iloc[0, 0] = "c"
df2.iloc[2, 2] = 4.0
result = df.compare(df2, align_axis=align_axis)
if align_axis == 0:
indices = pd.MultiIndex.from_arrays(
[["x", "x", "y", "y"], [0, 0, 2, 2], ["self", "other", "self", "other"]]
)
columns = pd.MultiIndex.from_arrays([["a", "b"], ["col1", "col3"]])
data = [["a", np.nan], ["c", np.nan], [np.nan, 3.0], [np.nan, 4.0]]
else:
indices = pd.MultiIndex.from_arrays([["x", "y"], [0, 2]])
columns = pd.MultiIndex.from_arrays(
[
["a", "a", "b", "b"],
["col1", "col1", "col3", "col3"],
["self", "other", "self", "other"],
]
)
data = [["a", "c", np.nan, np.nan], [np.nan, np.nan, 3.0, 4.0]]
expected = pd.DataFrame(data=data, index=indices, columns=columns)
tm.assert_frame_equal(result, expected)
def test_compare_unaligned_objects():
# test DataFrames with different indices
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 = pd.DataFrame([1, 2, 3], index=["a", "b", "c"])
df2 = pd.DataFrame([1, 2, 3], index=["a", "b", "d"])
df1.compare(df2)
# test DataFrames with different shapes
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
df1 = pd.DataFrame(np.ones((3, 3)))
df2 = pd.DataFrame(np.zeros((2, 1)))
df1.compare(df2)
|
bsd-3-clause
|
hhbyyh/spark
|
python/pyspark/sql/tests/test_pandas_udf_grouped_map.py
|
4
|
20450
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
from collections import OrderedDict
from decimal import Decimal
from distutils.version import LooseVersion
from pyspark.sql import Row
from pyspark.sql.functions import array, explode, col, lit, udf, sum, pandas_udf, PandasUDFType
from pyspark.sql.types import *
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message)
class GroupedMapPandasUDFTests(ReusedSQLTestCase):
@property
def data(self):
return self.spark.range(10).toDF('id') \
.withColumn("vs", array([lit(i) for i in range(20, 30)])) \
.withColumn("v", explode(col('vs'))).drop('vs')
def test_supported_types(self):
import pyarrow as pa
values = [
1, 2, 3,
4, 5, 1.1,
2.2, Decimal(1.123),
[1, 2, 2], True, 'hello'
]
output_fields = [
('id', IntegerType()), ('byte', ByteType()), ('short', ShortType()),
('int', IntegerType()), ('long', LongType()), ('float', FloatType()),
('double', DoubleType()), ('decim', DecimalType(10, 3)),
('array', ArrayType(IntegerType())), ('bool', BooleanType()), ('str', StringType())
]
# TODO: Add BinaryType to variables above once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) >= LooseVersion("0.10.0"):
values.append(bytearray([0x01, 0x02]))
output_fields.append(('bin', BinaryType()))
output_schema = StructType([StructField(*x) for x in output_fields])
df = self.spark.createDataFrame([values], schema=output_schema)
# Different forms of group map pandas UDF, results of these are the same
udf1 = pandas_udf(
lambda pdf: pdf.assign(
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf2 = pandas_udf(
lambda _, pdf: pdf.assign(
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
),
output_schema,
PandasUDFType.GROUPED_MAP
)
udf3 = pandas_udf(
lambda key, pdf: pdf.assign(
id=key[0],
byte=pdf.byte * 2,
short=pdf.short * 2,
int=pdf.int * 2,
long=pdf.long * 2,
float=pdf.float * 2,
double=pdf.double * 2,
decim=pdf.decim * 2,
bool=False if pdf.bool else True,
str=pdf.str + 'there',
array=pdf.array,
),
output_schema,
PandasUDFType.GROUPED_MAP
)
result1 = df.groupby('id').apply(udf1).sort('id').toPandas()
expected1 = df.toPandas().groupby('id').apply(udf1.func).reset_index(drop=True)
result2 = df.groupby('id').apply(udf2).sort('id').toPandas()
expected2 = expected1
result3 = df.groupby('id').apply(udf3).sort('id').toPandas()
expected3 = expected1
self.assertPandasEqual(expected1, result1)
self.assertPandasEqual(expected2, result2)
self.assertPandasEqual(expected3, result3)
def test_array_type_correct(self):
df = self.data.withColumn("arr", array(col("id"))).repartition(1, "id")
output_schema = StructType(
[StructField('id', LongType()),
StructField('v', IntegerType()),
StructField('arr', ArrayType(LongType()))])
udf = pandas_udf(
lambda pdf: pdf,
output_schema,
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_register_grouped_map_udf(self):
foo_udf = pandas_udf(lambda x: x, "id long", PandasUDFType.GROUPED_MAP)
with QuietTest(self.sc):
with self.assertRaisesRegexp(
ValueError,
'f.*SQL_BATCHED_UDF.*SQL_SCALAR_PANDAS_UDF.*SQL_GROUPED_AGG_PANDAS_UDF.*'):
self.spark.catalog.registerFunction("foo_udf", foo_udf)
def test_decorator(self):
df = self.data
@pandas_udf(
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
def foo(pdf):
return pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_coerce(self):
df = self.data
foo = pandas_udf(
lambda pdf: pdf,
'id long, v double',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo.func).reset_index(drop=True)
expected = expected.assign(v=expected.v.astype('float64'))
self.assertPandasEqual(expected, result)
def test_complex_groupby(self):
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby(col('id') % 2 == 0).apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = pdf.groupby(pdf['id'] % 2 == 0).apply(normalize.func)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_empty_groupby(self):
df = self.data
@pandas_udf(
'id long, v int, norm double',
PandasUDFType.GROUPED_MAP
)
def normalize(pdf):
v = pdf.v
return pdf.assign(norm=(v - v.mean()) / v.std())
result = df.groupby().apply(normalize).sort('id', 'v').toPandas()
pdf = df.toPandas()
expected = normalize.func(pdf)
expected = expected.sort_values(['id', 'v']).reset_index(drop=True)
expected = expected.assign(norm=expected.norm.astype('float64'))
self.assertPandasEqual(expected, result)
def test_datatype_string(self):
df = self.data
foo_udf = pandas_udf(
lambda pdf: pdf.assign(v1=pdf.v * pdf.id * 1.0, v2=pdf.v + pdf.id),
'id long, v int, v1 double, v2 long',
PandasUDFType.GROUPED_MAP
)
result = df.groupby('id').apply(foo_udf).sort('id').toPandas()
expected = df.toPandas().groupby('id').apply(foo_udf.func).reset_index(drop=True)
self.assertPandasEqual(expected, result)
def test_wrong_return_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegexp(
NotImplementedError,
'Invalid returnType.*grouped map Pandas UDF.*MapType'):
pandas_udf(
lambda pdf: pdf,
'id long, v map<int, int>',
PandasUDFType.GROUPED_MAP)
def test_wrong_args(self):
df = self.data
with QuietTest(self.sc):
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(lambda x: x)
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(udf(lambda x: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(sum(df.v))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(df.v + 1)
with self.assertRaisesRegexp(ValueError, 'Invalid function'):
df.groupby('id').apply(
pandas_udf(lambda: 1, StructType([StructField("d", DoubleType())])))
with self.assertRaisesRegexp(ValueError, 'Invalid udf'):
df.groupby('id').apply(pandas_udf(lambda x, y: x, DoubleType()))
with self.assertRaisesRegexp(ValueError, 'Invalid udf.*GROUPED_MAP'):
df.groupby('id').apply(
pandas_udf(lambda x, y: x, DoubleType(), PandasUDFType.SCALAR))
def test_unsupported_types(self):
import pyarrow as pa
common_err_msg = 'Invalid returnType.*grouped map Pandas UDF.*'
unsupported_types = [
StructField('map', MapType(StringType(), IntegerType())),
StructField('arr_ts', ArrayType(TimestampType())),
StructField('null', NullType()),
]
# TODO: Remove this if-statement once minimum pyarrow version is 0.10.0
if LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
unsupported_types.append(StructField('bin', BinaryType()))
for unsupported_type in unsupported_types:
schema = StructType([StructField('id', LongType(), True), unsupported_type])
with QuietTest(self.sc):
with self.assertRaisesRegexp(NotImplementedError, common_err_msg):
pandas_udf(lambda x: x, schema, PandasUDFType.GROUPED_MAP)
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
df = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
foo_udf = pandas_udf(lambda pdf: pdf, 'time timestamp', PandasUDFType.GROUPED_MAP)
result = df.groupby('time').apply(foo_udf).sort('time')
self.assertPandasEqual(df.toPandas(), result.toPandas())
def test_udf_with_key(self):
import numpy as np
df = self.data
pdf = df.toPandas()
def foo1(key, pdf):
assert type(key) == tuple
assert type(key[0]) == np.int64
return pdf.assign(v1=key[0],
v2=pdf.v * key[0],
v3=pdf.v * pdf.id,
v4=pdf.v * pdf.id.mean())
def foo2(key, pdf):
assert type(key) == tuple
assert type(key[0]) == np.int64
assert type(key[1]) == np.int32
return pdf.assign(v1=key[0],
v2=key[1],
v3=pdf.v * key[0],
v4=pdf.v + key[1])
def foo3(key, pdf):
assert type(key) == tuple
assert len(key) == 0
return pdf.assign(v1=pdf.v * pdf.id)
# v2 is int because numpy.int64 * pd.Series<int32> results in pd.Series<int32>
# v3 is long because pd.Series<int64> * pd.Series<int32> results in pd.Series<int64>
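        # Illustrative restatement of the promotion rules noted above (assumed
        # example, not part of the original test):
        #   np.int64(2) * pd.Series([1], dtype='int32')                    -> int32 Series
        #   pd.Series([1], dtype='int64') * pd.Series([1], dtype='int32')  -> int64 Series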
udf1 = pandas_udf(
foo1,
'id long, v int, v1 long, v2 int, v3 long, v4 double',
PandasUDFType.GROUPED_MAP)
udf2 = pandas_udf(
foo2,
'id long, v int, v1 long, v2 int, v3 int, v4 int',
PandasUDFType.GROUPED_MAP)
udf3 = pandas_udf(
foo3,
'id long, v int, v1 long',
PandasUDFType.GROUPED_MAP)
# Test groupby column
result1 = df.groupby('id').apply(udf1).sort('id', 'v').toPandas()
expected1 = pdf.groupby('id')\
.apply(lambda x: udf1.func((x.id.iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected1, result1)
# Test groupby expression
result2 = df.groupby(df.id % 2).apply(udf1).sort('id', 'v').toPandas()
expected2 = pdf.groupby(pdf.id % 2)\
.apply(lambda x: udf1.func((x.id.iloc[0] % 2,), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected2, result2)
# Test complex groupby
result3 = df.groupby(df.id, df.v % 2).apply(udf2).sort('id', 'v').toPandas()
expected3 = pdf.groupby([pdf.id, pdf.v % 2])\
.apply(lambda x: udf2.func((x.id.iloc[0], (x.v % 2).iloc[0],), x))\
.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected3, result3)
# Test empty groupby
result4 = df.groupby().apply(udf3).sort('id', 'v').toPandas()
expected4 = udf3.func((), pdf)
self.assertPandasEqual(expected4, result4)
def test_column_order(self):
import pandas as pd
# Helper function to set column names from a list
def rename_pdf(pdf, names):
pdf.rename(columns={old: new for old, new in
zip(pd_result.columns, names)}, inplace=True)
df = self.data
grouped_df = df.groupby('id')
grouped_pdf = df.toPandas().groupby('id')
# Function returns a pdf with required column names, but order could be arbitrary using dict
def change_col_order(pdf):
# Constructing a DataFrame from a dict should result in the same order,
# but use from_items to ensure the pdf column order is different than schema
return pd.DataFrame.from_items([
('id', pdf.id),
('u', pdf.v * 2),
('v', pdf.v)])
ordered_udf = pandas_udf(
change_col_order,
'id long, v int, u int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by name from the pdf
result = grouped_df.apply(ordered_udf).sort('id', 'v')\
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(change_col_order)
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with positional columns, indexed by range
def range_col_order(pdf):
# Create a DataFrame with positional columns, fix types to long
return pd.DataFrame(list(zip(pdf.id, pdf.v * 3, pdf.v)), dtype='int64')
range_udf = pandas_udf(
range_col_order,
'id long, u long, v long',
PandasUDFType.GROUPED_MAP
)
# The UDF result uses positional columns from the pdf
result = grouped_df.apply(range_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(range_col_order)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
# Function returns a pdf with columns indexed with integers
def int_index(pdf):
return pd.DataFrame(OrderedDict([(0, pdf.id), (1, pdf.v * 4), (2, pdf.v)]))
int_index_udf = pandas_udf(
int_index,
'id long, u int, v int',
PandasUDFType.GROUPED_MAP
)
# The UDF result should assign columns by position of integer index
result = grouped_df.apply(int_index_udf).sort('id', 'v') \
.select('id', 'u', 'v').toPandas()
pd_result = grouped_pdf.apply(int_index)
rename_pdf(pd_result, ['id', 'u', 'v'])
expected = pd_result.sort_values(['id', 'v']).reset_index(drop=True)
self.assertPandasEqual(expected, result)
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def column_name_typo(pdf):
return pd.DataFrame({'iid': pdf.id, 'v': pdf.v})
@pandas_udf('id long, v int', PandasUDFType.GROUPED_MAP)
def invalid_positional_types(pdf):
return pd.DataFrame([(u'a', 1.2)])
with QuietTest(self.sc):
with self.assertRaisesRegexp(Exception, "KeyError: 'id'"):
grouped_df.apply(column_name_typo).collect()
import pyarrow as pa
if LooseVersion(pa.__version__) < LooseVersion("0.11.0"):
# TODO: see ARROW-1949. Remove when the minimum PyArrow version becomes 0.11.0.
with self.assertRaisesRegexp(Exception, "No cast implemented"):
grouped_df.apply(invalid_positional_types).collect()
else:
with self.assertRaisesRegexp(Exception, "an integer is required"):
grouped_df.apply(invalid_positional_types).collect()
def test_positional_assignment_conf(self):
import pandas as pd
with self.sql_conf({
"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName": False}):
@pandas_udf("a string, b float", PandasUDFType.GROUPED_MAP)
def foo(_):
return pd.DataFrame([('hi', 1)], columns=['x', 'y'])
df = self.data
result = df.groupBy('id').apply(foo).select('a', 'b').collect()
for r in result:
self.assertEqual(r.a, 'hi')
self.assertEqual(r.b, 1)
def test_self_join_with_pandas(self):
@pandas_udf('key long, col string', PandasUDFType.GROUPED_MAP)
def dummy_pandas_udf(df):
return df[['key', 'col']]
df = self.spark.createDataFrame([Row(key=1, col='A'), Row(key=1, col='B'),
Row(key=2, col='C')])
df_with_pandas = df.groupBy('key').apply(dummy_pandas_udf)
# this was throwing an AnalysisException before SPARK-24208
res = df_with_pandas.alias('temp0').join(df_with_pandas.alias('temp1'),
col('temp0.key') == col('temp1.key'))
self.assertEquals(res.count(), 5)
def test_mixed_scalar_udfs_followed_by_grouby_apply(self):
import pandas as pd
df = self.spark.range(0, 10).toDF('v1')
df = df.withColumn('v2', udf(lambda x: x + 1, 'int')(df['v1'])) \
.withColumn('v3', pandas_udf(lambda x: x + 2, 'int')(df['v1']))
result = df.groupby() \
.apply(pandas_udf(lambda x: pd.DataFrame([x.sum().sum()]),
'sum int',
PandasUDFType.GROUPED_MAP))
self.assertEquals(result.collect()[0]['sum'], 165)
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_grouped_map import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
geoscixyz/em_examples
|
em_examples/InductionSphereTEM.py
|
1
|
19333
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division  # ensures real division for expressions like 9/2 and 1/3 under Python 2
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
##############################################
# PLOTTING FUNCTIONS FOR WIDGETS
##############################################
def fcn_TDEM_InductionSpherePlaneWidget(xtx,ytx,ztx,m,orient,x0,y0,z0,a,sig,mur,xrx,yrx,zrx,logt,Comp,Type):
sig = 10**sig
t = 10**logt
if Type == 'B':
Type = 'b'
elif Type == 'dB/dt':
Type = 'dbdt'
tvec = np.logspace(-6,0,31)
xmin, xmax, dx, ymin, ymax, dy = -30., 30., 0.3, -30., 30., 0.4
X,Y = np.mgrid[xmin:xmax+dx:dx, ymin:ymax+dy:dy]
X = np.transpose(X)
Y = np.transpose(Y)
Obj = SphereTEM(m,orient,xtx,ytx,ztx)
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,y0,z0,X,Y,zrx,Type)
Bxi,Byi,Bzi,Babsi = Obj.fcn_ComputeTimeResponse(tvec,sig,mur,a,x0,y0,z0,xrx,yrx,zrx,Type)
fig1 = plt.figure(figsize=(17,6))
Ax1 = fig1.add_axes([0.04,0,0.43,1])
Ax2 = fig1.add_axes([0.6,0,0.4,1])
if Comp == 'x':
Ax1 = plotAnomalyXYplane(Ax1,t,X,Y,ztx,Bx,Comp,Type)
Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,Comp,Type)
elif Comp == 'y':
Ax1 = plotAnomalyXYplane(Ax1,t,X,Y,ztx,By,Comp,Type)
Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
Ax2 = plotResponseTEM(Ax2,t,tvec,Byi,Comp,Type)
elif Comp == 'z':
Ax1 = plotAnomalyXYplane(Ax1,t,X,Y,ztx,Bz,Comp,Type)
Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,Comp,Type)
elif Comp == 'abs':
Ax1 = plotAnomalyXYplane(Ax1,t,X,Y,ztx,Babs,Comp,Type)
Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
Ax2 = plotResponseTEM(Ax2,t,tvec,Babsi,Comp,Type)
plt.show(fig1)
def fcn_TDEM_InductionSphereProfileWidget(xtx,ztx,m,orient,x0,z0,a,sig,mur,xrx,zrx,logt,Flag):
sig = 10**sig
t = 10**logt
if orient == "Vert. Coaxial":
orient = 'x'
elif orient == "Horiz. Coplanar":
orient = 'z'
if Flag == 'dBs/dt':
Type = 'dbdt'
else:
Type = 'b'
# Same global functions can be used but with ytx, y0, yrx, Y = 0.
tvec = np.logspace(-6,0,31)
xmin, xmax, dx, zmin, zmax, dz = -30., 30., 0.3, -40., 20., 0.4
X,Z = np.mgrid[xmin:xmax+dx:dx, zmin:zmax+dz:dz]
X = np.transpose(X)
Z = np.transpose(Z)
Obj = SphereTEM(m,orient,xtx,0.,ztx)
Bxi,Byi,Bzi,Babsi = Obj.fcn_ComputeTimeResponse(tvec,sig,mur,a,x0,0.,z0,xrx,0.,zrx,Type)
Hxt,Hyt,Hzt = fcn_ComputePrimary(m,orient,xtx,0.,ztx,x0,0.,z0)
fig1 = plt.figure(figsize=(17,6))
Ax1 = fig1.add_axes([0.04,0,0.38,1])
Ax2 = fig1.add_axes([0.6,0,0.4,1])
Ax1 = plotProfileTxRxSphere(Ax1,xtx,ztx,x0,z0,a,xrx,zrx,X,Z,orient)
if Flag == 'Bp':
Hpx,Hpy,Hpz = fcn_ComputePrimary(m,orient,xtx,0.,ztx,X,0.,Z)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Hxt,Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Hpx,Hpz,Flag)
elif Flag == 'Bs':
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,0.,z0,X,0.,Z,Type)
Chi = fcn_ComputeExcitation_TEM(t,sig,mur,a)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Chi*Hxt,Chi*Hzt,Type)
Ax1 = plotProfileXZplane(Ax1,X,Z,Bx,Bz,Flag)
elif Flag == 'dBs/dt':
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,0.,z0,X,0.,Z,Type)
Chi = fcn_ComputeExcitation_TEM(t,sig,mur,a)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Chi*Hxt,Chi*Hzt,Type)
Ax1 = plotProfileXZplane(Ax1,X,Z,Bx,Bz,Flag)
if (orient == 'x') & (Flag == 'Bp'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'Bp'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
elif (orient == 'x') & (Flag == 'Bs'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'Bs'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
elif (orient == 'x') & (Flag == 'dBs/dt'):
Type = 'dbdt'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'dBs/dt'):
Type = 'dbdt'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
plt.show(fig1)
def fcn_TDEM_InductionSphereProfileEM61Widget(xtx,ztx,L,m,orient,x0,z0,a,sig,mur,logt,Flag):
xtx = xtx - L/2
xrx = xtx + L
zrx = ztx
sig = 10**sig
t = 10**logt
if orient == "Vert. Coaxial":
orient = 'x'
elif orient == "Horiz. Coplanar":
orient = 'z'
if Flag == 'dBs/dt':
Type = 'dbdt'
else:
Type = 'b'
# Same global functions can be used but with ytx, y0, yrx, Y = 0.
tvec = np.logspace(-6,0,31)
xmin, xmax, dx, zmin, zmax, dz = -30., 30., 0.3, -40., 20., 0.4
X,Z = np.mgrid[xmin:xmax+dx:dx, zmin:zmax+dz:dz]
X = np.transpose(X)
Z = np.transpose(Z)
Obj = SphereTEM(m,orient,xtx,0.,ztx)
Bxi,Byi,Bzi,Babsi = Obj.fcn_ComputeTimeResponse(tvec,sig,mur,a,x0,0.,z0,xrx,0.,zrx,Type)
Hxt,Hyt,Hzt = fcn_ComputePrimary(m,orient,xtx,0.,ztx,x0,0.,z0)
fig1 = plt.figure(figsize=(17,6))
Ax1 = fig1.add_axes([0.04,0,0.38,1])
Ax2 = fig1.add_axes([0.6,0,0.4,1])
Ax1 = plotProfileTxRxSphere(Ax1,xtx,ztx,x0,z0,a,xrx,zrx,X,Z,orient)
if Flag == 'Bp':
Hpx,Hpy,Hpz = fcn_ComputePrimary(m,orient,xtx,0.,ztx,X,0.,Z)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Hxt,Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Hpx,Hpz,Flag)
elif Flag == 'Bs':
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,0.,z0,X,0.,Z,'b')
Chi = fcn_ComputeExcitation_TEM(t,sig,mur,a,Type)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Chi*Hxt,Chi*Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Bx,Bz,Flag)
elif Flag == 'dBs/dt':
Bx,By,Bz,Babs = Obj.fcn_ComputeTimeResponse(t,sig,mur,a,x0,0.,z0,X,0.,Z,'dbdt')
Chi = fcn_ComputeExcitation_TEM(t,sig,mur,a,Type)
Ax1 = plotProfileTxRxArrow(Ax1,x0,z0,Chi*Hxt,Chi*Hzt,Flag)
Ax1 = plotProfileXZplane(Ax1,X,Z,Bx,Bz,Flag)
if (orient == 'x') & (Flag == 'Bp'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'Bp'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
elif (orient == 'x') & (Flag == 'Bs'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'Bs'):
Type = 'b'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
elif (orient == 'x') & (Flag == 'dBs/dt'):
Type = 'dbdt'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bxi,orient,Type)
elif (orient == 'z') & (Flag == 'dBs/dt'):
Type = 'dbdt'
Ax2 = plotResponseTEM(Ax2,t,tvec,Bzi,orient,Type)
plt.show(fig1)
##############################################
# GLOBAL FUNCTIONS
##############################################
def fcn_ComputeExcitation_TEM(t,sig,mur,a,Type):
"""Compute Excitation Factor (TEM)"""
beta = np.sqrt(4*np.pi*1e-7*sig) * a
N = 2000
nvec = np.linspace(1,N,N)
if mur < 1.01:
chi = np.zeros(np.size(t))
if Type == 'b':
if np.size(t) == 1:
SUM_1 = np.sum(np.exp(-(nvec*beta)**2/t))
SUM_2 = np.sum(nvec*sp.special.erfc(nvec*beta/np.sqrt(t)))
chi = (9/2)*(1/3 + t/beta**2 - (2/beta)*np.sqrt(t/np.pi)*(1 + 2*SUM_1) + 4*SUM_2)
else:
for tt in range(0,np.size(t)):
SUM_1 = np.sum(np.exp(-(nvec*beta)**2/t[tt]))
SUM_2 = np.sum(nvec*sp.special.erfc(nvec*beta/np.sqrt(t[tt])))
chi[tt] = (9/2)*(1/3 + t[tt]/beta**2 - (2/beta)*np.sqrt(t[tt]/np.pi)*(1 + 2*SUM_1) + 4*SUM_2)
elif Type == 'dbdt':
if np.size(t) == 1:
SUM = np.sum(np.exp(-(nvec*beta)**2/t))
chi = (9/2)*(1/beta**2 - (1/(beta*np.sqrt(np.pi*t)))*(1 + 2*SUM))
else:
for tt in range(0,np.size(t)):
SUM = np.sum(np.exp(-(nvec*beta)**2/t[tt]))
chi[tt] = (9/2)*(1/beta**2 - (1/(beta*np.sqrt(np.pi*t[tt])))*(1 + 2*SUM))
else:
N = 2000 # Coefficients
eta = np.pi * (np.linspace(1,N,N) + 1/4)
eta0 = np.pi * np.linspace(1,N,N)
# Converge eta coefficients
for pp in range (0,10):
eta = eta0 + np.arctan((mur - 1)*eta/(mur - 1 + eta**2))
chi = np.zeros(np.size(t))
# Get Excitation Factor
if Type == 'b':
if np.size(t) == 1:
chi = (9*mur)*np.sum( np.exp(-t*(eta/beta)**2)/((mur+2)*(mur-1) + eta**2) )
else:
for tt in range(0,np.size(t)):
chi[tt] = (9*mur)*np.sum( np.exp(-t[tt]*(eta/beta)**2)/((mur+2)*(mur-1) + eta**2) )
elif Type == 'dbdt':
if np.size(t) == 1:
                chi = -(9*mur)*np.sum( eta**2*np.exp(-t*(eta/beta)**2)/(beta**2*((mur+2)*(mur-1) + eta**2)) )
else:
for tt in range(0,np.size(t)):
                    chi[tt] = -(9*mur)*np.sum( eta**2*np.exp(-t[tt]*(eta/beta)**2)/(beta**2*((mur+2)*(mur-1) + eta**2)) )
return chi
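# Minimal usage sketch for the excitation factor above (assumed values; kept as
# comments so the module's import-time behaviour is unchanged):
#   t_demo   = np.logspace(-6, 0, 31)                                    # times [s]
#   chi_b    = fcn_ComputeExcitation_TEM(t_demo, 1e-2, 1., 10., 'b')     # B-field excitation
#   chi_dbdt = fcn_ComputeExcitation_TEM(t_demo, 1e-2, 1., 10., 'dbdt')  # dB/dt excitation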
def fcn_ComputePrimary(m,orient,xtx,ytx,ztx,X,Y,Z):
"""Computes Inducing Field at Sphere"""
R = np.sqrt((X-xtx)**2 + (Y-ytx)**2 + (Z-ztx)**2)
if orient == "x":
Hpx = (1/(4*np.pi))*(3*m*(X-xtx)*(X-xtx)/R**5 - m/R**3)
Hpy = (1/(4*np.pi))*(3*m*(Y-ytx)*(X-xtx)/R**5)
Hpz = (1/(4*np.pi))*(3*m*(Z-ztx)*(X-xtx)/R**5)
elif orient == "y":
Hpx = (1/(4*np.pi))*(3*m*(X-xtx)*(Y-ytx)/R**5)
Hpy = (1/(4*np.pi))*(3*m*(Y-ytx)*(Y-ytx)/R**5 - m/R**3)
Hpz = (1/(4*np.pi))*(3*m*(Z-ztx)*(Y-ytx)/R**5)
elif orient == "z":
Hpx = (1/(4*np.pi))*(3*m*(X-xtx)*(Z-ztx)/R**5)
Hpy = (1/(4*np.pi))*(3*m*(Y-ytx)*(Z-ztx)/R**5)
Hpz = (1/(4*np.pi))*(3*m*(Z-ztx)*(Z-ztx)/R**5 - m/R**3)
return Hpx, Hpy, Hpz
##############################################
# GLOBAL PLOTTING FUNCTIONS
##############################################
def plotAnomalyXYplane(Ax,t,X,Y,Z,B,Comp,Type):
FS = 20
tol = 1e5
Sign = np.sign(B)
B = 1e9*np.abs(B) # convert to nT or nT/s
MAX = np.max(B)
B = np.log10(tol*B/MAX)
Sign[B<0] = 0.
B[B<0] = 0.
Cmap = 'RdYlBu'
#Cmap = 'seismic_r'
if Comp == 'abs':
TickLabels = MAX*np.array([1.,1e-1,1e-2,1e-3,1e-4,0.,-1e-4,-1e-3,-1e-2,-1e-1,-1])
TickLabels = ["%.1e" % x for x in TickLabels]
Cplot = Ax.contourf(X,Y,Sign*B,50,cmap=Cmap, vmin=-5, vmax=5)
cbar = plt.colorbar(Cplot, ax=Ax, pad=0.02, ticks=-np.linspace(-5,5,11))
else:
TickLabels = MAX*np.array([-1.,-1e-1,-1e-2,-1e-3,-1e-4,0.,1e-4,1e-3,1e-2,1e-1,1])
TickLabels = ["%.1e" % x for x in TickLabels]
Cplot = Ax.contourf(X,Y,Sign*B,50,cmap=Cmap, vmin=-5, vmax=5)
cbar = plt.colorbar(Cplot, ax=Ax, pad=0.02, ticks=np.linspace(-5,5,11))
if Comp == 'x' and Type == 'b':
cbar.set_label('[nT]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{Bx}$",fontsize=FS+6)
elif Comp == 'y' and Type == 'b':
cbar.set_label('[nT]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{By}$",fontsize=FS+6)
elif Comp == 'z' and Type == 'b':
cbar.set_label('[nT]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{Bz}$",fontsize=FS+6)
elif Comp == 'x' and Type == 'dbdt':
cbar.set_label('[nT/s]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{dBx/dt}$",fontsize=FS+6)
elif Comp == 'y' and Type == 'dbdt':
cbar.set_label('[nT/s]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{dBy/dt}$",fontsize=FS+6)
elif Comp == 'z' and Type == 'dbdt':
cbar.set_label('[nT/s]', rotation=270, labelpad = 25, size=FS+4)
Ax.set_title("$\mathbf{dBz/dt}$",fontsize=FS+6)
cbar.set_ticklabels(TickLabels)
cbar.ax.tick_params(labelsize=FS-2)
Ax.set_xbound(np.min(X),np.max(X))
Ax.set_ybound(np.min(Y),np.max(Y))
Ax.set_xlabel('X [m]',fontsize=FS+2)
Ax.set_ylabel('Y [m]',fontsize=FS+2,labelpad=-10)
Ax.tick_params(labelsize=FS-2)
return Ax
def plotPlaceTxRxSphereXY(Ax,xtx,ytx,xrx,yrx,x0,y0,a):
Xlim = Ax.get_xlim()
Ylim = Ax.get_ylim()
FS = 20
Ax.scatter(xtx,ytx,s=100,color='k')
Ax.text(xtx-0.75,ytx+1.5,'$\mathbf{Tx}$',fontsize=FS+6)
Ax.scatter(xrx,yrx,s=100,color='k')
Ax.text(xrx-0.75,yrx-4,'$\mathbf{Rx}$',fontsize=FS+6)
xs = x0 + a*np.cos(np.linspace(0,2*np.pi,41))
ys = y0 + a*np.sin(np.linspace(0,2*np.pi,41))
Ax.plot(xs,ys,ls=':',color='k',linewidth=3)
Ax.set_xbound(Xlim)
Ax.set_ybound(Ylim)
return Ax
def plotResponseTEM(Ax,ti,t,B,Comp,Type):
FS = 20
    B = 1e9*np.abs(B) # convert to nT or nT/s; loglog cannot plot negative values
if Type == 'b':
Ylim = np.array([B[0]/1e3,B[0]])
elif Type == 'dbdt':
Ylim = np.array([B[0]/1e6,B[0]])
B[B < Ylim[0]] = 0.1*Ylim[0]
xTicks = (np.logspace(np.log(np.min(t)),np.log(np.max(t)),7))
Ax.grid('both', linestyle='-', linewidth=0.8, color=[0.8, 0.8, 0.8])
Ax.loglog(t,0*t,color='k',linewidth=2)
Ax.loglog(t,B,color='k',linewidth=4)
Ax.loglog(np.array([ti,ti]),1.1*Ylim,linewidth=3,color='r')
Ax.set_xbound(np.min(t),np.max(t))
Ax.set_ybound(1.1*Ylim)
Ax.set_xlabel('Times [s]',fontsize=FS+2)
Ax.tick_params(labelsize=FS-2)
Ax.yaxis.set_major_formatter(FormatStrFormatter('%.1e'))
if Comp == 'x' and Type == 'b':
Ax.set_ylabel('$\mathbf{|Bx|}$ [nT]',fontsize=FS+4,labelpad=-5)
Ax.set_title('$\mathbf{Bx}$ Response at $\mathbf{Rx}$',fontsize=FS+6)
elif Comp == 'z' and Type == 'b':
Ax.set_ylabel('$\mathbf{|Bz|}$ [nT]',fontsize=FS+4,labelpad=-5)
Ax.set_title('$\mathbf{Bz}$ Response at $\mathbf{Rx}$',fontsize=FS+6)
elif Comp == 'x' and Type == 'dbdt':
Ax.set_ylabel('$\mathbf{|dBx/dt|}$ [nT/s]',fontsize=FS+4,labelpad=-5)
Ax.set_title('$\mathbf{dBx/dt}$ Response at $\mathbf{Rx}$',fontsize=FS+6)
elif Comp == 'z' and Type == 'dbdt':
Ax.set_ylabel('$\mathbf{|dBz/dt|}$ [nT/s]',fontsize=FS+4,labelpad=-5)
Ax.set_title('$\mathbf{dBz/dt}$ Response at $\mathbf{Rx}$',fontsize=FS+6)
return Ax
def plotProfileTxRxSphere(Ax,xtx,ztx,x0,z0,a,xrx,zrx,X,Z,orient):
FS = 22
phi = np.linspace(0,2*np.pi,41)
psi = np.linspace(0,np.pi,21)
if orient == 'x':
Xtx = xtx + 0.5*np.cos(phi)
Ztx = ztx + 2*np.sin(phi)
Xrx = xrx + 0.5*np.cos(phi)
Zrx = zrx + 2*np.sin(phi)
elif orient == 'z':
Xtx = xtx + 2*np.cos(phi)
Ztx = ztx + 0.5*np.sin(phi)
Xrx = xrx + 2*np.cos(phi)
Zrx = zrx + 0.5*np.sin(phi)
# Xs = x0 + a*np.cos(psi)
# Zs1 = z0 + a*np.sin(psi)
# Zs2 = z0 - a*np.sin(psi)
XS = x0 + a*np.cos(phi)
ZS = z0 + a*np.sin(phi)
Ax.fill_between(np.array([np.min(X),np.max(X)]),np.array([0.,0.]),np.array([np.max(Z),np.max(Z)]),facecolor=(0.9,0.9,0.9))
Ax.fill_between(np.array([np.min(X),np.max(X)]),np.array([0.,0.]),np.array([np.min(Z),np.min(Z)]),facecolor=(0.6,0.6,0.6),linewidth=2)
# Ax.fill_between(Xs,Zs1,Zs2,facecolor=(0.4,0.4,0.4),linewidth=4)
polyObj = plt.Polygon(np.c_[XS,ZS],closed=True,facecolor=((0.4,0.4,0.4)),edgecolor='k',linewidth=2)
Ax.add_patch(polyObj)
Ax.plot(Xtx,Ztx,'k',linewidth=4)
Ax.plot(Xrx,Zrx,'k',linewidth=4)
# Ax.plot(x0+a*np.cos(phi),z0+a*np.sin(phi),'k',linewidth=2)
Ax.set_xbound(np.min(X),np.max(X))
Ax.set_ybound(np.min(Z),np.max(Z))
Ax.text(xtx-4,ztx+2,'$\mathbf{Tx}$',fontsize=FS)
Ax.text(xrx,zrx+2,'$\mathbf{Rx}$',fontsize=FS)
return Ax
def plotProfileXZplane(Ax,X,Z,Bx,Bz,Flag):
FS = 20
if Flag == 'Bp':
Ax.streamplot(X,Z,Bx,Bz,color='b',linewidth=3.5,arrowsize=2)
Ax.set_title('Primary Field',fontsize=FS+6)
elif Flag == 'Bs':
Ax.streamplot(X,Z,Bx,Bz,color='r',linewidth=3.5,arrowsize=2)
Ax.set_title('Secondary Field',fontsize=FS+6)
elif Flag == 'dBs/dt':
Ax.streamplot(X,Z,Bx,Bz,color='r',linewidth=3.5,arrowsize=2)
Ax.set_title('Secondary Time Derivative',fontsize=FS+6)
Ax.set_xbound(np.min(X),np.max(X))
Ax.set_ybound(np.min(Z),np.max(Z))
Ax.set_xlabel('X [m]',fontsize=FS+2)
Ax.set_ylabel('Z [m]',fontsize=FS+2,labelpad=-10)
Ax.tick_params(labelsize=FS-2)
def plotProfileTxRxArrow(Ax,x0,z0,Bxt,Bzt,Flag):
Babst = np.sqrt(Bxt**2 + Bzt**2)
dx = Bxt/Babst
dz = Bzt/Babst
if Flag == 'Bp':
Ax.arrow(x0-2.5*dx, z0-2.75*dz, 3*dx, 3*dz, fc=(0.,0.,0.8), ec="k",head_width=2.5, head_length=2.5,width=1,linewidth=2)
elif Flag == 'Bs':
Ax.arrow(x0-2.5*dx, z0-2.75*dz, 3*dx, 3*dz, fc=(0.8,0.,0.), ec="k",head_width=2.5, head_length=2.5,width=1,linewidth=2)
elif Flag == 'dBs/dt':
Ax.arrow(x0-2.5*dx, z0-2.75*dz, 3*dx, 3*dz, fc=(0.8,0.,0.), ec="k",head_width=2.5, head_length=2.5,width=1,linewidth=2)
return Ax
############################################
# CLASS: SPHERE TEM RESPONSE
############################################
############################################
# DEFINE CLASS
class SphereTEM():
"""Fucntionwhcihdf
Input variables:
Output variables:
"""
def __init__(self,m,orient,xtx,ytx,ztx):
"""Defines Initial Attributes"""
# INITIALIZES OBJECT
# m: Transmitter dipole moment
        # orient: Transmitter dipole orientation 'x', 'y' or 'z'
# xtx: Transmitter x location
# ytx: Transmitter y location
# ztx: Transmitter z location
self.m = m
self.orient = orient
self.xtx = xtx
self.ytx = ytx
self.ztx = ztx
############################################
# DEFINE METHODS
def fcn_ComputeTimeResponse(self,t,sig,mur,a,x0,y0,z0,X,Y,Z,Type):
"""Compute Single Frequency Response at (X,Y,Z) in T or T/s"""
m = self.m
orient = self.orient
xtx = self.xtx
ytx = self.ytx
ztx = self.ztx
chi = fcn_ComputeExcitation_TEM(t,sig,mur,a,Type)
Hpx,Hpy,Hpz = fcn_ComputePrimary(m,orient,xtx,ytx,ztx,x0,y0,z0)
mx = 4*np.pi*a**3*chi*Hpx/3
my = 4*np.pi*a**3*chi*Hpy/3
mz = 4*np.pi*a**3*chi*Hpz/3
R = np.sqrt((X-x0)**2 + (Y-y0)**2 + (Z-z0)**2)
Bx = (1e-9)*(3*(X-x0)*(mx*(X-x0) + my*(Y-y0) + mz*(Z-z0))/R**5 - mx/R**3)
By = (1e-9)*(3*(Y-y0)*(mx*(X-x0) + my*(Y-y0) + mz*(Z-z0))/R**5 - my/R**3)
Bz = (1e-9)*(3*(Z-z0)*(mx*(X-x0) + my*(Y-y0) + mz*(Z-z0))/R**5 - mz/R**3)
Babs = np.sqrt(Bx**2 + By**2 + Bz**2)
return Bx, By, Bz, Babs
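# Hypothetical usage sketch for the class above (assumed values; kept as
# comments so importing the module stays side-effect free):
#   sphere = SphereTEM(m=1., orient='z', xtx=0., ytx=0., ztx=5.)
#   Bx, By, Bz, Babs = sphere.fcn_ComputeTimeResponse(
#       t=1e-3, sig=1e-2, mur=1., a=10., x0=0., y0=0., z0=-20.,
#       X=5., Y=0., Z=0., Type='b')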
|
mit
|
lenovor/scikit-learn
|
sklearn/svm/tests/test_svm.py
|
116
|
31653
|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape with decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of the samples from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions.
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
|
bsd-3-clause
|
sanketloke/scikit-learn
|
examples/covariance/plot_robust_vs_empirical_covariance.py
|
73
|
6451
|
r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P. J. Rousseeuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)], color='green',
         ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
|
bsd-3-clause
|
berkeley-stat159/project-epsilon
|
code/utils/scripts/eda.py
|
3
|
3524
|
"""
This script produces exploratory analysis plots for the raw and filtered data:
 - Mosaic of the mean voxel values for each brain slice
Run with:
python eda.py
from this directory
"""
from __future__ import print_function, division
import sys, os, pdb
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
sys.path.append(os.path.join(os.path.dirname(__file__), "./"))
from plot_mosaic import *
from mask_filtered_data import *
# Locate the paths
project_path = '../../../'
data_path = project_path+'data/ds005/'
path_dict = {'data_filtered':{
'type' : 'filtered',
'feat' : '.feat',
'bold_img_name' : 'filtered_func_data_mni.nii.gz',
'run_path' : 'model/model001/'
},
'data_original':{
'type' : '',
'feat': '',
'bold_img_name' : 'bold.nii.gz',
'run_path' : 'BOLD/'
}}
#subject_list = [str(i) for i in range(1,17)]
#run_list = [str(i) for i in range(1,4)]
# Run only for subject 1 and 5 - run 1
run_list = [str(i) for i in range(1,2)]
subject_list = ['1','5']
# set gray colormap and nearest neighbor interpolation by default
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
# Create the needed directories if they do not exist
dirs = [project_path+'fig/',\
project_path+'fig/BOLD']
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
# Template to plot the unmasked filtered data
template_path = project_path+'data/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii'
# Progress bar
l = len(subject_list)*len(run_list)
sys.stdout.write("Starting EDA analysis\n")
sys.stdout.write("EDA: ")
sys.stdout.flush()
# Loop through the data type - raw or filtered
for dat in path_dict:
d_path = path_dict[dat]
# Set the data name and paths
images_paths = [('ds005' + '_sub' + s.zfill(3) + '_t1r' + r, \
data_path + 'sub%s/'%(s.zfill(3)) + d_path['run_path'] \
+ 'task001_run%s%s/%s' %(r.zfill(3),d_path['feat'],\
d_path['bold_img_name'])) \
for r in run_list \
for s in subject_list]
for image_path in images_paths:
name = image_path[0]
data_int = nib.load(image_path[1]).get_data()
data = data_int.astype(float)
mean_data = np.mean(data, axis=-1)
# Plot
if d_path['type']=='filtered':
Transpose=False
template_data_int = nib.load(template_path).get_data()
template_data = template_data_int.astype(float)
plt.imshow(\
plot_mosaic(template_data, transpose=Transpose), \
cmap='gray', alpha=1)
else:
in_brain_mask = mean_data > 375
Transpose=True
plt.contour(\
plot_mosaic(in_brain_mask, transpose=Transpose), \
cmap='gray' , alpha=1)
plt.imshow(\
plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
plt.colorbar()
        plt.title('Voxel mean values' + '\n' + (d_path['type'] + str(name)))
plt.savefig(project_path+'fig/BOLD/%s_mean_voxels.png'\
%(d_path['type'] + str(name)))
#plt.show()
plt.clf()
plt.close()
sys.stdout.write("\n\b=")
sys.stdout.flush()
sys.stdout.write("======================================\n")
sys.stdout.write("EDA analysis done\n")
sys.stdout.write("Mosaic plots in project_epsilon/fig/BOLD/ \n")
|
bsd-3-clause
|
elijah513/scikit-learn
|
examples/classification/plot_lda_qda.py
|
164
|
4806
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.lda import LDA
from sklearn.qda import QDA
###############################################################################
# colormap
cmap = colors.LinearSegmentedColormap(
'red_blue_classes',
{'red': [(0, 1, 1), (1, 0.7, 0.7)],
'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
'''Generate 2 Gaussians samples with the same covariance matrix'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -0.23], [0.83, .23]])
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C) + np.array([1, 1])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
def dataset_cov():
'''Generate 2 Gaussians samples with different covariance matrices'''
n, dim = 300, 2
np.random.seed(0)
C = np.array([[0., -1.], [2.5, .7]]) * 2.
X = np.r_[np.dot(np.random.randn(n, dim), C),
np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])]
y = np.hstack((np.zeros(n), np.ones(n)))
return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
splot = plt.subplot(2, 2, fig_index)
if fig_index == 1:
plt.title('Linear Discriminant Analysis')
plt.ylabel('Data with fixed covariance')
elif fig_index == 2:
plt.title('Quadratic Discriminant Analysis')
elif fig_index == 3:
plt.ylabel('Data with varying covariances')
tp = (y == y_pred) # True Positive
tp0, tp1 = tp[y == 0], tp[y == 1]
X0, X1 = X[y == 0], X[y == 1]
X0_tp, X0_fp = X0[tp0], X0[~tp0]
X1_tp, X1_fp = X1[tp1], X1[~tp1]
xmin, xmax = X[:, 0].min(), X[:, 0].max()
ymin, ymax = X[:, 1].min(), X[:, 1].max()
# class 0: dots
plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000') # dark red
# class 1: dots
plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099') # dark blue
# class 0 and 1 : areas
nx, ny = 200, 100
x_min, x_max = plt.xlim()
y_min, y_max = plt.ylim()
xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
np.linspace(y_min, y_max, ny))
Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
Z = Z[:, 1].reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
norm=colors.Normalize(0., 1.))
plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
# means
plt.plot(lda.means_[0][0], lda.means_[0][1],
'o', color='black', markersize=10)
plt.plot(lda.means_[1][0], lda.means_[1][1],
'o', color='black', markersize=10)
return splot
def plot_ellipse(splot, mean, cov, color):
v, w = linalg.eigh(cov)
u = w[0] / linalg.norm(w[0])
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
    # filled Gaussian at 2 standard deviations
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
splot.set_xticks(())
splot.set_yticks(())
def plot_lda_cov(lda, splot):
plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red')
plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue')
def plot_qda_cov(qda, splot):
plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red')
plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue')
###############################################################################
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
# LDA
lda = LDA(solver="svd", store_covariance=True)
y_pred = lda.fit(X, y).predict(X)
splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
plot_lda_cov(lda, splot)
plt.axis('tight')
# QDA
qda = QDA()
y_pred = qda.fit(X, y, store_covariances=True).predict(X)
splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
plot_qda_cov(qda, splot)
plt.axis('tight')
plt.suptitle('LDA vs QDA')
plt.show()
|
bsd-3-clause
|
wavelets/pandashells
|
pandashells/test/p_df_test.py
|
7
|
5636
|
#! /usr/bin/env python
import os
import subprocess
import tempfile
from mock import patch, MagicMock
from unittest import TestCase
import pandas as pd
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from pandashells.bin.p_df import (
needs_plots,
get_modules_and_shortcuts,
framify,
process_command,
)
class NeedsPlots(TestCase):
def test_doesnt_need_plots(self):
command_list = ['df.reset_index()', 'df.head()']
self.assertFalse(needs_plots(command_list))
def test_needs_plots(self):
command_list = ['set_xlim([1, 2])']
self.assertTrue(needs_plots(command_list))
class GetModulesAndShortcutsTests(TestCase):
def test_no_extra_needed(self):
command_list = ['df.reset_index()', 'df.head()']
self.assertEqual(
set(get_modules_and_shortcuts(command_list)),
{
('pandas', 'pd'),
('dateutil', 'dateutil'),
}
)
def test_get_extra_import_all_needed(self):
command_list = [
'pl.plot(df.x)',
'sns.distplot(df.x)',
'scp.stats.norm(1, 1)',
'np.random.randn(1)'
]
self.assertEqual(
set(get_modules_and_shortcuts(command_list)),
{
('dateutil', 'dateutil'),
('pandas', 'pd'),
('scipy', 'scp'),
('pylab', 'pl'),
('seaborn', 'sns'),
('numpy', 'np'),
},
)
class FramifyTests(TestCase):
def test_dataframe_to_dataframe(self):
cmd = ''
df = pd.DataFrame([{'a': 1}])
out = framify(cmd, df)
self.assertTrue(isinstance(out, pd.DataFrame))
def test_series_to_dataframe(self):
cmd = ''
df = pd.Series({'a': 1})
out = framify(cmd, df)
self.assertTrue(isinstance(out, pd.DataFrame))
def test_list_to_dataframe(self):
cmd = ''
df = [1, 2, 3]
out = framify(cmd, df)
self.assertTrue(isinstance(out, pd.DataFrame))
@patch('pandashells.bin.p_df.sys')
def test_number_to_dataframe(self, sys_mock):
cmd = ''
df = 7
sys_mock.stderr = MagicMock(write=MagicMock())
sys_mock.exit = MagicMock()
framify(cmd, df)
self.assertTrue(sys_mock.stderr.write.called)
self.assertTrue(sys_mock.exit.called)
class ProcessCommandTests(TestCase):
def setUp(self):
self.df = pd.DataFrame([
{'a': 1, 'b': 10},
{'a': 2, 'b': 20},
{'a': 3, 'b': 30},
{'a': 4, 'b': 40},
])
def test_col_assignement(self):
args = MagicMock()
cmd = 'df["c"] = 2 * df["a"]'
df = process_command(args, cmd, self.df)
self.assertEqual(df.c.iloc[0], 2)
@patch('pandashells.bin.p_df.sys')
@patch('pandashells.bin.p_df.exec_plot_command')
def test_plot_needed(self, exec_plot_mock, sys_mock):
args = MagicMock()
sys_mock.exit = MagicMock()
cmd = 'df.plot(x="a", y="b")'
process_command(args, cmd, self.df)
self.assertTrue(exec_plot_mock.called)
self.assertTrue(sys_mock.exit.called)
def test_regular_command(self):
args = MagicMock()
cmd = 'df.a.value_counts()'
df = process_command(args, cmd, self.df)
self.assertEqual(set(df.index), {1, 2, 3, 4})
self.assertEqual(set(df[0]), {1})
class IntegrationTests(TestCase):
def setUp(self):
self.df = pd.DataFrame([
{'a': 1, 'b': 10},
{'a': 2, 'b': 20},
{'a': 3, 'b': 30},
{'a': 4, 'b': 40},
])
def get_command_result(self, cmd, as_table=False):
p = subprocess.Popen(
['bash', '-c', cmd],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if as_table:
stdout, stderr = p.communicate(
self.df.to_string(index=False).encode('utf-8'))
else:
stdout, stderr = p.communicate(
self.df.to_csv(index=False).encode('utf-8'))
return stdout.decode('utf-8').strip()
def test_no_command(self):
cmd = 'p.df'
df = pd.read_csv(StringIO(self.get_command_result(cmd)))
self.assertEqual(list(df.a), [1, 2, 3, 4])
def test_names(self):
cmd = 'p.df --names x y'
df = pd.read_csv(StringIO(self.get_command_result(cmd)))
self.assertEqual(list(df.columns), ['x', 'y'])
def test_multiple_commands(self):
cmd = """p.df 'df["y"] = -df.y' 'df["z"] = df["y"]' --names x y"""
df = pd.read_csv(StringIO(self.get_command_result(cmd)))
self.assertTrue(all(df.z < 0))
def test_input_table(self):
cmd = 'p.df -i table'
df = pd.read_csv(StringIO(
self.get_command_result(cmd, as_table=True)))
self.assertEqual(list(df.columns), ['a', 'b'])
def test_output_table(self):
cmd = 'p.df -o table'
df = pd.read_csv(
StringIO(self.get_command_result(cmd)), delimiter=r'\s+')
self.assertEqual(list(df.columns), ['a', 'b'])
def test_plotting(self):
dir_name = tempfile.mkdtemp()
file_name = os.path.join(dir_name, 'deleteme.png')
cmd = """p.df 'df.plot(x="a", y="b")' --savefig {}""".format(file_name)
self.get_command_result(cmd)
file_existed = os.path.isfile(file_name)
os.system('rm -rf {}'.format(dir_name))
self.assertTrue(file_existed)
|
bsd-2-clause
|
quheng/scikit-learn
|
examples/svm/plot_svm_nonlinear.py
|
268
|
1091
|
"""
==============
Non-linear SVM
==============
Perform binary classification using non-linear SVC
with RBF kernel. The target to predict is an XOR of the
inputs.
The color map illustrates the decision function learned by the SVC.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-3, 3, 500),
np.linspace(-3, 3, 500))
np.random.seed(0)
X = np.random.randn(300, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
clf = svm.NuSVC()
clf.fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                       linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.show()
|
bsd-3-clause
|
jason-neal/equanimous-octo-tribble
|
octotribble/SpectralTools.py
|
1
|
8065
|
# SpectralTools.py
# Collection of useful tools for dealing with spectra:
from __future__ import division
import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
def BERVcorr(wl, Berv):
"""Barycentric Earth Radial Velocity correction from tapas.
A wavelength W0 of the original spectrum is seen at wavelength W1 in the spectrometer stretched by Doppler effect:
    W1 / W0 = (1 + Vr/c) = (1 - Berv/c)
    Therefore, if the spectrograph pipeline applies a BERV correction to recover W0, the measured
    wavelength W1 has to be transformed into W0 with the formula:
    W0 = W1 * (1 + Berv/c)
Inputs:
    W1 - the spectrum wavelengths in the spectrograph frame.
Berv - The Barycentric Earth Radial Velocity value for this observation in km/s
Output:
    W0 - BERV corrected wavelength values.
Note:
pyasl.dopplerShift is much smoother than this function so use that instead.
"""
c = 299792.458 # km/s
return wl * (1 + Berv / c)
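# Hedged usage sketch (added for illustration; not part of the original module).
# It round-trips a wavelength grid through BERVcorr and inverse_BERV (defined
# just below). The helper name is hypothetical.
def _example_berv_roundtrip():
    """Round-trip a wavelength grid through the BERV correction (illustrative)."""
    wl_obs = np.linspace(2110.0, 2130.0, 5)      # observed wavelengths in nm
    berv = 12.3                                  # example BERV in km/s
    wl_corrected = BERVcorr(wl_obs, berv)        # W0 = W1 * (1 + Berv/c)
    wl_back = inverse_BERV(wl_corrected, berv)   # undo the correction
    # not an exact inverse (a term of order (Berv/c)**2 remains), but tiny:
    return np.max(np.abs(wl_back - wl_obs))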
def inverse_BERV(w1, Berv):
"""Obtain un-BERV corrected wavelengths."""
c = 299792.458 # km/s
return w1 * (1 - Berv / c)
# def dopplershift(): # standard doppler correction
# return None
def air2vac(air):
"""Conversion of air wavelenghts to vacuum wavelenghts.
Adapted from wcal from Pat Hall's IRAF tasks for displaying SDSS spectra
Input: Air wavelengths in nm
Output: Vacuum wavelengths in nm
"""
print("Probably best to use pyastronomy versions !!!!!!!!!!!!!!!!!!!!!!!!")
air_in_angstroms = air * 10
    sigma2 = (10**8) / air_in_angstroms**2  # sigma^2 with wavelength in Angstroms
n = 1 + 0.000064328 + 0.0294981 / (146 - sigma2) + 0.0002554 / (41 - sigma2)
    vacuum_in_angstroms = air_in_angstroms * n
if (min(air_in_angstroms) < 1600):
print("# WARNING! formula intended for use only at >1600 Ang!")
return vacuum_in_angstroms / 10
def vac2air(vac):
"""Conversion of vacuum wavelenghts to air wavelenghts.
From http://classic.sdss.org/dr7/products/spectra/vacwavelength.html
AIR = VAC / (1.0 + 2.735182E-4 + 131.4182 / VAC^2 + 2.76249E8 / VAC^4) given in Morton (1991, ApJS, 77, 119)
Better correction for infrared may be found in
http://adsabs.harvard.edu/abs/1966Metro...2...71E
and
http://adsabs.harvard.edu/abs/1972JOSA...62..958P
"""
print("Probably best to use pyastronomy versions !!!!!!!!!!!!!!!!!!!!!!!!")
vac_in_angstroms = vac * 10
air_in_angstroms = vac_in_angstroms / (1.0 + 2.735182E-4 + 131.4182 / vac_in_angstroms**2 +
2.76249E8 / vac_in_angstroms**4)
    # Need to look at these for a NIR-compatible formula
return air_in_angstroms / 10
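# Hedged sanity sketch (added for illustration; not part of the original module).
# Vacuum wavelengths are slightly longer than the corresponding air wavelengths
# (refractive index n > 1), so air2vac above should increase every value.
# The helper name is hypothetical.
def _example_air2vac_direction():
    air_nm = np.linspace(1000.0, 2200.0, 5)   # near-infrared wavelengths in nm
    vac_nm = air2vac(air_nm)
    return bool(np.all(vac_nm > air_nm))      # expected: True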
def wav_selector(wav, flux, wav_min, wav_max, verbose=False):
"""Fast Wavelength selector between wav_min and wav_max values.
If passed lists it will return lists.
If passed np arrays it will return arrays
"""
if isinstance(wav, list): # If passed lists
wav_sel = [wav_val for wav_val in wav if (wav_min < wav_val < wav_max)]
flux_sel = [flux_val for wav_val, flux_val in zip(wav, flux) if (wav_min < wav_val < wav_max)]
elif isinstance(wav, np.ndarray):
# Super Fast masking with numpy
mask = (wav > wav_min) & (wav < wav_max)
wav_sel = wav[mask]
flux_sel = flux[mask]
if verbose:
print("mask=", mask)
print("len(mask)", len(mask))
print("wav", wav)
print("flux", flux)
else:
raise TypeError("Unsupported input wav type")
return [wav_sel, flux_sel]
# Wavelength Interplation from telluric correct
def wl_interpolation(wl, spec, ref_wl, method="scipy", kind="linear", verbose=False):
"""Interpolate Wavelengths of spectra to common WL.
Most likely convert telluric to observed spectra wl after wl mapping performed
"""
v_print = print if verbose else lambda *a, **k: None
starttime = time.time()
if method == "scipy":
v_print(kind + " scipy interpolation")
linear_interp = interp1d(wl, spec, kind=kind)
new_spec = linear_interp(ref_wl)
elif method == "numpy":
        if kind.lower() != "linear":
v_print("Warning: Cannot do " + kind + " interpolation with numpy, switching to linear")
v_print("Linear numpy interpolation")
        new_spec = np.interp(ref_wl, wl, spec)  # 1-d piecewise linear interpolation
else:
v_print("Method was given as " + method)
raise("Interpolation method not correct")
v_print("Interpolation Time = " + str(time.time() - starttime) + " seconds")
    return new_spec
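# Hedged sketch (added for illustration; not part of the original module).
# For strictly linear data the "scipy" and "numpy" code paths of
# wl_interpolation above should agree to numerical precision. The helper name
# is hypothetical.
def _example_wl_interpolation_agreement():
    wl = np.linspace(2100.0, 2200.0, 50)
    spec = 2.0 * wl + 1.0                       # linear spectrum: both methods are exact
    ref_wl = np.linspace(2105.0, 2195.0, 200)   # stays inside wl, so no extrapolation
    out_scipy = wl_interpolation(wl, spec, ref_wl, method="scipy", kind="linear")
    out_numpy = wl_interpolation(wl, spec, ref_wl, method="numpy")
    return np.max(np.abs(out_scipy - out_numpy))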
###################################################################
# Convolution
###################################################################
def unitary_Gauss(x, center, FWHM):
"""Gaussian_function of area=1.
p[0] = A
p[1] = mean
p[2] = FWHM
"""
sigma = np.abs(FWHM) / (2 * np.sqrt(2 * np.log(2)))
amp = 1.0 / (sigma * np.sqrt(2 * np.pi))
tau = -((x - center)**2) / (2 * (sigma**2))
result = amp * np.exp(tau)
return result
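# Hedged sanity check (added for illustration; not part of the original module).
# The Gaussian above is normalised to unit area, so integrating it over a wide
# grid should return a value very close to 1. The helper name is hypothetical.
def _example_unitary_gauss_area():
    x = np.linspace(-50.0, 50.0, 100001)
    gauss = unitary_Gauss(x, center=0.0, FWHM=2.0)
    return np.trapz(gauss, x)   # expected: ~1.0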
def fast_convolve(wav_val, R, wav_extended, flux_extended, fwhm_lim):
"""IP convolution multiplication step for a single wavelength value."""
FWHM = wav_val / R
index_mask = (wav_extended > (wav_val - fwhm_lim * FWHM)) & (wav_extended < (wav_val + fwhm_lim * FWHM))
flux_2convolve = flux_extended[index_mask]
IP = unitary_Gauss(wav_extended[index_mask], wav_val, FWHM)
sum_val = np.sum(IP * flux_2convolve)
    unitary_val = np.sum(IP * np.ones_like(flux_2convolve))  # Effect of convolution on a unitary (flat) spectrum, used for normalisation.
return sum_val / unitary_val
def instrument_convolution(wav, flux, chip_limits, R, fwhm_lim=5.0, plot=True, verbose=True):
"""Convolution code adapted from pedros code and speed up with np mask logic."""
# CRIRES HDR vals for chip limits don't match well with calibrated values (get interpolation out of range error)
# So will use limits from the obs data instead
# wav_chip, flux_chip = chip_selector(wav, flux, chip)
wav_chip, flux_chip = wav_selector(wav, flux, chip_limits[0], chip_limits[1])
# we need to calculate the FWHM at this value in order to set the starting point for the convolution
FWHM_min = wav_chip[0] / R # FWHM at the extremes of vector
FWHM_max = wav_chip[-1] / R
# wide wavelength bin for the resolution_convolution
wav_extended, flux_extended = wav_selector(wav, flux, wav_chip[0] - fwhm_lim * FWHM_min,
wav_chip[-1] + fwhm_lim * FWHM_max, verbose=False)
    # isinstance check is ~100x faster than converting to an array again.
if not isinstance(wav_extended, np.ndarray):
wav_extended = np.array(wav_extended, dtype="float64")
if not isinstance(flux_extended, np.ndarray):
flux_extended = np.array(flux_extended, dtype="float64")
if verbose:
print("Starting the Resolution convolution...")
# Predefine np array space
flux_conv_res = np.empty_like(wav_chip, dtype="float64")
counter = 0
base_val = len(wav_chip) // 20 # Adjust here to change % between reports
for n, wav in enumerate(wav_chip):
# put value directly into the array
flux_conv_res[n] = fast_convolve(wav, R, wav_extended, flux_extended, fwhm_lim)
if (n % base_val == 0) and verbose:
counter = counter + 5
print("Resolution Convolution at {0}%%...".format(counter))
if verbose:
print("Done.\n")
if(plot):
fig = plt.figure(1)
plt.xlabel(r"wavelength [ $\mu$m ])")
plt.ylabel(r"flux [counts] ")
plt.plot(wav_chip, flux_chip / np.max(flux_chip), color='k', linestyle="-", label="Original spectra")
plt.plot(wav_chip, flux_conv_res / np.max(flux_conv_res), color='b', linestyle="-",
label="Spectrum observed at and R=%d ." % (R))
plt.legend(loc='best')
plt.show(fig)
return [wav_chip, flux_conv_res]
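# Hedged usage sketch (added for illustration; not part of the original module).
# It builds a synthetic spectrum with one narrow absorption line and convolves
# it to a lower resolving power with instrument_convolution defined above.
# The helper name and all numbers are illustrative.
def _example_instrument_convolution():
    wav = np.linspace(2110.0, 2130.0, 4000)                        # nm
    line = 0.01 * unitary_Gauss(wav, 2120.0, 0.02)                 # narrow absorption feature
    flux = 1.0 - line                                              # flat continuum with one line
    chip_limits = (2115.0, 2125.0)
    wav_out, flux_out = instrument_convolution(wav, flux, chip_limits, R=50000,
                                               plot=False, verbose=False)
    return wav_out, flux_out                                       # line comes back broadened and shallower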
|
mit
|
ahoyosid/scikit-learn
|
sklearn/neighbors/tests/test_ball_tree.py
|
3
|
10258
|
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
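# Hedged illustration (added): brute_force_neighbors above relies on numpy
# "fancy" indexing -- D[np.arange(Y.shape[0])[:, None], ind] gathers, for each
# query row, the distances at that row's k sorted column indices. The helper
# below is hypothetical and only spells out that step on a tiny example.
def _example_brute_force_indexing():
    D = np.array([[3.0, 1.0, 2.0],
                  [0.5, 4.0, 0.1]])
    ind = np.argsort(D, axis=1)[:, :2]               # two nearest per row
    dist = D[np.arange(D.shape[0])[:, None], ind]    # row-wise gather
    return dist, ind                                 # dist == [[1.0, 2.0], [0.1, 0.5]]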
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius(query_pt, r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius(query_pt, r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
if __name__ == '__main__':
import nose
nose.runmodule()
|
bsd-3-clause
|
LEX2016WoKaGru/pyClamster
|
pyclamster/coordinates.py
|
1
|
38336
|
# -*- coding: utf-8 -*-
"""
Created on 25.06.2016
Created for pyclamster
Copyright (C) {2016}
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# System modules
import logging
import copy
# External modules
import numpy as np
import numpy.ma as ma
# Internal modules
from . import utils
__version__ = "0.1"
# create logger
logger = logging.getLogger(__name__)
##########################
### Calculation Method ###
##########################
class CalculationMethod(object):
def __init__(self,input,output,func):
self.input = set()
self.output = set()
if isinstance(input,str): self.input.add(input)
else: self.input.update(input)
if isinstance(output,str): self.output.add(output)
else: self.output.update(output)
self.func = func
# check if this calculation method can be applied on quantities
def applicable(self,quantities):
return all(x in quantities for x in self.input)
# when this method is called
def __call__(self):
self.func() # call function
# summary on stringification
def __str__(self):
lines = []
lines.append("{func}".format(func=self.func))
lines.append("{o} <- function of {i}".format(
o=",".join(self.output),
i=",".join(self.input)))
return "\n".join(lines)
##############################
### Calculation Method set ###
##############################
class CalculationMethodSet(object):
def __init__(self, *methods):
self.methods = []
for method in utils.flatten(methods):
self.addmethod(method)
# tell what you use to calculate what
self.VERBOSE = False
################################################################
    ### make the method set behave correctly in certain situations ###
################################################################
# make it iterable
def __iter__(self):
        try: del self.current  # reset counter
        except AttributeError: pass
return self
def __next__(self):
        try: self.current += 1  # try to count up
        except AttributeError: self.current = 0  # if the counter did not exist yet, start at 0
if self.current >= len(self.methods):
del self.current # reset counter
raise StopIteration # stop the iteration
else:
return self.methods[self.current]
# make it indexable
def __getitem__(self,key):
return self.methods[key]
# make it return something in boolean context
def __bool__(self): # return value in boolean context
return len(self.methods) > 0
# make it callable
def __call__(self): # when this set is called
for method in self.methods: # loop over all methods
if self.VERBOSE:
logger.debug("using {i} to calculate {o}".format(i=method.input,
o=method.output))
method() # call the method
# summary if converted to string
def __str__(self):
lines = []
lines.extend(["==============================",
"| set of calculation methods |",
"=============================="])
n = len(self.methods)
if n > 0:
lines.append("{n} calculation method{s}:".format(
n=n,s='s' if n!=1 else ''))
for i,method in enumerate(self.methods):
lines.append("\nMethod Nr. {i}/{n}:\n{m}".format(i=i+1,
m=method,n=n))
else:
lines.append("no calculation methods\n")
return "\n".join(lines)
###################################
### managing methods in the set ###
###################################
def addmethod(self, method):
self.methods.append(method)
def add_new_method(self, input, output, func):
# create new method
method = CalculationMethod(input=input,output=output,func=func)
self.addmethod(method) # add the method
def removemethod(self, method):
self.methods.remove(method)
############################################
### getting information from the methods ###
############################################
# given a set of quantity names, determine which methods can be
# applied DIRECTLY on them
def applicable_methods(self, quantities):
methods = CalculationMethodSet() # new empty set
for method in self.methods: # loop over all methods
# check if method is applicable and add it to list if yes
if method.applicable(quantities):
methods.addmethod(method)
return methods
# given a set of quantity names, determine which methods yield
# any of these quantities DIRECTLY
def methods_yield(self, quantities):
methods = CalculationMethodSet() # new empty set
for method in self.methods: # loop over all methods
# check if method is applicable and add it to list if yes
if any(q in method.output for q in quantities):
methods.addmethod(method)
return methods
# return all calculatable quantities of this set
@property
def all_calculatable_quantities(self):
q = set()
for m in self: q.update(m.output)
return q
# return all needed quantities of this set
@property
def all_needed_quantities(self):
q = set()
for m in self: q.update(m.input)
return q
# given a set of quantity names, determine which other quantities can be
# calculated based DIRECTLY on it
def directly_calculatable_quantities(self, quantities):
# get the applicable methods
applicable = self.applicable_methods(quantities)
# return all calculatable quantities
return applicable.all_calculatable_quantities
# given a set of quantity names, determine which other quantities can
# DIRECTLY calculate these quantities
def quantities_can_calculate(self, quantities):
# get all methods that yield any of the given quantities
methods = self.methods_yield(quantities)
# return all the needed quantities for this
return methods.all_needed_quantities
# given a set of quantity names, construct a calculation method set
# with the correct order to calculate as much other quantities as possible
def dependency_line(self, quantities):
known_quantities = set(quantities) # copy of given quantities
calculated = set(known_quantities) # already calculated quantities
line = CalculationMethodSet() # new empty set
while True:
# get the applicable methods at this stage
methods = self.applicable_methods(known_quantities)
for method in list(methods): # loop over (a copy of) all methods
if method.output.issubset(known_quantities) or \
method.output.issubset(calculated): # if we know it
methods.removemethod(method) # don't consider it
calculated.update(method.output) # update calculated quants
if methods: # if something can be calculated
known_quantities.update(methods.all_calculatable_quantities)
# extend the line with the methods
for method in methods:
line.addmethod(method)
else: break # nothing found, abort
return line
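    # A minimal usage sketch (hypothetical quantity names and no-op functions):
    #   methods = CalculationMethodSet()
    #   methods.add_new_method(input={'a'}, output={'b'}, func=lambda: None)
    #   methods.add_new_method(input={'b'}, output={'c'}, func=lambda: None)
    #   line = methods.dependency_line({'a'})   # ordered: a -> b, then b -> c
    #   line()                                  # runs both calculations in order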
###############################
### classes for coordinates ###
###############################
class BaseCoordinates3d(object):
def __init__(self, dimnames, paramnames=[], shape=None):
# initialize base variables
self._dim_names = dimnames
self._param_names = paramnames
# initialize shape
self.shape = shape
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, newshape): # if the shape of the coordinates is set
"""
set shape of coordinates
args:
newshape (tuple of int): new shape of coordinates. If newshape is
None, all dimensions are set to None. If a reshape of the
dimensions is possible, a reshape is performed on all
                dimensions. If a reshape is not possible, all dimensions
                are initialized with completely masked empty arrays of the
                new shape. If newshape equals the old shape, do nothing.
"""
self._shape = newshape # set new shape
### loop over all dimensions ###
for dim in self._dim_names: # loop over all dimensions
try: shape = getattr(self, dim).shape # try to read shape
except: shape = None # if not yet defined, use None
if newshape is None: # newshape is None
### new shape is None --> set everything to None ###
setattr(self, "_{}".format(dim), None)
#logger.debug("newshape is None, setting {} to None".format(dim))
else: # newshape is not None
### new shape is not None --> further investigation ###
                if not shape is None and \
                        np.prod(newshape) == np.prod(shape) and \
                        not newshape == shape:
### reshape is possible --> reshape! ###
# try to reshape current content
try:
new = getattr(self, dim).reshape(newshape) # try
#logger.debug( " ".join([
#"reshaping {dim} from shape {shape}",
#"to shape {newshape}",
#]).format(dim=dim,newshape=newshape,shape=shape))
except: # dimension was not yet defined --> use empty
#logger.debug(" ".join([
#"can't reshape {dim} from shape {shape}",
#"to shape {newshape}.",
#"setting {dim} to empty array of shape {newshape}."
#]).format(dim=dim,newshape=newshape,shape=shape))
new = ma.masked_array(
data = np.empty(newshape),
mask = np.ones( newshape))
# reshape variable
setattr(self, "_{}".format(dim), new)
else: # reshape not possible
### reshape NOT possible
### --> reinit with empty arrays if oldshape does not match
if shape != newshape: # only if new shape does not match
# set to an empty array
setattr(self, "_{}".format(dim), ma.masked_array(
data = np.empty(newshape),
mask = np.ones( newshape)))
#logger.debug( " ".join([
#"setting {dim} to completely masked array of shape",
#"{newshape} because shape {dimshape} didn't match",
#"newshape {newshape}."
#]).format(dim=dim,newshape=newshape,dimshape=shape))
# set the coordinate to a new value
def _set_coordinate(self, coord, value):
"""
Set the coordinate 'coord' to value 'value'. The value is converted
to an array or expanded to an array of appropriate shape if value
only has length 1.
If the other coordinates are undefined, set them to empty masked arrays
of appropriate shape.
args:
coord (str): name of the coord attribute
value (array_like or single numeric): new coordinate array.
Must be of shape self.shape.
"""
#logger.debug("attempt to set coordinate {} to {}.".format(coord,value))
# find out names of remaining two dimensions
i = self._dim_names.index(coord)
otherdims = self._dim_names[:i] + self._dim_names[(i+1):]
if not value is None: # if value is not None
# make sure value is an array
value = ma.asanyarray(value) # try to convert to array
# check shape
if not self.shape is None: # if shape is defined
if np.prod(value.shape) == 1: # only one value was given
# filled constant array
value = np.full( self.shape, value, np.array(value).dtype)
elif np.prod(value.shape) == np.prod(self.shape):
# reshape
value = value.reshape(self.shape)
elif value.shape != self.shape: # value shape does not match
raise ValueError(
"invalid shape {} (not {}) of new coordinate {}".format(
value.shape, self.shape, coord))
else: # shape is not defined yet
self.shape = value.shape # set it!
resval = value # this value
# set other dims to completely masked array if necessary
for dim in otherdims: # loop over all other dimensions
try: dimval = getattr(self, dim) # try to read current dimval
except: dimval = None # if not yet defined, use None
if dimval is None: # if current dimval is not defined
setattr(self, "_{}".format(dim), ma.masked_array(
data = np.empty(self.shape),
mask = np.ones( self.shape)))
else: # specified value is None
if self.shape is None: # if no shape was defined yet
resval = None # just set this dimension to None
else: # shape is defined, set to empty array of appropriate shape
resval = ma.masked_array(
data = np.empty(self.shape),
mask = np.ones( self.shape))
#logger.debug(
#"setting {} to completely masked arrays of shape {} ".format(
#",".join(self._dim_names),self.shape))
# set resulting value
#logger.debug("setting {} to {}".format(coord,resval))
setattr(self, "_{}".format(coord), resval)
try: # try this because resval can be None...
if self.shape != resval.shape:
#logger.debug("Adjusting shape from {} to {}".format(self.shape,
#resval.shape))
self.shape = resval.shape
except: pass
# crop coordinates to a box
def crop(self, box):
"""
crop the coordinates in-place to a box
args:
box (4-tuple of int): (left, top, right, bottom)
"""
for dim in self._dim_names: # loop over all dimensions
new = getattr(self, dim)[box[1]:box[3], box[0]:box[2]]
# set underlying coordinate directly
setattr(self, "_{}".format(dim) , new)
# cut out a box
def cut(self, box):
"""
cut the coordinates to a box and return it
args:
box (4-tuple of int): (left, top, right, bottom)
return:
            coordinates = copied and cropped instance
"""
new = copy.deepcopy(self) # copy
new.crop(box) # crop
return new # return
########################################
### convenient class for coordinates ###
########################################
class Coordinates3d(BaseCoordinates3d):
def __init__(self, shape=None, azimuth_offset=0, azimuth_clockwise=False,
elevation_type='zenith',**dimensions):
# parent constructor
super().__init__(
shape=shape,
dimnames = ['x','y','z','radiush','elevation','azimuth','radius'],
paramnames =['azimuth_clockwise','azimuth_offset','elevation_type']
)
self._max_print = 10 # maximum values to print
# define methods
self.methods = CalculationMethodSet()
# add methods
self.methods.add_new_method(output='radius',input={'x','y','z'},
func=self.radius_from_xyz)
self.methods.add_new_method(output='radiush',input={'x','y'},
func=self.radiush_from_xy)
self.methods.add_new_method(output='radius',input={'radiush','z'},
func=self.radius_from_radiush_z)
self.methods.add_new_method(output='radius',input={'elevation',
'radiush'}, func=self.radius_from_elevation_radiush)
self.methods.add_new_method(output='azimuth',input={'x','y'},
func=self.azimuth_from_xy)
self.methods.add_new_method(output='elevation',input={'radiush','z'},
func=self.elevation_from_radiush_z)
self.methods.add_new_method(output='x',input={'azimuth','elevation',
'radius'}, func=self.x_from_spherical)
self.methods.add_new_method(output='x',input={'azimuth','radiush'},
func=self.x_from_azimuth_radiush)
self.methods.add_new_method(output='y',input={'azimuth','elevation',
'radius'}, func=self.y_from_spherical)
self.methods.add_new_method(output='y',input={'azimuth','radiush'},
func=self.y_from_azimuth_radiush)
self.methods.add_new_method(output='z',input={'azimuth','elevation',
'radius'}, func=self.z_from_spherical)
self.methods.add_new_method(output='x',input={'radiush','y'},
func=self.x_from_radiush_y)
self.methods.add_new_method(output='y',input={'radiush','x'},
func=self.y_from_radiush_x)
self.methods.add_new_method(output='z',input={'radius','radiush'},
func=self.z_from_radiusses)
self.methods.add_new_method(output='radiush',input={'elevation','z'},
func=self.radiush_from_elevation_z)
self.methods.add_new_method(output='radiush',input={'elevation',
'radius'}, func=self.radiush_from_elevation_radius)
self.methods.add_new_method(output='elevation',input={'radius',
'radiush'}, func=self.elevation_from_radiusses)
self.methods.add_new_method(output='elevation',input={'radius','z'},
func=self.elevation_from_radius_z)
# initially set parameters
self.change_parameters(
azimuth_clockwise = azimuth_clockwise,
azimuth_offset = azimuth_offset,
elevation_type = elevation_type
)
# fill with given dimensions
if dimensions:
self.fill(**dimensions)
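    # A minimal usage sketch (hypothetical values, not from the original
    # source): filling the spherical dimensions makes the Cartesian ones
    # available through the calculation methods registered above, e.g.
    #   coords = Coordinates3d(azimuth=np.pi/2, elevation=np.pi/4,
    #                          radius=1.0, elevation_type='ground')
    #   coords.x, coords.y, coords.z   # derived automatically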
@property
def azimuth_clockwise(self): return self._azimuth_clockwise
@property
def azimuth_offset(self): return self._azimuth_offset
@property
def elevation_type(self): return self._elevation_type
@azimuth_clockwise.setter
def azimuth_clockwise(self, value):
self.change_parameters(azimuth_clockwise=value)
@azimuth_offset.setter
def azimuth_offset(self, value):
self.change_parameters(azimuth_offset=value)
@elevation_type.setter
def elevation_type(self, value):
self.change_parameters(elevation_type=value)
@property
def x(self): return self._x
@property
def y(self): return self._y
@property
def z(self): return self._z
@property
def azimuth(self): return self._azimuth
@property
def elevation(self): return self._elevation
@property
def radius(self): return self._radius
@property
def radiush(self): return self._radiush
@x.setter
def x(self, value): self.fill(x=value)
@y.setter
def y(self, value): self.fill(y=value)
@z.setter
def z(self, value): self.fill(z=value)
@azimuth.setter
def azimuth(self, value): self.fill(azimuth=value)
@elevation.setter
def elevation(self, value): self.fill(elevation=value)
@radius.setter
def radius(self, value): self.fill(radius=value)
@radiush.setter
def radiush(self, value): self.fill(radiush=value)
# determine which dimensions are defined
@property
def defined_dimensions(self):
defined = set()
for dim in self._dim_names:
isdefined = False
try: value = getattr(self, dim)
except: pass
if not value is None:
try: isdefined = not value.mask.all()
except AttributeError:
isdefined = True
if isdefined:
defined.add(dim)
return(defined)
# change parameters keeping some dimensions
def change_parameters(self,keep=set(),**parameters):
#logger.debug(
#"request to change parameters to {} while keeping {}".format(
#parameters,keep))
for param,val in parameters.items(): # loop over new parameters
# check value
if param == "elevation_type":
elevation_types = {'zenith','ground'}
if not val in elevation_types:
raise ValueError(
"wrong elevation type '{e}', has to be one of {t}".format(
e=val,t=elevation_types))
elif param == "azimuth_clockwise":
if not isinstance(val, bool):
raise ValueError("azimuth_clockwise has to be boolean.")
elif param == "azimuth_offset":
try: float(val)
except: raise ValueError("azimuth_offset has to be numeric.")
else:
raise AttributeError("parameter {} does not exist!".format(
param))
# make sure to only have sensible dimensions to keep
try: keep = keep.intersection(self._dim_names)
except: raise TypeError("keep has to be a set of dimension names.")
# set the underlying attribute
setattr(self,"_{}".format(param),val)
# empty all unwanted dimensions
notkept = set(self._dim_names).symmetric_difference(keep)
#logger.debug("empty {}, because not kept".format(notkept))
for dim in notkept:
self._set_coordinate(dim,None) # empty this dimension
# now calculate everything based on the dimensions to keep
self.fill_dependencies(keep)
# given specific values for some dimensions, calculate all others
def fill_dependencies(self,dimensions):
# get the depenency line
dependency_line = self.methods.dependency_line(dimensions)
# do everything in the dependency line
dependency_line() # call
# set as much variables as you can based on given dimensions and already
# defined dimensions
def fill(self, **dimensions):
#logger.debug("request to set {}".format(dimensions))
# if nothing was given to fill from, empty everything
if len(dimensions) == 0:
#logger.debug("filling from nothing. emptying all dimensions.")
shape = self.shape
self.shape = None
self.shape = shape
# first, unset all dimensions that reverse-depend on the new dimensions
for dim in dimensions.keys(): # loop over all new given dimensions
# get all methods that yield this new dimension
ms = set(); ms.add(dim)
methods = self.methods.methods_yield(ms)
#logger.debug("methods, that yield {}:".format(dim))
for m in methods: # loop over all methods that yield this new dim
# if for this method all information is already given
if m.input.issubset(self.defined_dimensions):
# unset everything needed for this method
# because this is the reverse dependency of this new
# dimensions
#logger.debug(" ".join([
#"unsetting {d}, because they are given",
#"and can calculate {dim}",
#]).format(m=m,d=m.input,dim=dim,i=m.input))
for d in m.input:
self._set_coordinate(d, None)
# initially set all given variables
for dim, value in dimensions.items():
#logger.debug("setting {} directly to value {}".format(dim,value))
self._set_coordinate(dim, value)
# create a set of defined dimensions, updated with new given dimensions
merged = set(self.defined_dimensions)
merged.update(dimensions.keys())
# now fill the dependencies with all the information we have
self.fill_dependencies(merged)
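    # e.g. (illustrative): if x, y and z are already defined and
    # fill(azimuth=...) is called, x and y are unset first because they could
    # recalculate azimuth; the new azimuth is stored and the remaining
    # quantities are then re-derived via fill_dependencies.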
#################
### operators ###
#################
def __add__(self, other):
if isinstance(other,type(self)):
newcoord = copy.deepcopy(self)
            newcoord.fill(x=newcoord.x+other.x,y=newcoord.y+other.y,z=newcoord.z+other.z)
            return newcoord
        else:
            raise TypeError('Can only add Coordinates3d class!')
def __sub__(self, other):
if isinstance(other,type(self)):
newcoord = copy.deepcopy(self)
newcoord.fill(x=newcoord.x-other.x,y=newcoord.y-other.y,z=newcoord.z-other.z)
return newcoord
else:
            raise TypeError('Can only subtract Coordinates3d class!')
def __truediv__(self, other):
if isinstance(other,type(self)):
newcoord = copy.deepcopy(self)
newcoord.fill(x=newcoord.x/other.x,y=newcoord.y/other.y,z=newcoord.z/other.z)
return newcoord
else:
            raise TypeError('Can only divide Coordinates3d class!')
def __mul__(self, other):
if isinstance(other,type(self)):
newcoord = copy.deepcopy(self)
newcoord.fill(x=newcoord.x*other.x,y=newcoord.y*other.y,z=newcoord.z*other.z)
return newcoord
else:
            raise TypeError('Can only multiply Coordinates3d class!')
###########################
### calculation methods ###
###########################
def radius_from_xyz(self):
self._radius = np.sqrt(self.x**2 + self.y**2 + self.z**2)
def radiush_from_xy(self):
self._radiush = np.sqrt(self.x**2 + self.y**2)
def radius_from_radiush_z(self):
self._radius = np.sqrt(self.radiush**2 + self.z**2)
def radius_from_elevation_radiush(self):
if self.elevation_type == "zenith":
self._radius = self.radiush / (np.sin( self.elevation )+1e-4)
elif self.elevation_type == "ground":
self._radius = self.radiush / (np.cos( self.elevation )+1e-4)
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
def azimuth_from_xy(self):
north = self.azimuth_offset
azimuth_clockwise = self.azimuth_clockwise
north = - (north % (2*np.pi) )
if azimuth_clockwise:
north = - north
# note np.arctan2's way of handling x and y arguments:
# np.arctan2( y, x ), NOT np.arctan( x, y ) !
#
# np.arctan2( y, x ) returns the SIGNED (!)
# angle between positive x-axis and the vector (x,y)
# in radians
# the azimuth angle is...
# ...the SIGNED angle between positive x-axis and the vector...
# ...plus some full circle to only have positive values...
# ...minus angle defined as "NORTH" (modulo 2*pi to be precise)
# --> azi is not angle to x-axis but to NORTH
azimuth = np.arctan2(self.y, self.x) + 6 * np.pi + north
# take azimuth modulo a full circle to have sensible values
azimuth = azimuth % (2*np.pi)
if azimuth_clockwise: # turn around if azimuth_clockwise
azimuth = 2 * np.pi - azimuth
self._azimuth = azimuth
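        # Worked example (assuming azimuth_offset=0, azimuth_clockwise=False):
        # for (x, y) = (0, 1), np.arctan2(1, 0) = pi/2, so the stored azimuth
        # is (pi/2 + 6*pi) % (2*pi) = pi/2, i.e. a quarter turn from the x-axis.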
def elevation_from_radiush_z(self):
if self.elevation_type == "zenith":
self._elevation = np.arctan(self.radiush / (self.z+1e-4))
elif self.elevation_type == "ground":
self._elevation = np.arctan(self.z / (self.radiush+1e-4))
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
def elevation_from_radius_z(self):
if self.elevation_type == "zenith":
self._elevation = np.arccos(self.z / (self.radius + 1e-4))
elif self.elevation_type == "ground":
            # ground elevation: sin(elev) = z / radius
            self._elevation = np.arcsin(self.z / (self.radius + 1e-4))
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
def x_from_spherical(self):
azimuth = self.azimuth + self.azimuth_offset
if self.azimuth_clockwise: azimuth = 2*np.pi - azimuth
if self.elevation_type == "zenith":
self._x = self.radius \
* np.sin( self.elevation ) \
* np.cos( azimuth )
elif self.elevation_type == "ground":
self._x = self.radius \
* np.cos( self.elevation ) \
* np.cos( azimuth )
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
def x_from_azimuth_radiush(self):
azimuth = self.azimuth + self.azimuth_offset
if self.azimuth_clockwise: azimuth = 2*np.pi - azimuth
self._x = self.radiush * np.cos( azimuth )
def y_from_spherical(self):
azimuth = self.azimuth + self.azimuth_offset
if self.azimuth_clockwise: azimuth = 2*np.pi - azimuth
if self.elevation_type == "zenith":
self._y = self.radius \
* np.sin( self.elevation ) \
* np.sin( azimuth )
elif self.elevation_type == "ground":
self._y = self.radius \
* np.cos( self.elevation ) \
* np.sin( azimuth )
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
def y_from_azimuth_radiush(self):
azimuth = self.azimuth + self.azimuth_offset
if self.azimuth_clockwise: azimuth = 2*np.pi - azimuth
self._y = self.radiush * np.sin( azimuth )
def z_from_spherical(self):
if self.elevation_type == "zenith":
self._z = self.radius * np.cos( self.elevation )
elif self.elevation_type == "ground":
self._z = self.radius * np.sin( self.elevation )
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
def x_from_radiush_y(self):
self._x = np.sqrt(self.radiush**2 - self.y**2)
def y_from_radiush_x(self):
self._y = np.sqrt(self.radiush**2 - self.x**2)
def z_from_radiusses(self):
self._z = np.sqrt(self.radius**2 - self.radiush**2)
def radiush_from_elevation_z(self):
if self.elevation_type == "zenith":
self._radiush = self.z * np.tan( self.elevation )
elif self.elevation_type == "ground":
            # ground elevation: tan(elev) = z / radiush
            self._radiush = self.z / (np.tan( self.elevation )+1e-4)
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
def radiush_from_elevation_radius(self):
if self.elevation_type == "zenith":
self._radiush = self.radius * np.sin( self.elevation )
elif self.elevation_type == "ground":
            # ground elevation: cos(elev) = radiush / radius
            self._radiush = self.radius * np.cos( self.elevation )
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
def elevation_from_radiusses(self):
if self.elevation_type == "zenith":
self._elevation = np.arcsin( self.radiush / (self.radius + 1e-4) )
elif self.elevation_type == "ground":
            # ground elevation: cos(elev) = radiush / radius
            self._elevation = np.arccos( self.radiush / (self.radius + 1e-4) )
else:
raise Exception("unknown elevation type '{}'".format(
self.elevation_type))
###############################
### end calculation methods ###
###############################
######################
### string methods ###
######################
def _str_params(self):
fstr = []
fstr.append(" shape: {}".format(self.shape))
fstr.append(" elevation_type: {}".format(self.elevation_type))
fstr.append("azimuth_clockwise: {}".format(self.azimuth_clockwise))
fstr.append(" azimuth_offset: {}".format(self.azimuth_offset))
return("\n".join(fstr))
# summary when converted to string
def __str__(self):
def ndstr(a, format_string ='{0: 9.3f}'):
string = []
for i,v in enumerate(a):
if v is np.ma.masked:
string.append(" -- ")
else:
string.append(format_string.format(v,i))
return "|".join(string)
formatstring = ["==================",
"| 3d coordinates |",
"=================="]
formatstring.append(self._str_params())
formatstring.append("=====================")
for dim in self._dim_names:
value = getattr(self, dim)
isdefined = False
if not value is None:
try: isdefined = not np.ma.masked_invalid(value).mask.all()
except AttributeError:
isdefined = True
if isdefined:
try:
if np.prod(self.shape) < self._max_print:
string = str(ndstr(value.flatten()))
else: string = "defined"
except: raise
else:
string = "empty"
formatstring.append("{:>11}: {}".format(dim,string))
return("\n".join(formatstring))
####################
### Plot methods ###
####################
def plot(self, arrows = False, numbers=True):
"""
create a matplotlib plot of the coordinates.
The plot has then to be shown.
returns:
plot = (unshown) matplotlib plot
"""
try: # try to import matplotlib
import matplotlib.pyplot as plt
except ImportError:
raise NotImplementedError(" ".join([
"Plotting coordinates not possible because",
"matplotlib could not be found."]))
p = plt.figure()
# all of the following is necessary to enlarge the
# axis to a tiny extent, so that the points are fully visible
# ... unbelievable that this is not the default
axwidth = 1.1
xmin = np.nanmin(self.x)
ymin = np.nanmin(self.y)
xmax = np.nanmax(self.x)
ymax = np.nanmax(self.y)
xmean = np.mean([xmin,xmax])
ymean = np.mean([ymin,ymax])
xd = np.abs(xmax - xmin)
yd = np.abs(ymax - ymin)
xlim = [ min(0,xmean - axwidth * xd/2), max(0,xmean + axwidth * xd/2) ]
ylim = [ min(0,ymean - axwidth * yd/2), max(0,ymean + axwidth * yd/2) ]
plt.xlim(xlim)
plt.ylim(ylim)
# the axis ranges are now set... what a mess...
# equally-spaced axes
plt.axes().set_aspect('equal', 'datalim')
# x=0 and y=0
plt.axhline(y=0,ls='dashed',color='k')
plt.axvline(x=0,ls='dashed',color='k')
# grid
plt.grid(True)
# set the title
plt.title(self._str_params())
# show north angle
maxxy = 2*axwidth * np.abs(np.array([xmin,ymin,xmax,ymax])).max()
if self.azimuth_clockwise:
aox, aoy = np.cos(2*np.pi-self.azimuth_offset), \
np.sin(2*np.pi-self.azimuth_offset)
else:
aox, aoy = np.cos(self.azimuth_offset),np.sin(self.azimuth_offset)
plt.plot((0,maxxy*aox),(0,maxxy*aoy), 'r--',linewidth=4)
# plot the points
plt.plot(self.x,self.y,'o')
# draw arrows
for x,y,n in zip(self.x.flatten(),self.y.flatten(),
np.arange(np.size(self.x))+1):
if arrows:
plt.annotate(s='',xy=(x,y),xytext=(0,0),
arrowprops=dict(arrowstyle='->'))
if numbers:
plt.annotate(str(n),xy=(x,y))
return p
def plot3d(self,fig=None, method="scatter"):
"""
create a matplotlib 3d scatterplot of the coordinates.
The plot has then to be shown.
args:
method (string): scatter or line
returns:
Axes3D
"""
try: # try to import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
except ImportError:
raise NotImplementedError(" ".join([
"Plotting coordinates not possible because",
"matplotlib could not be found."]))
if fig is None:
fig = plt.figure()
ax = fig.gca(projection='3d')
if method == "scatter":
ax.scatter3D(self.x,self.y,self.z, label='cloud points')
elif method == "line":
ax.plot(self.x,self.y,self.z, label='cloud points')
else:
raise ValueError("unknown method '{}'".format(method))
return ax
|
gpl-3.0
|
akrherz/iem
|
htdocs/plotting/auto/scripts/p94.py
|
1
|
3424
|
"""Bias computing hi/lo"""
import datetime
import numpy as np
import pandas as pd
import psycopg2.extras
from pyiem.plot import figure_axes
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This plot looks at the effect of splitting a 24
hour period at different hours of the day. Using the hourly temperature
record, we can look at the bias of computing the daily high and low
temperature. Confusing? Assuming that the 'truth' is a daily high and
low computed at midnight, we can compare this value against 24 hour periods
    computed for each hour of the day. This plot is one of the main reasons
    why comparing climate data for a station that changed its hour of
    observation over the years is problematic."""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="DSM",
network="IA_ASOS",
label="Select Station:",
)
]
return desc
def plotter(fdict):
""" Go """
pgconn = get_dbconn("asos")
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
ctx = get_autoplot_context(fdict, get_description())
station = ctx["zstation"]
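    # The query below shifts every hourly observation by 0..23 hours, computes
    # daily highs/lows for each shifted "day", and averages them per shift so
    # the result can be compared against the midnight-to-midnight baseline.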
cursor.execute(
"""
WITH obs as (select valid at time zone %s + '10 minutes'::interval as v,
tmpf from alldata
WHERE station = %s and tmpf >= -90 and tmpf < 150),
s as (SELECT generate_series(0, 23, 1) || ' hours' as series),
daily as (select s.series, v + s.series::interval as t, tmpf from obs, s),
sums as (select series, date(t), max(tmpf), min(tmpf) from daily
GROUP by series, date)
SELECT series, avg(max), avg(min) from sums GROUP by series
""",
(ctx["_nt"].sts[station]["tzname"], station),
)
if cursor.rowcount == 0:
raise NoDataFound("No Data found.")
rows = []
hrs = range(25)
highs = [None] * 25
lows = [None] * 25
for row in cursor:
i = int(row[0].split()[0])
highs[24 - i] = row[1]
lows[24 - i] = row[2]
rows.append(dict(offset=(24 - i), avg_high=row[1], avg_low=row[2]))
rows.append(dict(offset=0, avg_high=highs[24], avg_low=lows[24]))
highs[0] = highs[24]
lows[0] = lows[24]
df = pd.DataFrame(rows)
ab = ctx["_nt"].sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata.")
title = "[%s] %s %s-%s" % (
station,
ctx["_nt"].sts[station]["name"],
ab.year,
datetime.date.today().year,
)
subtitle = "Bias of 24 Hour 'Day' Split for Average High + Low Temp"
(fig, ax) = figure_axes(title=title, subtitle=subtitle)
ax.plot(
hrs, np.array(highs) - highs[0], label="High Temp", lw=2, color="r"
)
ax.plot(hrs, np.array(lows) - lows[0], label="Low Temp", lw=2, color="b")
ax.set_ylabel(r"Average Temperature Difference $^\circ$F")
ax.set_xlim(0, 24)
ax.set_xticks((0, 4, 8, 12, 16, 20, 24))
ax.set_xticklabels(("Mid", "4 AM", "8 AM", "Noon", "4 PM", "8 PM", "Mid"))
ax.grid(True)
ax.set_xlabel("Hour Used for 24 Hour Summary")
ax.legend(loc="best")
return fig, df
if __name__ == "__main__":
plotter(dict())
|
mit
|
shusenl/scikit-learn
|
sklearn/ensemble/tests/test_bagging.py
|
72
|
25573
|
"""
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test that a warm-started second fit with smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
|
bsd-3-clause
|
anntzer/scikit-learn
|
sklearn/linear_model/_ridge.py
|
5
|
77086
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from ._base import LinearClassifierMixin, LinearModel, _rescale_data
from ._sag import sag_solver
from ..base import RegressorMixin, MultiOutputMixin, is_classifier
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..utils.validation import _check_sample_weight
from ..utils.validation import _deprecate_positional_args
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..metrics import check_scoring
from ..exceptions import ConvergenceWarning
from ..utils.sparsefuncs import mean_variance_axis
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0,
X_offset=None, X_scale=None):
def _get_rescaled_operator(X):
X_offset_scale = X_offset / X_scale
def matvec(b):
return X.dot(b) - b.dot(X_offset_scale)
def rmatvec(b):
return X.T.dot(b) - X_offset_scale * np.sum(b)
X1 = sparse.linalg.LinearOperator(shape=X.shape,
matvec=matvec,
rmatvec=rmatvec)
return X1
n_samples, n_features = X.shape
if X_offset is None or X_scale is None:
X1 = sp_linalg.aslinearoperator(X)
else:
X1 = _get_rescaled_operator(X)
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
# FIXME atol
try:
coef, info = sp_linalg.cg(C, y_column, tol=tol, atol='legacy')
except TypeError:
# old scipy
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
# FIXME atol
try:
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol, atol='legacy')
except TypeError:
# old scipy
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info, ConvergenceWarning)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_features = X.shape[1]
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features], dtype=X.dtype)
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples], K.dtype)
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
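    # Closed-form ridge solution via SVD: with X = U diag(s) Vt, the
    # coefficients are w = V diag(s / (s**2 + alpha)) U.T y; singular values
    # below 1e-15 are treated as zero, matching scipy.linalg.pinv's default.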
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size), dtype=X.dtype)
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def _get_valid_accept_sparse(is_X_sparse, solver):
if is_X_sparse and solver in ['auto', 'sag', 'saga']:
return 'csr'
else:
return ['csr', 'csc', 'coo']
@_deprecate_positional_args
def ridge_regression(X, y, alpha, *, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False,
check_input=True):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {ndarray, sparse matrix, LinearOperator} of shape \
(n_samples, n_features)
Training data
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values
alpha : float or array-like of shape (n_targets,)
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
sample_weight : float or array-like of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
.. versionadded:: 0.17
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \
default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
        All solvers except 'svd' support both dense and sparse data. However,
        only 'sag' and 'sparse_cg' support sparse input when `fit_intercept`
        is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For the 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For the 'sag' and 'saga' solvers, the default
        value is 1000.
tol : float, default=1e-3
Precision of the solution.
verbose : int, default=0
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
return_n_iter : bool, default=False
If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.
.. versionadded:: 0.17
return_intercept : bool, default=False
If True and if X is sparse, the method also returns the intercept,
and the solver is automatically changed to 'sag'. This is only a
temporary fix for fitting the intercept with sparse data. For dense
data, use sklearn.linear_model._preprocess_data before your regression.
.. versionadded:: 0.17
check_input : bool, default=True
If False, the input arrays X and y will not be checked.
.. versionadded:: 0.21
Returns
-------
coef : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
n_iter : int, optional
        The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
intercept : float or ndarray of shape (n_targets,)
The intercept of the model. Only returned if `return_intercept`
is True and if X is a scipy sparse array.
Notes
-----
This function won't compute the intercept.
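
    Examples
    --------
    A minimal, illustrative call on synthetic data (the data below is an
    arbitrary sketch, not a reference output):

    >>> import numpy as np
    >>> from sklearn.linear_model import ridge_regression
    >>> rng = np.random.RandomState(0)
    >>> X = rng.randn(100, 4)
    >>> y = X.dot(np.array([1., 2., 0., -1.])) + 0.1 * rng.randn(100)
    >>> coef = ridge_regression(X, y, alpha=1.0)
    >>> coef.shape
    (4,)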
"""
return _ridge_regression(X, y, alpha,
sample_weight=sample_weight,
solver=solver,
max_iter=max_iter,
tol=tol,
verbose=verbose,
random_state=random_state,
return_n_iter=return_n_iter,
return_intercept=return_intercept,
X_scale=None,
X_offset=None,
check_input=check_input)
def _ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False, return_intercept=False,
X_scale=None, X_offset=None, check_input=True):
has_sw = sample_weight is not None
if solver == 'auto':
if return_intercept:
# only sag supports fitting intercept directly
solver = "sag"
elif not sparse.issparse(X):
solver = "cholesky"
else:
solver = "sparse_cg"
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag', 'saga'):
raise ValueError("Known solvers are 'sparse_cg', 'cholesky', 'svd'"
" 'lsqr', 'sag' or 'saga'. Got %s." % solver)
if return_intercept and solver != 'sag':
raise ValueError("In Ridge, only 'sag' solver can directly fit the "
"intercept. Please change solver to 'sag' or set "
"return_intercept=False.")
if check_input:
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X), solver)
X = check_array(X, accept_sparse=_accept_sparse, dtype=_dtype,
order="C")
y = check_array(y, dtype=X.dtype, ensure_2d=False, order=None)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
if has_sw:
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
if solver not in ['sag', 'saga']:
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha, dtype=X.dtype).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha,
max_iter=max_iter,
tol=tol,
verbose=verbose,
X_offset=X_offset,
X_scale=X_scale)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver in ['sag', 'saga']:
# precompute max_squared_sum for all targets
max_squared_sum = row_norms(X, squared=True).max()
coef = np.empty((y.shape[1], n_features), dtype=X.dtype)
n_iter = np.empty(y.shape[1], dtype=np.int32)
intercept = np.zeros((y.shape[1], ), dtype=X.dtype)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
init = {'coef': np.zeros((n_features + int(return_intercept), 1),
dtype=X.dtype)}
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i, 0,
max_iter, tol, verbose, random_state, False, max_squared_sum,
init, is_saga=solver == 'saga')
if return_intercept:
coef[i] = coef_[:-1]
intercept[i] = coef_[-1]
else:
coef[i] = coef_
n_iter[i] = n_iter_
if intercept.shape[0] == 1:
intercept = intercept[0]
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter and return_intercept:
return coef, n_iter, intercept
elif return_intercept:
return coef, intercept
elif return_n_iter:
return coef, n_iter
else:
return coef
class _BaseRidge(LinearModel, metaclass=ABCMeta):
@abstractmethod
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
        # all solvers used here work at both float precision levels
_dtype = [np.float64, np.float32]
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X),
self.solver)
X, y = self._validate_data(X, y,
accept_sparse=_accept_sparse,
dtype=_dtype,
multi_output=True, y_numeric=True)
if sparse.issparse(X) and self.fit_intercept:
if self.solver not in ['auto', 'sparse_cg', 'sag']:
raise ValueError(
"solver='{}' does not support fitting the intercept "
"on sparse data. Please set the solver to 'auto' or "
"'sparse_cg', 'sag', or set `fit_intercept=False`"
.format(self.solver))
if (self.solver == 'sag' and self.max_iter is None and
self.tol > 1e-4):
warnings.warn(
'"sag" solver requires many iterations to fit '
'an intercept with sparse inputs. Either set the '
'solver to "auto" or "sparse_cg", or set a low '
'"tol" and a high "max_iter" (especially if inputs are '
'not standardized).')
solver = 'sag'
else:
solver = 'sparse_cg'
else:
solver = self.solver
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
# when X is sparse we only remove offset from y
X, y, X_offset, y_offset, X_scale = self._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight, return_mean=True)
if solver == 'sag' and sparse.issparse(X) and self.fit_intercept:
self.coef_, self.n_iter_, self.intercept_ = _ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver='sag',
random_state=self.random_state, return_n_iter=True,
return_intercept=True, check_input=False)
# add the offset which was subtracted by _preprocess_data
self.intercept_ += y_offset
else:
if sparse.issparse(X) and self.fit_intercept:
# required to fit intercept with sparse_cg solver
params = {'X_offset': X_offset, 'X_scale': X_scale}
else:
# for dense matrices or when intercept is set to 0
params = {}
self.coef_, self.n_iter_ = _ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=solver,
random_state=self.random_state, return_n_iter=True,
return_intercept=False, check_input=False, **params)
self._set_intercept(X_offset, y_offset, X_scale)
return self
class Ridge(MultiOutputMixin, RegressorMixin, _BaseRidge):
"""Linear least squares with l2 regularization.
Minimizes the objective function::
||y - Xw||^2_2 + alpha * ||w||^2_2
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape (n_samples, n_targets)).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, ndarray of shape (n_targets,)}, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are
assumed to be specific to the targets. Hence they must correspond in
number.
fit_intercept : bool, default=True
Whether to fit the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. ``X`` and ``y`` are expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
tol : float, default=1e-3
Precision of the solution.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \
default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its improved, unbiased version named SAGA. Both methods also use an
iterative procedure, and are often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
        All solvers except 'svd' support both dense and sparse data. However,
        only 'sag' and 'sparse_cg' support sparse input when `fit_intercept`
        is True.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.17
`random_state` to support Stochastic Average Gradient.
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
.. versionadded:: 0.17
See Also
--------
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
:class:`~sklearn.kernel_ridge.KernelRidge` : Kernel ridge regression
combines ridge regression with the kernel trick.
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y)
Ridge()
"""
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super().__init__(
alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : returns an instance of self.
"""
return super().fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
This classifier first converts the target values into ``{-1, 1}`` and
then treats the problem as a regression task (multi-output regression in
the multiclass case).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float, default=1.0
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
max_iter : int, default=None
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
tol : float, default=1e-3
Precision of the solution.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga'}, \
default='auto'
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than 'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
scipy.sparse.linalg.lsqr. It is the fastest and uses an iterative
procedure.
- 'sag' uses a Stochastic Average Gradient descent, and 'saga' uses
its unbiased and more flexible version named SAGA. Both methods
use an iterative procedure, and are often faster than other solvers
when both n_samples and n_features are large. Note that 'sag' and
'saga' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
.. versionadded:: 0.19
SAGA solver.
random_state : int, RandomState instance, default=None
Used when ``solver`` == 'sag' or 'saga' to shuffle the data.
See :term:`Glossary <random_state>` for details.
Attributes
----------
coef_ : ndarray of shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : None or ndarray of shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
classes_ : ndarray of shape (n_classes,)
The classes labels.
See Also
--------
Ridge : Ridge regression.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifier
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifier().fit(X, y)
>>> clf.score(X, y)
0.9595...
"""
@_deprecate_positional_args
def __init__(self, alpha=1.0, *, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super().__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier model.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
.. versionadded:: 0.17
*sample_weight* support to Classifier.
Returns
-------
self : object
Instance of the estimator.
"""
_accept_sparse = _get_valid_accept_sparse(sparse.issparse(X),
self.solver)
X, y = self._validate_data(X, y, accept_sparse=_accept_sparse,
multi_output=True, y_numeric=False)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
else:
# we don't (yet) support multi-label classification in Ridge
raise ValueError(
"%s doesn't support multi-label classification" % (
self.__class__.__name__))
if self.class_weight:
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super().fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
def _check_gcv_mode(X, gcv_mode):
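    """Validate ``gcv_mode`` and resolve 'auto' (or None) to 'svd' or 'eigen'.

    'svd' is chosen when n_samples > n_features and 'eigen' otherwise, which
    is the cheaper of the two decompositions for the shape of X.
    """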
possible_gcv_modes = [None, 'auto', 'svd', 'eigen']
if gcv_mode not in possible_gcv_modes:
raise ValueError(
"Unknown value for 'gcv_mode'. "
"Got {} instead of one of {}" .format(
gcv_mode, possible_gcv_modes))
if gcv_mode in ['eigen', 'svd']:
return gcv_mode
# if X has more rows than columns, use decomposition of X^T.X,
# otherwise X.X^T
if X.shape[0] > X.shape[1]:
return 'svd'
return 'eigen'
def _find_smallest_angle(query, vectors):
"""Find the column of vectors that is most aligned with the query.
Both query and the columns of vectors must have their l2 norm equal to 1.
Parameters
----------
query : ndarray of shape (n_samples,)
Normalized query vector.
vectors : ndarray of shape (n_samples, n_features)
Vectors to which we compare query, as columns. Must be normalized.
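
    Examples
    --------
    A minimal illustrative call (the private import path is an assumption):

    >>> import numpy as np
    >>> from sklearn.linear_model._ridge import _find_smallest_angle
    >>> int(_find_smallest_angle(np.array([0., 1.]), np.eye(2)))
    1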
"""
abs_cosine = np.abs(query.dot(vectors))
index = np.argmax(abs_cosine)
return index
class _X_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as centered and scaled X with an added intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]])
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_samples, n_features + 1))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
return safe_sparse_dot(
self.X, v[:-1], dense_output=True
) - self.sqrt_sw * self.X_mean.dot(v[:-1]) + v[-1] * self.sqrt_sw
def _matmat(self, v):
return (
safe_sparse_dot(self.X, v[:-1], dense_output=True) -
self.sqrt_sw[:, None] * self.X_mean.dot(v[:-1]) + v[-1] *
self.sqrt_sw[:, None])
def _transpose(self):
return _XT_CenterStackOp(self.X, self.X_mean, self.sqrt_sw)
class _XT_CenterStackOp(sparse.linalg.LinearOperator):
"""Behaves as transposed centered and scaled X with an intercept column.
This operator behaves as
np.hstack([X - sqrt_sw[:, None] * X_mean, sqrt_sw[:, None]]).T
"""
def __init__(self, X, X_mean, sqrt_sw):
n_samples, n_features = X.shape
super().__init__(X.dtype, (n_features + 1, n_samples))
self.X = X
self.X_mean = X_mean
self.sqrt_sw = sqrt_sw
def _matvec(self, v):
v = v.ravel()
n_features = self.shape[0]
res = np.empty(n_features, dtype=self.X.dtype)
res[:-1] = (
safe_sparse_dot(self.X.T, v, dense_output=True) -
(self.X_mean * self.sqrt_sw.dot(v))
)
res[-1] = np.dot(v, self.sqrt_sw)
return res
def _matmat(self, v):
n_features = self.shape[0]
res = np.empty((n_features, v.shape[1]), dtype=self.X.dtype)
res[:-1] = (
safe_sparse_dot(self.X.T, v, dense_output=True) -
self.X_mean[:, None] * self.sqrt_sw.dot(v)
)
res[-1] = np.dot(self.sqrt_sw, v)
return res
class _IdentityRegressor:
"""Fake regressor which will directly output the prediction."""
def decision_function(self, y_predict):
return y_predict
def predict(self, y_predict):
return y_predict
class _IdentityClassifier(LinearClassifierMixin):
"""Fake classifier which will directly output the prediction.
We inherit from LinearClassifierMixin to get the proper shape for the
output `y`.
"""
def __init__(self, classes):
self.classes_ = classes
def decision_function(self, y_predict):
return y_predict
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Leave-one-out Cross-Validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id).
Dual solution: c = G^-1y
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G^-1 = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
It is thus inexpensive to inverse for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KG^-1Y - diag(KG^-1)Y) / diag(I-KG^-1)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G^-1)
The best score (negative mean squared error or user-provided scoring) is
stored in the `best_score_` attribute, and the selected hyperparameter in
`alpha_`.
References
----------
http://cbcl.mit.edu/publications/ps/MIT-CSAIL-TR-2007-025.pdf
https://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
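
    A quick numerical check of the looe identity above (an illustrative
    sketch only; this class itself stays private):

    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> X, y, alpha = rng.randn(5, 3), rng.randn(5), 1.0
    >>> K = X.dot(X.T)
    >>> G_inv = np.linalg.inv(K + alpha * np.eye(5))
    >>> c = G_inv.dot(y)
    >>> looe = c / np.diag(G_inv)
    >>> loov = ((K.dot(G_inv).dot(y) - np.diag(K.dot(G_inv)) * y)
    ...         / np.diag(np.eye(5) - K.dot(G_inv)))
    >>> np.allclose(y - loov, looe)
    True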
"""
@_deprecate_positional_args
def __init__(self, alphas=(0.1, 1.0, 10.0), *,
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False,
is_clf=False, alpha_per_target=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.is_clf = is_clf
self.alpha_per_target = alpha_per_target
@staticmethod
def _decomp_diag(v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
@staticmethod
def _diag_dot(D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _compute_gram(self, X, sqrt_sw):
"""Computes the Gram matrix XX^T with possible centering.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
gram : ndarray of shape (n_samples, n_samples)
The Gram matrix.
X_mean : ndarray of shape (n_feature,)
The weighted mean of ``X`` for each feature.
Notes
-----
When X is dense the centering has been done in preprocessing
so the mean is 0 and we just compute XX^T.
When X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
center = self.fit_intercept and sparse.issparse(X)
if not center:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X, X.T, dense_output=True), X_mean
# X is sparse
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples))
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean *= n_samples / sqrt_sw.dot(sqrt_sw)
X_mX = sqrt_sw[:, None] * safe_sparse_dot(
X_mean, X.T, dense_output=True)
X_mX_m = np.outer(sqrt_sw, sqrt_sw) * np.dot(X_mean, X_mean)
return (safe_sparse_dot(X, X.T, dense_output=True) + X_mX_m
- X_mX - X_mX.T, X_mean)
def _compute_covariance(self, X, sqrt_sw):
"""Computes covariance matrix X^TX with possible centering.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
The preprocessed design matrix.
sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
covariance : ndarray of shape (n_features, n_features)
The covariance matrix.
X_mean : ndarray of shape (n_feature,)
The weighted mean of ``X`` for each feature.
Notes
-----
Since X is sparse it has not been centered in preprocessing, but it has
been scaled by sqrt(sample weights).
When self.fit_intercept is False no centering is done.
The centered X is never actually computed because centering would break
the sparsity of X.
"""
if not self.fit_intercept:
# in this case centering has been done in preprocessing
# or we are not fitting an intercept.
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
return safe_sparse_dot(X.T, X, dense_output=True), X_mean
# this function only gets called for sparse X
n_samples = X.shape[0]
sample_weight_matrix = sparse.dia_matrix(
(sqrt_sw, 0), shape=(n_samples, n_samples))
X_weighted = sample_weight_matrix.dot(X)
X_mean, _ = mean_variance_axis(X_weighted, axis=0)
X_mean = X_mean * n_samples / sqrt_sw.dot(sqrt_sw)
weight_sum = sqrt_sw.dot(sqrt_sw)
return (safe_sparse_dot(X.T, X, dense_output=True) -
weight_sum * np.outer(X_mean, X_mean),
X_mean)
def _sparse_multidot_diag(self, X, A, X_mean, sqrt_sw):
"""Compute the diagonal of (X - X_mean).dot(A).dot((X - X_mean).T)
        without explicitly centering X or computing X.dot(A)
when X is sparse.
Parameters
----------
X : sparse matrix of shape (n_samples, n_features)
A : ndarray of shape (n_features, n_features)
X_mean : ndarray of shape (n_features,)
        sqrt_sw : ndarray of shape (n_samples,)
square roots of sample weights
Returns
-------
diag : np.ndarray, shape (n_samples,)
The computed diagonal.
"""
intercept_col = scale = sqrt_sw
batch_size = X.shape[1]
diag = np.empty(X.shape[0], dtype=X.dtype)
for start in range(0, X.shape[0], batch_size):
batch = slice(start, min(X.shape[0], start + batch_size), 1)
X_batch = np.empty(
(X[batch].shape[0], X.shape[1] + self.fit_intercept),
dtype=X.dtype
)
if self.fit_intercept:
X_batch[:, :-1] = X[batch].A - X_mean * scale[batch][:, None]
X_batch[:, -1] = intercept_col[batch]
else:
X_batch = X[batch].A
diag[batch] = (X_batch.dot(A) * X_batch).sum(axis=1)
return diag
def _eigen_decompose_gram(self, X, y, sqrt_sw):
"""Eigendecomposition of X.X^T, used when n_samples <= n_features."""
# if X is dense it has already been centered in preprocessing
K, X_mean = self._compute_gram(X, sqrt_sw)
if self.fit_intercept:
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
K += np.outer(sqrt_sw, sqrt_sw)
eigvals, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return X_mean, eigvals, Q, QT_y
def _solve_eigen_gram(self, alpha, y, sqrt_sw, X_mean, eigvals, Q, QT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X.X^T (n_samples <= n_features).
"""
w = 1. / (eigvals + alpha)
if self.fit_intercept:
# the vector containing the square roots of the sample weights (1
# when no sample weights) is the eigenvector of XX^T which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight).
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, Q)
w[intercept_dim] = 0 # cancel regularization for the intercept
c = np.dot(Q, self._diag_dot(w, QT_y))
G_inverse_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def _eigen_decompose_covariance(self, X, y, sqrt_sw):
"""Eigendecomposition of X^T.X, used when n_samples > n_features
and X is sparse.
"""
n_samples, n_features = X.shape
cov = np.empty((n_features + 1, n_features + 1), dtype=X.dtype)
cov[:-1, :-1], X_mean = self._compute_covariance(X, sqrt_sw)
if not self.fit_intercept:
cov = cov[:-1, :-1]
# to emulate centering X with sample weights,
# ie removing the weighted average, we add a column
# containing the square roots of the sample weights.
# by centering, it is orthogonal to the other columns
# when all samples have the same weight we add a column of 1
else:
cov[-1] = 0
cov[:, -1] = 0
cov[-1, -1] = sqrt_sw.dot(sqrt_sw)
nullspace_dim = max(0, n_features - n_samples)
eigvals, V = linalg.eigh(cov)
# remove eigenvalues and vectors in the null space of X^T.X
eigvals = eigvals[nullspace_dim:]
V = V[:, nullspace_dim:]
return X_mean, eigvals, V, X
def _solve_eigen_covariance_no_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse), and not fitting an intercept.
"""
w = 1 / (eigvals + alpha)
A = (V * w).dot(V.T)
AXy = A.dot(safe_sparse_dot(X.T, y, dense_output=True))
y_hat = safe_sparse_dot(X, AXy, dense_output=True)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance_intercept(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse),
and we are fitting an intercept.
"""
# the vector [0, 0, ..., 0, 1]
# is the eigenvector of X^TX which
# corresponds to the intercept; we cancel the regularization on
# this dimension. the corresponding eigenvalue is
# sum(sample_weight), e.g. n when uniform sample weights.
intercept_sv = np.zeros(V.shape[0])
intercept_sv[-1] = 1
intercept_dim = _find_smallest_angle(intercept_sv, V)
w = 1 / (eigvals + alpha)
w[intercept_dim] = 1 / eigvals[intercept_dim]
A = (V * w).dot(V.T)
# add a column to X containing the square roots of sample weights
X_op = _X_CenterStackOp(X, X_mean, sqrt_sw)
AXy = A.dot(X_op.T.dot(y))
y_hat = X_op.dot(AXy)
hat_diag = self._sparse_multidot_diag(X, A, X_mean, sqrt_sw)
# return (1 - hat_diag), (y - y_hat)
if len(y.shape) != 1:
# handle case where y is 2-d
hat_diag = hat_diag[:, np.newaxis]
return (1 - hat_diag) / alpha, (y - y_hat) / alpha
def _solve_eigen_covariance(
self, alpha, y, sqrt_sw, X_mean, eigvals, V, X):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have a decomposition of X^T.X
(n_samples > n_features and X is sparse).
"""
if self.fit_intercept:
return self._solve_eigen_covariance_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X)
return self._solve_eigen_covariance_no_intercept(
alpha, y, sqrt_sw, X_mean, eigvals, V, X)
def _svd_decompose_design_matrix(self, X, y, sqrt_sw):
# X already centered
X_mean = np.zeros(X.shape[1], dtype=X.dtype)
if self.fit_intercept:
# to emulate fit_intercept=True situation, add a column
# containing the square roots of the sample weights
# by centering, the other columns are orthogonal to that one
intercept_column = sqrt_sw[:, None]
X = np.hstack((X, intercept_column))
U, singvals, _ = linalg.svd(X, full_matrices=0)
singvals_sq = singvals ** 2
UT_y = np.dot(U.T, y)
return X_mean, singvals_sq, U, UT_y
def _solve_svd_design_matrix(
self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):
"""Compute dual coefficients and diagonal of G^-1.
Used when we have an SVD decomposition of X
(n_samples > n_features and X is dense).
"""
w = ((singvals_sq + alpha) ** -1) - (alpha ** -1)
if self.fit_intercept:
# detect intercept column
normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)
intercept_dim = _find_smallest_angle(normalized_sw, U)
# cancel the regularization for the intercept
w[intercept_dim] = - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_inverse_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_inverse_diag = G_inverse_diag[:, np.newaxis]
return G_inverse_diag, c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with gcv.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
Training data. Will be cast to float64 if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to float64 if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'],
dtype=[np.float64],
multi_output=True, y_numeric=True)
# alpha_per_target cannot be used in classifier mode. All subclasses
# of _RidgeGCV that are classifiers keep alpha_per_target at its
# default value: False, so the condition below should never happen.
assert not (self.is_clf and self.alpha_per_target)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X,
dtype=X.dtype)
if np.any(self.alphas <= 0):
raise ValueError(
"alphas must be strictly positive. Got {} containing some "
"negative or null value instead.".format(self.alphas))
X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = _check_gcv_mode(X, self.gcv_mode)
if gcv_mode == 'eigen':
decompose = self._eigen_decompose_gram
solve = self._solve_eigen_gram
elif gcv_mode == 'svd':
if sparse.issparse(X):
decompose = self._eigen_decompose_covariance
solve = self._solve_eigen_covariance
else:
decompose = self._svd_decompose_design_matrix
solve = self._solve_svd_design_matrix
n_samples = X.shape[0]
if sample_weight is not None:
X, y = _rescale_data(X, y, sample_weight)
sqrt_sw = np.sqrt(sample_weight)
else:
sqrt_sw = np.ones(n_samples, dtype=X.dtype)
X_mean, *decomposition = decompose(X, y, sqrt_sw)
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
n_y = 1 if len(y.shape) == 1 else y.shape[1]
n_alphas = 1 if np.ndim(self.alphas) == 0 else len(self.alphas)
if self.store_cv_values:
self.cv_values_ = np.empty(
(n_samples * n_y, n_alphas), dtype=X.dtype)
best_coef, best_score, best_alpha = None, None, None
for i, alpha in enumerate(np.atleast_1d(self.alphas)):
G_inverse_diag, c = solve(
float(alpha), y, sqrt_sw, X_mean, *decomposition)
if error:
squared_errors = (c / G_inverse_diag) ** 2
if self.alpha_per_target:
alpha_score = -squared_errors.mean(axis=0)
else:
alpha_score = -squared_errors.mean()
if self.store_cv_values:
self.cv_values_[:, i] = squared_errors.ravel()
else:
predictions = y - (c / G_inverse_diag)
if self.store_cv_values:
self.cv_values_[:, i] = predictions.ravel()
if self.is_clf:
identity_estimator = _IdentityClassifier(
classes=np.arange(n_y)
)
alpha_score = scorer(identity_estimator,
predictions, y.argmax(axis=1))
else:
identity_estimator = _IdentityRegressor()
if self.alpha_per_target:
alpha_score = np.array([
scorer(identity_estimator,
predictions[:, j], y[:, j])
for j in range(n_y)
])
else:
alpha_score = scorer(identity_estimator,
predictions.ravel(), y.ravel())
# Keep track of the best model
if best_score is None:
# initialize
if self.alpha_per_target and n_y > 1:
best_coef = c
best_score = np.atleast_1d(alpha_score)
best_alpha = np.full(n_y, alpha)
else:
best_coef = c
best_score = alpha_score
best_alpha = alpha
else:
# update
if self.alpha_per_target and n_y > 1:
to_update = alpha_score > best_score
best_coef[:, to_update] = c[:, to_update]
best_score[to_update] = alpha_score[to_update]
best_alpha[to_update] = alpha
elif alpha_score > best_score:
best_coef, best_score, best_alpha = c, alpha_score, alpha
self.alpha_ = best_alpha
self.best_score_ = best_score
self.dual_coef_ = best_coef
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
X_offset += X_mean * X_scale
self._set_intercept(X_offset, y_offset, X_scale)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, n_alphas
else:
cv_values_shape = n_samples, n_y, n_alphas
self.cv_values_ = self.cv_values_.reshape(cv_values_shape)
return self
class _BaseRidgeCV(LinearModel):
@_deprecate_positional_args
def __init__(self, alphas=(0.1, 1.0, 10.0), *,
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None, store_cv_values=False,
alpha_per_target=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
self.alpha_per_target = alpha_per_target
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data. If using GCV, will be cast to float64
if necessary.
y : ndarray of shape (n_samples,) or (n_samples, n_targets)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
Notes
-----
When sample_weight is provided, the selected hyperparameter may depend
on whether we use leave-one-out cross-validation (cv=None or cv='auto')
or another form of cross-validation, because only leave-one-out
cross-validation takes the sample weights into account when computing
the validation score.
"""
cv = self.cv
if cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values,
is_clf=is_classifier(self),
alpha_per_target=self.alpha_per_target)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
self.best_score_ = estimator.best_score_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True"
" are incompatible")
if self.alpha_per_target:
raise ValueError("cv!=None and alpha_per_target=True"
" are incompatible")
parameters = {'alpha': self.alphas}
solver = 'sparse_cg' if sparse.issparse(X) else 'auto'
model = RidgeClassifier if is_classifier(self) else Ridge
gs = GridSearchCV(model(fit_intercept=self.fit_intercept,
normalize=self.normalize,
solver=solver),
parameters, cv=cv, scoring=self.scoring)
gs.fit(X, y, sample_weight=sample_weight)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.best_score_ = gs.best_score_
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
self.n_features_in_ = estimator.n_features_in_
return self
class RidgeCV(MultiOutputMixin, RegressorMixin, _BaseRidgeCV):
"""Ridge regression with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs efficient Leave-One-Out Cross-Validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
If using Leave-One-Out cross-validation, alphas must be positive.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
        If None, the negative mean squared error is used if cv is 'auto' or
        None (i.e. when using leave-one-out cross-validation), and r2 score
        otherwise.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used, else,
:class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
    gcv_mode : {'auto', 'svd', 'eigen'}, default='auto'
Flag indicating which strategy to use when performing
Leave-One-Out Cross-Validation. Options are::
'auto' : use 'svd' if n_samples > n_features, otherwise use 'eigen'
'svd' : force use of singular value decomposition of X when X is
dense, eigenvalue decomposition of X^T.X when X is sparse.
'eigen' : force computation via eigendecomposition of X.X^T
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending on the shape of the training data.
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
alpha_per_target : bool, default=False
Flag indicating whether to optimize the alpha value (picked from the
`alphas` parameter list) for each target separately (for multi-output
settings: multiple prediction targets). When set to `True`, after
fitting, the `alpha_` attribute will contain a value for each target.
When set to `False`, a single alpha is used for all targets.
.. versionadded:: 0.24
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_alphas) or \
shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (only available if
``store_cv_values=True`` and ``cv=None``). After ``fit()`` has been
called, this attribute will contain the mean squared errors
(by default) or the values of the ``{loss,score}_func`` function
(if provided in the constructor).
coef_ : ndarray of shape (n_features) or (n_targets, n_features)
Weight vector(s).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float or ndarray of shape (n_targets,)
Estimated regularization parameter, or, if ``alpha_per_target=True``,
the estimated regularization parameter for each target.
best_score_ : float or ndarray of shape (n_targets,)
Score of base estimator with best alpha, or, if
``alpha_per_target=True``, a score for each target.
.. versionadded:: 0.23
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import RidgeCV
>>> X, y = load_diabetes(return_X_y=True)
>>> clf = RidgeCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.5166...
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Ridge classifier.
RidgeClassifierCV : Ridge classifier with built-in cross validation.
"""
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
See glossary entry for :term:`cross-validation estimator`.
By default, it performs Leave-One-Out Cross-Validation. Currently,
only the n_features > n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : ndarray of shape (n_alphas,), default=(0.1, 1.0, 10.0)
Array of alpha values to try.
Regularization strength; must be a positive float. Regularization
improves the conditioning of the problem and reduces the variance of
the estimates. Larger values specify stronger regularization.
Alpha corresponds to ``1 / (2C)`` in other linear models such as
:class:`~sklearn.linear_model.LogisticRegression` or
:class:`~sklearn.svm.LinearSVC`.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
normalize : bool, default=False
This parameter is ignored when ``fit_intercept`` is set to False.
If True, the regressors X will be normalized before regression by
subtracting the mean and dividing by the l2-norm.
If you wish to standardize, please use
:class:`~sklearn.preprocessing.StandardScaler` before calling ``fit``
on an estimator with ``normalize=False``.
scoring : string, callable, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
store_cv_values : bool, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the ``cv_values_`` attribute (see
below). This flag is only compatible with ``cv=None`` (i.e. using
Leave-One-Out Cross-Validation).
Attributes
----------
cv_values_ : ndarray of shape (n_samples, n_targets, n_alphas), optional
Cross-validation values for each alpha (if ``store_cv_values=True`` and
``cv=None``). After ``fit()`` has been called, this attribute will
contain the mean squared errors (by default) or the values of the
``{loss,score}_func`` function (if provided in the constructor). This
attribute exists only when ``store_cv_values`` is True.
coef_ : ndarray of shape (1, n_features) or (n_targets, n_features)
Coefficient of the features in the decision function.
``coef_`` is of shape (1, n_features) when the given problem is binary.
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
best_score_ : float
Score of base estimator with best alpha.
.. versionadded:: 0.23
classes_ : ndarray of shape (n_classes,)
The classes labels.
Examples
--------
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import RidgeClassifierCV
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1]).fit(X, y)
>>> clf.score(X, y)
0.9630...
See Also
--------
Ridge : Ridge regression.
RidgeClassifier : Ridge classifier.
RidgeCV : Ridge regression with built-in cross validation.
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
@_deprecate_positional_args
def __init__(self, alphas=(0.1, 1.0, 10.0), *, fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None,
store_cv_values=False):
super().__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv, store_cv_values=store_cv_values)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge classifier with cv.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features. When using GCV,
will be cast to float64 if necessary.
y : ndarray of shape (n_samples,)
Target values. Will be cast to X's dtype if necessary.
sample_weight : float or ndarray of shape (n_samples,), default=None
Individual weights for each sample. If given a float, every sample
will have the same weight.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc', 'coo'],
multi_output=True, y_numeric=False)
sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
target = Y if self.cv is None else y
_BaseRidgeCV.fit(self, X, target, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
def _more_tags(self):
return {
'_xfail_checks': {
'check_sample_weights_invariance':
'zero sample_weight is not equivalent to removing samples',
}
}
|
bsd-3-clause
|
mcdeaton13/dynamic
|
Data/Calibration/Firm_Calibration_Python/parameters/employment/script_wages.py
|
6
|
1821
|
'''
-------------------------------------------------------------------------------
Date created: 5/22/2015
Last updated 5/22/2015
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
Packages:
-------------------------------------------------------------------------------
'''
import os.path
import sys
import numpy as np
import pandas as pd
# Find the directory of this file:
cur_dir = os.path.dirname(__file__)
# Import naics processing file:
try:
import naics_processing as naics
except ImportError:
data_struct_dir = os.path.dirname(os.path.dirname(cur_dir))
    data_struct_dir = os.path.join(data_struct_dir, "data_structures")
data_struct_dir = os.path.abspath(data_struct_dir)
sys.path.append(data_struct_dir)
try:
import naics_processing as naics
except ImportError:
print "\n\n ImportError: Failed to import naics_processing \n\n"
# Import the helper functions to read in the national income data:
import read_wages_data as read_wages
'''
-------------------------------------------------------------------------------
-------------------------------------------------------------------------------
'''
data_folder = os.path.abspath(os.path.join(cur_dir, "data"))
naics_codes_file = os.path.abspath(os.path.join(data_folder, "NAICS_Codes.csv"))
output_folder = os.path.abspath(os.path.join(cur_dir, "output"))
def main():
    # Load the NAICS tree from the codes file:
    naics_tree = naics.load_naics(naics_codes_file)
    # Read the NIPA wage data into the tree:
    read_wages.load_nipa_wages_ind(data_folder, naics_tree)
    # Parameters (data columns) to carry through the tree:
    parameters = [read_wages.WAGES]
    # Propagate the wage data back and then forward through the tree:
    naics.pop_back(naics_tree, parameters)
    naics.pop_forward(naics_tree, parameters, None, None, None, True)
    # Write the processed tree to the output folder:
    naics.print_tree_dfs(naics_tree, output_folder)
if __name__ == "script_wages":
main()
|
mit
|
r-mart/scikit-learn
|
sklearn/decomposition/nmf.py
|
100
|
19059
|
""" Non-negative matrix factorization
"""
# Author: Vlad Niculae
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Author: Chih-Jen Lin, National Taiwan University (original projected gradient
# NMF implementation)
# Author: Anthony Di Franco (original Python and NumPy port)
# License: BSD 3 clause
from __future__ import division
from math import sqrt
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.optimize import nnls
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.extmath import randomized_svd, safe_sparse_dot, squared_norm
from ..utils.validation import check_is_fitted, check_non_negative
def safe_vstack(Xs):
if any(sp.issparse(X) for X in Xs):
return sp.vstack(Xs)
else:
return np.vstack(Xs)
def norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return sqrt(squared_norm(x))
def trace_dot(X, Y):
"""Trace of np.dot(X, Y.T)."""
return np.dot(X.ravel(), Y.ravel())
def _sparseness(x):
"""Hoyer's measure of sparsity for a vector"""
sqrt_n = np.sqrt(len(x))
return (sqrt_n - np.linalg.norm(x, 1) / norm(x)) / (sqrt_n - 1)
def _initialize_nmf(X, n_components, variant=None, eps=1e-6,
random_state=None):
"""NNDSVD algorithm for NMF initialization.
Computes a good initial guess for the non-negative
rank k matrix approximation for X: X = WH
Parameters
----------
X : array, [n_samples, n_features]
The data matrix to be decomposed.
    n_components : int
The number of components desired in the approximation.
variant : None | 'a' | 'ar'
The variant of the NNDSVD algorithm.
Accepts None, 'a', 'ar'
None: leaves the zero entries as zero
'a': Fills the zero entries with the average of X
'ar': Fills the zero entries with standard normal random variates.
Default: None
eps: float
        Truncate all values less than this in output to zero.
random_state : numpy.RandomState | int, optional
The generator used to fill in the zeros, when using variant='ar'
Default: numpy.random
Returns
-------
(W, H) :
Initial guesses for solving X ~= WH such that
the number of columns in W is n_components.
References
----------
C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
nonnegative matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
check_non_negative(X, "NMF initialization")
if variant not in (None, 'a', 'ar'):
raise ValueError("Invalid variant name")
random_state = check_random_state(random_state)
U, S, V = randomized_svd(X, n_components, random_state=random_state)
W, H = np.zeros(U.shape), np.zeros(V.shape)
# The leading singular triplet is non-negative
# so it can be used as is for initialization.
W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])
for j in range(1, n_components):
x, y = U[:, j], V[j, :]
# extract positive and negative parts of column vectors
x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))
# and their norms
x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)
m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm
# choose update
if m_p > m_n:
u = x_p / x_p_nrm
v = y_p / y_p_nrm
sigma = m_p
else:
u = x_n / x_n_nrm
v = y_n / y_n_nrm
sigma = m_n
lbd = np.sqrt(S[j] * sigma)
W[:, j] = lbd * u
H[j, :] = lbd * v
W[W < eps] = 0
H[H < eps] = 0
if variant == "a":
avg = X.mean()
W[W == 0] = avg
H[H == 0] = avg
elif variant == "ar":
avg = X.mean()
W[W == 0] = abs(avg * random_state.randn(len(W[W == 0])) / 100)
H[H == 0] = abs(avg * random_state.randn(len(H[H == 0])) / 100)
return W, H
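# A minimal sketch (added for illustration; not part of the upstream module)
# showing how the NNDSVD initializer above could be exercised on a small
# non-negative matrix. The toy data and the helper name are illustrative only.
def _nndsvd_init_demo():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))             # small non-negative data matrix
    W, H = _initialize_nmf(X, n_components=2, random_state=0)
    # W and H are non-negative and give a rough first approximation of X.
    return norm(X - np.dot(W, H))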
def _nls_subproblem(V, W, H, tol, max_iter, sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the
projected gradient descent algorithm.
min || WH - V ||_2
Parameters
----------
V, W : array-like
Constant matrices.
H : array-like
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like
Solution to the non-negative least squares problem.
grad : array-like
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix factorization.
Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtV = safe_sparse_dot(W.T, V)
WtW = np.dot(W.T, W)
# values justified in the paper
alpha = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtV
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(19):
# Gradient step.
Hn = H - alpha * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_alpha = not suff_decr
if decr_alpha:
if suff_decr:
H = Hn
break
else:
alpha *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
alpha /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.")
return H, grad, n_iter
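# A minimal sketch (added for illustration; not part of the upstream module)
# of the projected-gradient NLS solver above: given fixed W and V, it returns
# a non-negative H such that np.dot(W, H) is close to V in the least-squares
# sense. The toy matrices below are illustrative only.
def _nls_subproblem_demo():
    rng = np.random.RandomState(0)
    W = np.abs(rng.randn(8, 3))
    H_true = np.abs(rng.randn(3, 4))
    V = np.dot(W, H_true)
    H0 = np.abs(rng.randn(3, 4))            # any non-negative starting point
    H, grad, n_iter = _nls_subproblem(V, W, H0, tol=1e-4, max_iter=200)
    return norm(np.dot(W, H) - V)           # typically small after convergence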
class ProjectedGradientNMF(BaseEstimator, TransformerMixin):
"""Non-Negative matrix factorization by Projected Gradient (NMF)
Read more in the :ref:`User Guide <NMF>`.
Parameters
----------
n_components : int or None
        Number of components. If n_components is not set, all components
        are kept.
init : 'nndsvd' | 'nndsvda' | 'nndsvdar' | 'random'
Method used to initialize the procedure.
Default: 'nndsvd' if n_components < n_features, otherwise random.
Valid options::
'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
'random': non-negative random matrices
sparseness : 'data' | 'components' | None, default: None
Where to enforce sparsity in the model.
beta : double, default: 1
Degree of sparseness, if sparseness is not None. Larger values mean
more sparseness.
eta : double, default: 0.1
Degree of correctness to maintain, if sparsity is not None. Smaller
values mean larger error.
tol : double, default: 1e-4
Tolerance value used in stopping conditions.
max_iter : int, default: 200
Number of iterations to compute.
nls_max_iter : int, default: 2000
Number of iterations in NLS subproblem.
random_state : int or RandomState
Random number generator seed control.
Attributes
----------
components_ : array, [n_components, n_features]
Non-negative components of the data.
reconstruction_err_ : number
Frobenius norm of the matrix difference between
the training data and the reconstructed data from
the fit produced by the model. ``|| X - WH ||_2``
n_iter_ : int
Number of iterations run.
Examples
--------
>>> import numpy as np
>>> X = np.array([[1,1], [2, 1], [3, 1.2], [4, 1], [5, 0.8], [6, 1]])
>>> from sklearn.decomposition import ProjectedGradientNMF
>>> model = ProjectedGradientNMF(n_components=2, init='random',
... random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0, sparseness=None,
tol=0.0001)
>>> model.components_
array([[ 0.77032744, 0.11118662],
[ 0.38526873, 0.38228063]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.00746...
>>> model = ProjectedGradientNMF(n_components=2,
... sparseness='components', init='random', random_state=0)
>>> model.fit(X) #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
ProjectedGradientNMF(beta=1, eta=0.1, init='random', max_iter=200,
n_components=2, nls_max_iter=2000, random_state=0,
sparseness='components', tol=0.0001)
>>> model.components_
array([[ 1.67481991, 0.29614922],
[ 0. , 0.4681982 ]])
>>> model.reconstruction_err_ #doctest: +ELLIPSIS
0.513...
References
----------
This implements
C.-J. Lin. Projected gradient methods
for non-negative matrix factorization. Neural
Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
P. Hoyer. Non-negative Matrix Factorization with
Sparseness Constraints. Journal of Machine Learning
Research 2004.
NNDSVD is introduced in
C. Boutsidis, E. Gallopoulos: SVD based
initialization: A head start for nonnegative
matrix factorization - Pattern Recognition, 2008
http://tinyurl.com/nndsvd
"""
def __init__(self, n_components=None, init=None, sparseness=None, beta=1,
eta=0.1, tol=1e-4, max_iter=200, nls_max_iter=2000,
random_state=None):
self.n_components = n_components
self.init = init
self.tol = tol
if sparseness not in (None, 'data', 'components'):
raise ValueError(
'Invalid sparseness parameter: got %r instead of one of %r' %
(sparseness, (None, 'data', 'components')))
self.sparseness = sparseness
self.beta = beta
self.eta = eta
self.max_iter = max_iter
self.nls_max_iter = nls_max_iter
self.random_state = random_state
def _init(self, X):
n_samples, n_features = X.shape
init = self.init
if init is None:
if self.n_components_ < n_features:
init = 'nndsvd'
else:
init = 'random'
rng = check_random_state(self.random_state)
if init == 'nndsvd':
W, H = _initialize_nmf(X, self.n_components_, random_state=rng)
elif init == 'nndsvda':
W, H = _initialize_nmf(X, self.n_components_, variant='a',
random_state=rng)
elif init == 'nndsvdar':
W, H = _initialize_nmf(X, self.n_components_, variant='ar',
random_state=rng)
elif init == "random":
W = rng.randn(n_samples, self.n_components_)
# we do not write np.abs(W, out=W) to stay compatible with
# numpy 1.5 and earlier where the 'out' keyword is not
# supported as a kwarg on ufuncs
np.abs(W, W)
H = rng.randn(self.n_components_, n_features)
np.abs(H, H)
else:
raise ValueError(
'Invalid init parameter: got %r instead of one of %r' %
(init, (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random')))
return W, H
def _update_W(self, X, H, W, tolW):
n_samples, n_features = X.shape
if self.sparseness is None:
W, gradW, iterW = _nls_subproblem(X.T, H.T, W.T, tolW,
self.nls_max_iter)
elif self.sparseness == 'data':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T, np.zeros((1, n_samples))]),
safe_vstack([H.T, np.sqrt(self.beta) * np.ones((1,
self.n_components_))]),
W.T, tolW, self.nls_max_iter)
elif self.sparseness == 'components':
W, gradW, iterW = _nls_subproblem(
safe_vstack([X.T,
np.zeros((self.n_components_, n_samples))]),
safe_vstack([H.T,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
W.T, tolW, self.nls_max_iter)
return W.T, gradW.T, iterW
def _update_H(self, X, H, W, tolH):
n_samples, n_features = X.shape
if self.sparseness is None:
H, gradH, iterH = _nls_subproblem(X, W, H, tolH,
self.nls_max_iter)
elif self.sparseness == 'data':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((self.n_components_, n_features))]),
safe_vstack([W,
np.sqrt(self.eta) * np.eye(self.n_components_)]),
H, tolH, self.nls_max_iter)
elif self.sparseness == 'components':
H, gradH, iterH = _nls_subproblem(
safe_vstack([X, np.zeros((1, n_features))]),
safe_vstack([W,
np.sqrt(self.beta)
* np.ones((1, self.n_components_))]),
H, tolH, self.nls_max_iter)
return H, gradH, iterH
def fit_transform(self, X, y=None):
"""Learn a NMF model for the data X and returns the transformed data.
This is more efficient than calling fit followed by transform.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
X = check_array(X, accept_sparse='csr')
check_non_negative(X, "NMF.fit")
n_samples, n_features = X.shape
if not self.n_components:
self.n_components_ = n_features
else:
self.n_components_ = self.n_components
W, H = self._init(X)
gradW = (np.dot(W, np.dot(H, H.T))
- safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H)
- safe_sparse_dot(W.T, X, dense_output=True))
init_grad = norm(np.r_[gradW, gradH.T])
tolW = max(0.001, self.tol) * init_grad # why max?
tolH = tolW
tol = self.tol * init_grad
for n_iter in range(1, self.max_iter + 1):
# stopping condition
# as discussed in paper
proj_norm = norm(np.r_[gradW[np.logical_or(gradW < 0, W > 0)],
gradH[np.logical_or(gradH < 0, H > 0)]])
if proj_norm < tol:
break
# update W
W, gradW, iterW = self._update_W(X, H, W, tolW)
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = self._update_H(X, H, W, tolH)
if iterH == 1:
tolH = 0.1 * tolH
if not sp.issparse(X):
error = norm(X - np.dot(W, H))
else:
sqnorm_X = np.dot(X.data, X.data)
norm_WHT = trace_dot(np.dot(np.dot(W.T, W), H), H)
cross_prod = trace_dot((X * H.T), W)
error = sqrt(sqnorm_X + norm_WHT - 2. * cross_prod)
self.reconstruction_err_ = error
self.comp_sparseness_ = _sparseness(H.ravel())
self.data_sparseness_ = _sparseness(W.ravel())
H[H == 0] = 0 # fix up negative zeros
self.components_ = H
if n_iter == self.max_iter:
warnings.warn("Iteration limit reached during fit. Solving for W exactly.")
return self.transform(X)
self.n_iter_ = n_iter
return W
def fit(self, X, y=None, **params):
"""Learn a NMF model for the data X.
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be decomposed
Returns
-------
self
"""
self.fit_transform(X, **params)
return self
def transform(self, X):
"""Transform the data X according to the fitted NMF model
Parameters
----------
X: {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix to be transformed by the model
Returns
-------
data: array, [n_samples, n_components]
Transformed data
"""
check_is_fitted(self, 'n_components_')
X = check_array(X, accept_sparse='csc')
Wt = np.zeros((self.n_components_, X.shape[0]))
check_non_negative(X, "ProjectedGradientNMF.transform")
if sp.issparse(X):
Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
tol=self.tol,
max_iter=self.nls_max_iter)
else:
for j in range(0, X.shape[0]):
Wt[:, j], _ = nnls(self.components_.T, X[j, :])
return Wt.T
class NMF(ProjectedGradientNMF):
__doc__ = ProjectedGradientNMF.__doc__
pass
|
bsd-3-clause
|
pianomania/scikit-learn
|
sklearn/mixture/gmm.py
|
19
|
32365
|
"""
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
# Important note for the deprecation cleaning of 0.20 :
# All the functions and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/dpgmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
@deprecated("The function log_multivariate_normal_density is deprecated in 0.18"
" and will be removed in 0.20.")
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
@deprecated("The function sample_gaussian is deprecated in 0.18"
" and will be removed in 0.20."
" Use numpy.random.multivariate_normal instead.")
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array
Randomly generated sample. The shape depends on `n_samples`:
(n_features,) if `1`
(n_features, n_samples) otherwise
"""
    return _sample_gaussian(mean, covar, covariance_type=covariance_type,
                            n_samples=n_samples, random_state=random_state)
def _sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
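# A minimal sketch (added for illustration; not part of the upstream module):
# drawing several samples from a diagonal-covariance Gaussian with the helper
# above. The numbers are illustrative only.
def _sample_gaussian_demo():
    mean = np.array([0.0, 10.0])
    covar = np.array([1.0, 4.0])            # per-feature variances ('diag')
    samples = _sample_gaussian(mean, covar, covariance_type='diag',
                               n_samples=5, random_state=0)
    # For n_samples > 1 the result has shape (n_features, n_samples).
    return samples.shape                    # -> (2, 5)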
class _GMMBase(BaseEstimator):
"""Gaussian Mixture Model.
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state : RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the Dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) # doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) # doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
self.n_components = n_components
self.covariance_type = covariance_type
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance."""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = _sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: Due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate.
.. versionadded:: 0.17
*fit_predict* method in Gaussian Mixture Model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2,
estimator=self)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < self.tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
"""Perform the Mstep of the EM algorithm and return the cluster weights.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic : float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data.
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic : float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
@deprecated("The class GMM is deprecated in 0.18 and will be "
" removed in 0.20. Use class GaussianMixture instead.")
class GMM(_GMMBase):
"""
Legacy Gaussian Mixture Model
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`sklearn.mixture.GaussianMixture` instead.
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
super(GMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
random_state=random_state, tol=tol, min_covar=min_covar,
n_iter=n_iter, n_init=n_init, params=params,
init_params=init_params, verbose=verbose)
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model."""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model."""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if cv.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model."""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
            # few observations, we need to reinitialize this component
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
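# A minimal sketch (added for illustration; not part of the upstream module):
# the diagonal log-density helper above can be cross-checked against
# scipy.stats for a single component. The toy values are illustrative only.
def _log_density_diag_demo():
    from scipy.stats import multivariate_normal
    X = np.array([[0.0, 0.0], [1.0, 2.0]])
    means = np.array([[0.0, 1.0]])          # one component, two features
    covars = np.array([[1.0, 4.0]])         # its diagonal variances
    ours = _log_multivariate_normal_density_diag(X, means, covars)[:, 0]
    ref = multivariate_normal(mean=means[0], cov=np.diag(covars[0])).logpdf(X)
    return np.allclose(ours, ref)           # expected to be True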
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values."""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
@deprecated("The function distribute_covar_matrix_to_match_covariance_type"
            " is deprecated in 0.18 and will be removed in 0.20.")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template."""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
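# A minimal sketch (added for illustration; not part of the upstream module)
# of the template-tiling helper above: a single tied covariance matrix is
# expanded to the shape expected for each covariance_type.
def _distribute_covar_demo():
    tied_cv = np.eye(3)
    full = distribute_covar_matrix_to_match_covariance_type(tied_cv, 'full', 2)
    diag = distribute_covar_matrix_to_match_covariance_type(tied_cv, 'diag', 2)
    return full.shape, diag.shape           # -> ((2, 3, 3), (2, 3))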
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for diagonal cases."""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Perform the covariance M step for spherical cases."""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for full cases."""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Perform the covariance M step for tied cases."""
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
|
bsd-3-clause
|
fabianp/scikit-learn
|
examples/linear_model/plot_lasso_model_selection.py
|
311
|
5431
|
"""
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of the degrees of freedom, is derived
for large samples (asymptotic results) and assumes the model is
correct, i.e. that the data are actually generated by this model.
These criteria also tend to break when the problem is badly
conditioned (more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regard
to their execution speed and sources of numerical errors.
Lars computes a path solution only at each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. It is also able to
compute the full path without setting any meta parameter. By contrast,
coordinate descent computes the path points on a pre-specified grid
(here we use the default). Thus it is more efficient if the number of
grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select many of them. In terms of
numerical errors, for heavily correlated variables, Lars will
accumulate more errors, while the coordinate descent algorithm will
only sample the path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
|
bsd-3-clause
|
IndraVikas/scikit-learn
|
sklearn/manifold/locally_linear.py
|
206
|
25061
|
"""Locally Linear Embedding"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
from scipy.linalg import eigh, svd, qr, solve
from scipy.sparse import eye, csr_matrix
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_random_state, check_array
from ..utils.arpack import eigsh
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..neighbors import NearestNeighbors
def barycenter_weights(X, Z, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[i] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Z : array-like, shape (n_samples, n_neighbors, n_dim)
reg: float, optional
amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)
n_samples, n_neighbors = X.shape[0], Z.shape[1]
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, A in enumerate(Z.transpose(0, 2, 1)):
C = A.T - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::Z.shape[1] + 1] += R
w = solve(G, v, sym_pos=True)
B[i, :] = w / np.sum(w)
return B
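# A minimal sketch (added for illustration; not part of the upstream module):
# the barycenter weights computed above sum to one along each row, so every
# X[i] is approximated by an affine combination of its neighbourhood Z[i].
# The toy arrays are illustrative only.
def _barycenter_weights_demo():
    rng = np.random.RandomState(0)
    X = rng.randn(4, 3)                     # 4 points in 3 dimensions
    Z = rng.randn(4, 5, 3)                  # 5 "neighbours" per point
    B = barycenter_weights(X, Z)
    return np.allclose(B.sum(axis=1), 1.0)  # expected to be True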
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, optional
        Amount of regularization when solving the least-squares
        problem. Defaults to 1e-3.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors + 1).fit(X)
X = knn._fit_X
n_samples = X.shape[0]
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X[ind], reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr),
shape=(n_samples, n_samples))
def null_space(M, k, k_skip=1, eigen_solver='arpack', tol=1E-6, max_iter=100,
random_state=None):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : maximum number of iterations for 'arpack' method
not used if eigen_solver=='dense'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
"""
if eigen_solver == 'auto':
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
if eigen_solver == 'arpack':
random_state = check_random_state(random_state)
v0 = random_state.rand(M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
tol=tol, maxiter=max_iter,
v0=v0)
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
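# A minimal sketch (added for illustration; not part of the upstream module)
# of the dense branch of null_space above: for a symmetric PSD matrix it
# returns the k eigenvectors with the smallest eigenvalues after skipping the
# k_skip smallest ones. The path-graph Laplacian below is illustrative only.
def _null_space_demo():
    # Laplacian of a path graph on 4 nodes: PSD with one zero eigenvalue
    # (the constant vector), which k_skip=1 discards.
    L = np.array([[ 1., -1.,  0.,  0.],
                  [-1.,  2., -1.,  0.],
                  [ 0., -1.,  2., -1.],
                  [ 0.,  0., -1.,  1.]])
    vectors, err_sum = null_space(L, k=2, k_skip=1, eigen_solver='dense')
    return vectors.shape                    # -> (4, 2)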
def locally_linear_embedding(
X, n_neighbors, n_components, reg=1e-3, eigen_solver='auto', tol=1e-6,
max_iter=100, method='standard', hessian_tol=1E-4, modified_tol=1E-12,
random_state=None):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
            n_neighbors > n_components * (n_components + 3) / 2.
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if method == 'modified'
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
"""
if eigen_solver not in ('auto', 'arpack', 'dense'):
raise ValueError("unrecognized eigen_solver '%s'" % eigen_solver)
if method not in ('standard', 'hessian', 'modified', 'ltsa'):
raise ValueError("unrecognized method '%s'" % method)
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
if n_neighbors >= N:
raise ValueError("n_neighbors must be less than number of points")
if n_neighbors <= 0:
raise ValueError("n_neighbors must be positive")
M_sparse = (eigen_solver != 'dense')
if method == 'standard':
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
            M.flat[::M.shape[0] + 1] += 1  # M is now (I - W)' (I - W)
elif method == 'hessian':
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError("for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float)
Yi[:, 0] = 1
M = np.zeros((N, N), dtype=np.float)
use_svd = (n_neighbors > d_in)
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
#build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1:1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j:j + n_components - k] = (U[:, k:k + 1]
* U[:, k:n_components])
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1:]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
if M_sparse:
M = csr_matrix(M)
elif method == 'modified':
if n_neighbors < n_components:
raise ValueError("modified LLE requires "
"n_neighbors >= n_components")
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
#find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
#choose the most efficient way to find the eigenvectors
use_svd = (n_neighbors > d_in)
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs,
full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
#find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1E-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
#calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
#find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
#Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = np.zeros((N, N), dtype=np.float)
for i in range(N):
s_i = s_range[i]
#select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i:]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
#compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = alpha_i * np.ones(s_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
#Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
#Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
#We do this much more efficiently:
Wi = (Vi - 2 * np.outer(np.dot(Vi, h), h)
+ (1 - alpha_i) * w_reg[i, :, None])
#Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
#We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], i] -= Wi_sum1
M[i, i] += s_i
if M_sparse:
M = csr_matrix(M)
elif method == 'ltsa':
neighbors = nbrs.kneighbors(X, n_neighbors=n_neighbors + 1,
return_distance=False)
neighbors = neighbors[:, 1:]
M = np.zeros((N, N))
use_svd = (n_neighbors > d_in)
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
tol=tol, max_iter=max_iter, random_state=random_state)
class LocallyLinearEmbedding(BaseEstimator, TransformerMixin):
"""Locally Linear Embedding
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : string, {'auto', 'arpack', 'dense'}
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, optional
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : integer
maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : string ('standard', 'hessian', 'modified' or 'ltsa')
standard : use the standard locally linear embedding algorithm. see
reference [1]
hessian : use the Hessian eigenmap method. This method requires
            ``n_neighbors > n_components * (n_components + 3) / 2``
see reference [2]
modified : use the modified locally linear embedding algorithm.
see reference [3]
ltsa : use local tangent space alignment algorithm
see reference [4]
hessian_tol : float, optional
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``
modified_tol : float, optional
Tolerance for modified LLE method.
Only used if ``method == 'modified'``
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
    random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
Attributes
----------
embedding_vectors_ : array-like, shape [n_components, n_samples]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_vectors_`
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
References
----------
.. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).`
.. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).`
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.`
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382
.. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)`
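    Examples
    --------
    A minimal usage sketch on synthetic data (only the output shape is
    checked, since the embedding values depend on the eigensolver):
    >>> import numpy as np
    >>> from sklearn.manifold import LocallyLinearEmbedding
    >>> X = np.random.RandomState(42).rand(100, 5)
    >>> lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2)
    >>> X_transformed = lle.fit_transform(X)
    >>> X_transformed.shape
    (100, 2)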
"""
def __init__(self, n_neighbors=5, n_components=2, reg=1E-3,
eigen_solver='auto', tol=1E-6, max_iter=100,
method='standard', hessian_tol=1E-4, modified_tol=1E-12,
neighbors_algorithm='auto', random_state=None):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(self.n_neighbors,
algorithm=self.neighbors_algorithm)
random_state = check_random_state(self.random_state)
X = check_array(X)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = \
locally_linear_embedding(
self.nbrs_, self.n_neighbors, self.n_components,
eigen_solver=self.eigen_solver, tol=self.tol,
max_iter=self.max_iter, method=self.method,
hessian_tol=self.hessian_tol, modified_tol=self.modified_tol,
random_state=random_state, reg=self.reg)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape [n_samples, n_features]
training set.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs)
"""
check_is_fitted(self, "nbrs_")
X = check_array(X)
ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
return_distance=False)
weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
|
bsd-3-clause
|
Manolo94/manolo94.github.io
|
MLpython/HW5.py
|
1
|
7050
|
import pandas as pd
import numpy as np
import sys
import random
import copy
# Task 1
task1_data = {'Wins_2016': [3, 3, 2, 2, 6, 6, 7, 7, 8, 7], 'Wins_2017': [5, 4, 8, 3, 2, 4, 3, 4, 5, 6]}
task1_pd = pd.DataFrame(data=task1_data)
iris_df = pd.read_csv('./iris_input/iris.data', names=['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'class'])
iris_test_df = iris_df['class']
iris_data_df = iris_df.drop(columns=['class'])
def Calculate_SSE(centroids : np.array, datapoints_per_centroid : np.array, dist_func):
sse = 0
k = np.size(centroids, axis=0)
# For each cluster
for i in range(k):
datapoints = datapoints_per_centroid[i]
centroid = centroids[i]
if np.size(datapoints, axis=0) == 0:
continue
A = np.full(np.shape(datapoints), centroid)
B = datapoints
def squared_dist(x : np.array, y : np.array):
return dist_func(x, y)**2
differences = np.array(list(map(squared_dist, A, B)))
sse = sse + np.sum(differences)
return sse
def euclidean_dist(a,b):
return np.linalg.norm(a-b)
def manhattan_dist(a,b):
return np.sum(np.abs(a-b))
def gen_jaccard_dist(a,b):
    # generalized Jaccard distance = 1 - similarity, so that smaller means closer
    # (the raw ratio is a similarity and would invert the "closest centroid" test)
    return 1 - np.sum(np.min(np.array([a,b]), axis=0)) / np.sum(np.max(np.array([a,b]), axis=0))
def cosine_sim_dist(a,b):
    # cosine distance = 1 - cosine similarity, so that smaller means closer
    return 1 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
def KMeans(dist_func, data_dataframe, k, max_iterations=0, stop_with_sse=False, initial_centroids=[], epsilon=1e-9):
data_df = pd.DataFrame.to_numpy(data_dataframe)
centroids = []
if len(initial_centroids) == 0:
# Choose the initial centroids randomly using the min and max of each dimension
mins = np.min(data_df, axis=0)
maxs = np.max(data_df, axis=0)
for i in range(k):
new_c = []
for j in range(np.size(data_df, axis=1)):
new_c.append(random.uniform(mins[j], maxs[j]))
centroids.append(new_c)
else:
centroids = initial_centroids
centroids = np.array(centroids)
iteration = 1
new_centroids = copy.deepcopy(centroids)
previous_sse = sys.maxsize
stop_flag = False
# For i iterations
while stop_flag == False:
if iteration == max_iterations:
break
# A list of datapoints for every centroid
datapoints_per_centroid = []
for i in range(k):
datapoints_per_centroid.append([])
#print(datapoints_per_centroid)
#print("Before for all:", centroids)
# For all points
for data_entry in data_df:
min_dist = sys.maxsize
closest = -1
for i in range(k):
dist = dist_func(data_entry, centroids[i])
if dist < min_dist:
min_dist, closest = dist, i
# Assign to the closest centroid
datapoints_per_centroid[closest].append(data_entry)
# Recompute the centroid of each cluster
for i in range(k):
if np.size(datapoints_per_centroid[i], axis=0) > 0:
new_centroids[i] = np.average(datapoints_per_centroid[i], axis = 0)
# Check SSE for all points for all centroids
if stop_with_sse:
sse = Calculate_SSE(centroids, datapoints_per_centroid, dist_func)
if(sse >= previous_sse):
stop_flag = True
else:
previous_sse = sse
all_equal = True
# For each centroid
for i in range(k):
# Check if the centroid moved, using euclidean distance for this
dist_to_new_centroid = dist_func(centroids[i], new_centroids[i])
#print("D", dist_to_new_centroid, centroids[i], new_centroids[i], "S", np.size(datapoints_per_centroid[i], axis=0))
if(dist_to_new_centroid > epsilon):
all_equal = False
# Update each centroid
centroids[i] = copy.deepcopy(new_centroids[i])
#centroids = np.array(centroids)
if all_equal and not stop_with_sse:
stop_flag = True
iteration = iteration + 1
#print("-------------")
#print("Ended at iteration: ", iteration)
return ( centroids, datapoints_per_centroid )
def MeasureAccuracy(centroids, data_df, test_df, dist_func):
labels = pd.unique(test_df)
k = np.size(centroids, axis=0)
# an array of dictionaries, each dictionary maps labels to votes for a particular centroid
centroid_votes = []
for c in range(k):
centroid_votes.append({})
for l in labels:
centroid_votes[c][l] = 0
for index, row in data_df.iterrows():
# For all points
min_dist = sys.maxsize
closest = -1
for i in range(k):
dist = dist_func(row, centroids[i])
if dist < min_dist:
min_dist, closest = dist, i
# Assign to the closest centroid
centroid_votes[closest][test_df.loc[index]] = centroid_votes[closest][test_df.loc[index]] + 1
# For each cluster, for each label, add everything to get a total,
# keep the max_votes and count that as the rights
rights, total = 0, 0
for c in range(k):
max_votes = -1
        # use 'label' instead of reusing k, to avoid shadowing the cluster count
        for label in centroid_votes[c].keys():
            if centroid_votes[c][label] > max_votes:
                max_votes = centroid_votes[c][label]
            total = total + centroid_votes[c][label]
rights = rights + max_votes
return rights / total
(euclidean_centroids, euclidean_dps_per_centroid) = KMeans(euclidean_dist, iris_data_df, 3)
(cosine_centroids, cosine_dps_per_centroid) = KMeans(cosine_sim_dist, iris_data_df, 3, 0, True)
(gen_jaccard_centroids, gen_jaccard_dps_per_centroid) = KMeans(gen_jaccard_dist, iris_data_df, 3, 0, True)
print("Task 1:")
(centroids_1, dpts_per_centroid_1) = KMeans(manhattan_dist, task1_pd, 2, 0, False, [[4,6], [5,4]])
print("(1): ", centroids_1)
(centroids_2, dpts_per_centroid_2) = KMeans(euclidean_dist, task1_pd, 2, 0, False, [[4,6], [5,4]])
print("(2): ", centroids_2)
(centroids_3, dpts_per_centroid_3) = KMeans(manhattan_dist, task1_pd, 2, 0, False, [[3,3], [8,3]])
print("(3): ", centroids_3)
(centroids_4, dpts_per_centroid_4) = KMeans(euclidean_dist, task1_pd, 2, 0, False, [[3,2], [4,8]])
print("(4): ", centroids_4)
print("Task 2:")
print("Euclidean => SSE:", Calculate_SSE(euclidean_centroids, euclidean_dps_per_centroid, euclidean_dist),
MeasureAccuracy(euclidean_centroids, iris_data_df, iris_test_df, euclidean_dist))
print("Cosine Similarity => SSE:", Calculate_SSE(cosine_centroids, cosine_dps_per_centroid, cosine_sim_dist),
MeasureAccuracy(cosine_centroids, iris_data_df, iris_test_df, cosine_sim_dist))
print("Generalized Jaccard similarity => SSE:", Calculate_SSE(gen_jaccard_centroids, gen_jaccard_dps_per_centroid, gen_jaccard_dist),
MeasureAccuracy(gen_jaccard_centroids, iris_data_df, iris_test_df, gen_jaccard_dist))
a = np.array([0,2,7])
b = np.array([-1,5,0])
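# The vectors a and b defined above appear intended for a final task comparing
# the measures directly. The assignment text is not included in this file, so
# the block below is a hedged sketch of that comparison, not the required output.
print("Task 3 (assumed): pairwise measures between a and b")
print("Euclidean distance:", euclidean_dist(a, b))
print("Manhattan distance:", manhattan_dist(a, b))
print("Generalized Jaccard distance:", gen_jaccard_dist(a, b))
print("Cosine distance:", cosine_sim_dist(a, b))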
|
apache-2.0
|
datapythonista/pandas
|
pandas/tests/plotting/common.py
|
3
|
21514
|
"""
Module consolidating common testing functions for checking plotting.
Currently all plotting tests are marked as slow via
``pytestmark = pytest.mark.slow`` at the module level.
"""
from __future__ import annotations
import os
from typing import (
TYPE_CHECKING,
Sequence,
)
import warnings
import numpy as np
from pandas.util._decorators import cache_readonly
import pandas.util._test_decorators as td
from pandas.core.dtypes.api import is_list_like
import pandas as pd
from pandas import (
DataFrame,
Series,
to_datetime,
)
import pandas._testing as tm
if TYPE_CHECKING:
from matplotlib.axes import Axes
@td.skip_if_no_mpl
class TestPlotBase:
"""
This is a common base class used for various plotting tests
"""
def setup_method(self, method):
import matplotlib as mpl
from pandas.plotting._matplotlib import compat
mpl.rcdefaults()
self.start_date_to_int64 = 812419200000000000
self.end_date_to_int64 = 819331200000000000
self.mpl_ge_2_2_3 = compat.mpl_ge_2_2_3()
self.mpl_ge_3_0_0 = compat.mpl_ge_3_0_0()
self.mpl_ge_3_1_0 = compat.mpl_ge_3_1_0()
self.mpl_ge_3_2_0 = compat.mpl_ge_3_2_0()
self.bp_n_objects = 7
self.polycollection_factor = 2
self.default_figsize = (6.4, 4.8)
self.default_tick_position = "left"
n = 100
with tm.RNGContext(42):
gender = np.random.choice(["Male", "Female"], size=n)
classroom = np.random.choice(["A", "B", "C"], size=n)
self.hist_df = DataFrame(
{
"gender": gender,
"classroom": classroom,
"height": np.random.normal(66, 4, size=n),
"weight": np.random.normal(161, 32, size=n),
"category": np.random.randint(4, size=n),
"datetime": to_datetime(
np.random.randint(
self.start_date_to_int64,
self.end_date_to_int64,
size=n,
dtype=np.int64,
)
),
}
)
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
def teardown_method(self, method):
tm.close()
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@cache_readonly
def colorconverter(self):
import matplotlib.colors as colors
return colors.colorConverter
def _check_legend_labels(self, axes, labels=None, visible=True):
"""
Check each axes has expected legend labels
Parameters
----------
axes : matplotlib Axes object, or its list-like
labels : list-like
expected legend labels
visible : bool
expected legend visibility. labels are checked only when visible is
True
"""
if visible and (labels is None):
raise ValueError("labels must be specified when visible is True")
axes = self._flatten_visible(axes)
for ax in axes:
if visible:
assert ax.get_legend() is not None
self._check_text_labels(ax.get_legend().get_texts(), labels)
else:
assert ax.get_legend() is None
def _check_legend_marker(self, ax, expected_markers=None, visible=True):
"""
Check ax has expected legend markers
Parameters
----------
ax : matplotlib Axes object
expected_markers : list-like
expected legend markers
visible : bool
expected legend visibility. labels are checked only when visible is
True
"""
if visible and (expected_markers is None):
raise ValueError("Markers must be specified when visible is True")
if visible:
handles, _ = ax.get_legend_handles_labels()
markers = [handle.get_marker() for handle in handles]
assert markers == expected_markers
else:
assert ax.get_legend() is None
def _check_data(self, xp, rs):
"""
Check each axes has identical lines
Parameters
----------
xp : matplotlib Axes object
rs : matplotlib Axes object
"""
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
tm.assert_almost_equal(xpdata, rsdata)
assert len(xp_lines) == len(rs_lines)
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
def _check_visible(self, collections, visible=True):
"""
Check each artist is visible or not
Parameters
----------
collections : matplotlib Artist or its list-like
target Artist or its list or collection
visible : bool
expected visibility
"""
from matplotlib.collections import Collection
if not isinstance(collections, Collection) and not is_list_like(collections):
collections = [collections]
for patch in collections:
assert patch.get_visible() == visible
def _check_patches_all_filled(
self, axes: Axes | Sequence[Axes], filled: bool = True
) -> None:
"""
Check for each artist whether it is filled or not
Parameters
----------
axes : matplotlib Axes object, or its list-like
filled : bool
expected filling
"""
axes = self._flatten_visible(axes)
for ax in axes:
for patch in ax.patches:
assert patch.fill == filled
def _get_colors_mapped(self, series, colors):
unique = series.unique()
        # unique and colors lengths can differ
        # depending on slice value
mapped = dict(zip(unique, colors))
return [mapped[v] for v in series.values]
def _check_colors(
self, collections, linecolors=None, facecolors=None, mapping=None
):
"""
Check each artist has expected line colors and face colors
Parameters
----------
collections : list-like
list or collection of target artist
linecolors : list-like which has the same length as collections
list of expected line colors
facecolors : list-like which has the same length as collections
list of expected face colors
mapping : Series
Series used for color grouping key
used for andrew_curves, parallel_coordinates, radviz test
"""
from matplotlib.collections import (
Collection,
LineCollection,
PolyCollection,
)
from matplotlib.lines import Line2D
conv = self.colorconverter
if linecolors is not None:
if mapping is not None:
linecolors = self._get_colors_mapped(mapping, linecolors)
linecolors = linecolors[: len(collections)]
assert len(collections) == len(linecolors)
for patch, color in zip(collections, linecolors):
if isinstance(patch, Line2D):
result = patch.get_color()
                # Line2D may contain a string color expression
result = conv.to_rgba(result)
elif isinstance(patch, (PolyCollection, LineCollection)):
result = tuple(patch.get_edgecolor()[0])
else:
result = patch.get_edgecolor()
expected = conv.to_rgba(color)
assert result == expected
if facecolors is not None:
if mapping is not None:
facecolors = self._get_colors_mapped(mapping, facecolors)
facecolors = facecolors[: len(collections)]
assert len(collections) == len(facecolors)
for patch, color in zip(collections, facecolors):
if isinstance(patch, Collection):
# returned as list of np.array
result = patch.get_facecolor()[0]
else:
result = patch.get_facecolor()
if isinstance(result, np.ndarray):
result = tuple(result)
expected = conv.to_rgba(color)
assert result == expected
def _check_text_labels(self, texts, expected):
"""
Check each text has expected labels
Parameters
----------
texts : matplotlib Text object, or its list-like
target text, or its list
expected : str or list-like which has the same length as texts
expected text label, or its list
"""
if not is_list_like(texts):
assert texts.get_text() == expected
else:
labels = [t.get_text() for t in texts]
assert len(labels) == len(expected)
for label, e in zip(labels, expected):
assert label == e
def _check_ticks_props(
self, axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None
):
"""
Check each axes has expected tick properties
Parameters
----------
axes : matplotlib Axes object, or its list-like
xlabelsize : number
expected xticks font size
xrot : number
expected xticks rotation
ylabelsize : number
expected yticks font size
yrot : number
expected yticks rotation
"""
from matplotlib.ticker import NullFormatter
axes = self._flatten_visible(axes)
for ax in axes:
if xlabelsize is not None or xrot is not None:
if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
# If minor ticks has NullFormatter, rot / fontsize are not
# retained
labels = ax.get_xticklabels()
else:
labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)
for label in labels:
if xlabelsize is not None:
tm.assert_almost_equal(label.get_fontsize(), xlabelsize)
if xrot is not None:
tm.assert_almost_equal(label.get_rotation(), xrot)
if ylabelsize is not None or yrot is not None:
if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
labels = ax.get_yticklabels()
else:
labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)
for label in labels:
if ylabelsize is not None:
tm.assert_almost_equal(label.get_fontsize(), ylabelsize)
if yrot is not None:
tm.assert_almost_equal(label.get_rotation(), yrot)
def _check_ax_scales(self, axes, xaxis="linear", yaxis="linear"):
"""
Check each axes has expected scales
Parameters
----------
axes : matplotlib Axes object, or its list-like
xaxis : {'linear', 'log'}
expected xaxis scale
yaxis : {'linear', 'log'}
expected yaxis scale
"""
axes = self._flatten_visible(axes)
for ax in axes:
assert ax.xaxis.get_scale() == xaxis
assert ax.yaxis.get_scale() == yaxis
def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=None):
"""
Check expected number of axes is drawn in expected layout
Parameters
----------
axes : matplotlib Axes object, or its list-like
axes_num : number
expected number of axes. Unnecessary axes should be set to
invisible.
layout : tuple
expected layout, (expected number of rows , columns)
figsize : tuple
expected figsize. default is matplotlib default
"""
from pandas.plotting._matplotlib.tools import flatten_axes
if figsize is None:
figsize = self.default_figsize
visible_axes = self._flatten_visible(axes)
if axes_num is not None:
assert len(visible_axes) == axes_num
for ax in visible_axes:
# check something drawn on visible axes
assert len(ax.get_children()) > 0
if layout is not None:
result = self._get_axes_layout(flatten_axes(axes))
assert result == layout
tm.assert_numpy_array_equal(
visible_axes[0].figure.get_size_inches(),
np.array(figsize, dtype=np.float64),
)
def _get_axes_layout(self, axes):
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
def _flatten_visible(self, axes):
"""
Flatten axes, and filter only visible
Parameters
----------
axes : matplotlib Axes object, or its list-like
"""
from pandas.plotting._matplotlib.tools import flatten_axes
axes = flatten_axes(axes)
axes = [ax for ax in axes if ax.get_visible()]
return axes
def _check_has_errorbars(self, axes, xerr=0, yerr=0):
"""
Check axes has expected number of errorbars
Parameters
----------
axes : matplotlib Axes object, or its list-like
xerr : number
expected number of x errorbar
yerr : number
expected number of y errorbar
"""
axes = self._flatten_visible(axes)
for ax in axes:
containers = ax.containers
xerr_count = 0
yerr_count = 0
for c in containers:
has_xerr = getattr(c, "has_xerr", False)
has_yerr = getattr(c, "has_yerr", False)
if has_xerr:
xerr_count += 1
if has_yerr:
yerr_count += 1
assert xerr == xerr_count
assert yerr == yerr_count
def _check_box_return_type(
self, returned, return_type, expected_keys=None, check_ax_title=True
):
"""
Check box returned type is correct
Parameters
----------
returned : object to be tested, returned from boxplot
return_type : str
return_type passed to boxplot
expected_keys : list-like, optional
group labels in subplot case. If not passed,
the function checks assuming boxplot uses single ax
check_ax_title : bool
Whether to check the ax.title is the same as expected_key
Intended to be checked by calling from ``boxplot``.
Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
"""
from matplotlib.axes import Axes
types = {"dict": dict, "axes": Axes, "both": tuple}
if expected_keys is None:
# should be fixed when the returning default is changed
if return_type is None:
return_type = "dict"
assert isinstance(returned, types[return_type])
if return_type == "both":
assert isinstance(returned.ax, Axes)
assert isinstance(returned.lines, dict)
else:
# should be fixed when the returning default is changed
if return_type is None:
for r in self._flatten_visible(returned):
assert isinstance(r, Axes)
return
assert isinstance(returned, Series)
assert sorted(returned.keys()) == sorted(expected_keys)
for key, value in returned.items():
assert isinstance(value, types[return_type])
# check returned dict has correct mapping
if return_type == "axes":
if check_ax_title:
assert value.get_title() == key
elif return_type == "both":
if check_ax_title:
assert value.ax.get_title() == key
assert isinstance(value.ax, Axes)
assert isinstance(value.lines, dict)
elif return_type == "dict":
line = value["medians"][0]
axes = line.axes
if check_ax_title:
assert axes.get_title() == key
else:
raise AssertionError
def _check_grid_settings(self, obj, kinds, kws={}):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
import matplotlib as mpl
def is_grid_on():
xticks = self.plt.gca().xaxis.get_major_ticks()
yticks = self.plt.gca().yaxis.get_major_ticks()
# for mpl 2.2.2, gridOn and gridline.get_visible disagree.
# for new MPL, they are the same.
if self.mpl_ge_3_1_0:
xoff = all(not g.gridline.get_visible() for g in xticks)
yoff = all(not g.gridline.get_visible() for g in yticks)
else:
xoff = all(not g.gridOn for g in xticks)
yoff = all(not g.gridOn for g in yticks)
return not (xoff and yoff)
spndx = 1
for kind in kinds:
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc("axes", grid=False)
obj.plot(kind=kind, **kws)
assert not is_grid_on()
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc("axes", grid=True)
obj.plot(kind=kind, grid=False, **kws)
assert not is_grid_on()
if kind != "pie":
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc("axes", grid=True)
obj.plot(kind=kind, **kws)
assert is_grid_on()
self.plt.subplot(1, 4 * len(kinds), spndx)
spndx += 1
mpl.rc("axes", grid=False)
obj.plot(kind=kind, grid=True, **kws)
assert is_grid_on()
def _unpack_cycler(self, rcParams, field="color"):
"""
Auxiliary function for correctly unpacking cycler after MPL >= 1.5
"""
return [v[field] for v in rcParams["axes.prop_cycle"]]
def _check_plot_works(f, filterwarnings="always", default_axes=False, **kwargs):
"""
Create plot and ensure that plot return object is valid.
Parameters
----------
f : func
Plotting function.
filterwarnings : str
Warnings filter.
See https://docs.python.org/3/library/warnings.html#warning-filter
default_axes : bool, optional
If False (default):
- If `ax` not in `kwargs`, then create subplot(211) and plot there
- Create new subplot(212) and plot there as well
- Mind special corner case for bootstrap_plot (see `_gen_two_subplots`)
If True:
- Simply run plotting function with kwargs provided
- All required axes instances will be created automatically
- It is recommended to use it when the plotting function
creates multiple axes itself. It helps avoid warnings like
'UserWarning: To output multiple subplots,
the figure containing the passed axes is being cleared'
**kwargs
Keyword arguments passed to the plotting function.
Returns
-------
Plot object returned by the last plotting.
"""
import matplotlib.pyplot as plt
if default_axes:
gen_plots = _gen_default_plot
else:
gen_plots = _gen_two_subplots
ret = None
with warnings.catch_warnings():
warnings.simplefilter(filterwarnings)
try:
fig = kwargs.get("figure", plt.gcf())
plt.clf()
for ret in gen_plots(f, fig, **kwargs):
tm.assert_is_valid_plot_return_object(ret)
with tm.ensure_clean(return_filelike=True) as path:
plt.savefig(path)
except Exception as err:
raise err
finally:
tm.close(fig)
return ret
def _gen_default_plot(f, fig, **kwargs):
"""
Create plot in a default way.
"""
yield f(**kwargs)
def _gen_two_subplots(f, fig, **kwargs):
"""
Create plot on two subplots forcefully created.
"""
if "ax" not in kwargs:
fig.add_subplot(211)
yield f(**kwargs)
if f is pd.plotting.bootstrap_plot:
assert "ax" not in kwargs
else:
kwargs["ax"] = fig.add_subplot(212)
yield f(**kwargs)
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
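# A hypothetical usage sketch of ``_check_plot_works`` (illustration only; the
# names used here all exist in this module). It runs a Series line plot on two
# forcefully created subplots and validates each returned object. Assumes a
# working matplotlib backend and a pandas checkout compatible with this module.
if __name__ == "__main__":
    ser = Series(np.random.randn(10), name="demo")
    ax = _check_plot_works(ser.plot, kind="line")
    print(type(ax))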
|
bsd-3-clause
|
bradmontgomery/ml
|
book/ch01/analyze_webstats.py
|
23
|
5113
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from utils import DATA_DIR, CHART_DIR
import scipy as sp
import matplotlib.pyplot as plt
sp.random.seed(3) # to reproduce the data later on
data = sp.genfromtxt(os.path.join(DATA_DIR, "web_traffic.tsv"), delimiter="\t")
print(data[:10])
print(data.shape)
# colors and line styles used when plotting up to five fitted models per chart
colors = ['g', 'k', 'b', 'm', 'r']
linestyles = ['-', '-.', '--', ':', '-']
x = data[:, 0]
y = data[:, 1]
print("Number of invalid entries:", sp.sum(sp.isnan(y)))
x = x[~sp.isnan(y)]
y = y[~sp.isnan(y)]
# plot input data
def plot_models(x, y, models, fname, mx=None, ymax=None, xmin=None):
plt.figure(num=None, figsize=(8, 6))
plt.clf()
plt.scatter(x, y, s=10)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
plt.xticks(
[w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)])
if models:
if mx is None:
mx = sp.linspace(0, x[-1], 1000)
for model, style, color in zip(models, linestyles, colors):
# print "Model:",model
# print "Coeffs:",model.coeffs
plt.plot(mx, model(mx), linestyle=style, linewidth=2, c=color)
plt.legend(["d=%i" % m.order for m in models], loc="upper left")
plt.autoscale(tight=True)
plt.ylim(ymin=0)
if ymax:
plt.ylim(ymax=ymax)
if xmin:
plt.xlim(xmin=xmin)
plt.grid(True, linestyle='-', color='0.75')
plt.savefig(fname)
# first look at the data
plot_models(x, y, None, os.path.join(CHART_DIR, "1400_01_01.png"))
# create and plot models
fp1, res1, rank1, sv1, rcond1 = sp.polyfit(x, y, 1, full=True)
print("Model parameters of fp1: %s" % fp1)
print("Error of the model of fp1:", res1)
f1 = sp.poly1d(fp1)
fp2, res2, rank2, sv2, rcond2 = sp.polyfit(x, y, 2, full=True)
print("Model parameters of fp2: %s" % fp2)
print("Error of the model of fp2:", res2)
f2 = sp.poly1d(fp2)
f3 = sp.poly1d(sp.polyfit(x, y, 3))
f10 = sp.poly1d(sp.polyfit(x, y, 10))
f100 = sp.poly1d(sp.polyfit(x, y, 100))
plot_models(x, y, [f1], os.path.join(CHART_DIR, "1400_01_02.png"))
plot_models(x, y, [f1, f2], os.path.join(CHART_DIR, "1400_01_03.png"))
plot_models(
x, y, [f1, f2, f3, f10, f100], os.path.join(CHART_DIR, "1400_01_04.png"))
# fit and plot a model using the knowledge about inflection point
inflection = int(3.5 * 7 * 24)  # inflection point in hours; used as a slice index, so it must be an integer
xa = x[:inflection]
ya = y[:inflection]
xb = x[inflection:]
yb = y[inflection:]
fa = sp.poly1d(sp.polyfit(xa, ya, 1))
fb = sp.poly1d(sp.polyfit(xb, yb, 1))
plot_models(x, y, [fa, fb], os.path.join(CHART_DIR, "1400_01_05.png"))
def error(f, x, y):
return sp.sum((f(x) - y) ** 2)
print("Errors for the complete data set:")
for f in [f1, f2, f3, f10, f100]:
print("Error d=%i: %f" % (f.order, error(f, x, y)))
print("Errors for only the time after inflection point")
for f in [f1, f2, f3, f10, f100]:
print("Error d=%i: %f" % (f.order, error(f, xb, yb)))
print("Error inflection=%f" % (error(fa, xa, ya) + error(fb, xb, yb)))
# extrapolating into the future
plot_models(
x, y, [f1, f2, f3, f10, f100],
os.path.join(CHART_DIR, "1400_01_06.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
print("Trained only on data after inflection point")
fb1 = fb
fb2 = sp.poly1d(sp.polyfit(xb, yb, 2))
fb3 = sp.poly1d(sp.polyfit(xb, yb, 3))
fb10 = sp.poly1d(sp.polyfit(xb, yb, 10))
fb100 = sp.poly1d(sp.polyfit(xb, yb, 100))
print("Errors for only the time after inflection point")
for f in [fb1, fb2, fb3, fb10, fb100]:
print("Error d=%i: %f" % (f.order, error(f, xb, yb)))
plot_models(
x, y, [fb1, fb2, fb3, fb10, fb100],
os.path.join(CHART_DIR, "1400_01_07.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
# separating training from testing data
frac = 0.3
split_idx = int(frac * len(xb))
shuffled = sp.random.permutation(list(range(len(xb))))
test = sorted(shuffled[:split_idx])
train = sorted(shuffled[split_idx:])
fbt1 = sp.poly1d(sp.polyfit(xb[train], yb[train], 1))
fbt2 = sp.poly1d(sp.polyfit(xb[train], yb[train], 2))
print("fbt2(x)= \n%s"%fbt2)
print("fbt2(x)-100,000= \n%s"%(fbt2-100000))
fbt3 = sp.poly1d(sp.polyfit(xb[train], yb[train], 3))
fbt10 = sp.poly1d(sp.polyfit(xb[train], yb[train], 10))
fbt100 = sp.poly1d(sp.polyfit(xb[train], yb[train], 100))
print("Test errors for only the time after inflection point")
for f in [fbt1, fbt2, fbt3, fbt10, fbt100]:
print("Error d=%i: %f" % (f.order, error(f, xb[test], yb[test])))
plot_models(
x, y, [fbt1, fbt2, fbt3, fbt10, fbt100],
os.path.join(CHART_DIR, "1400_01_08.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
from scipy.optimize import fsolve
print(fbt2)
print(fbt2 - 100000)
reached_max = fsolve(fbt2 - 100000, x0=800) / (7 * 24)
print("100,000 hits/hour expected at week %f" % reached_max[0])
|
mit
|
arabenjamin/scikit-learn
|
sklearn/metrics/regression.py
|
175
|
16953
|
"""Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
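    Examples
    --------
    A minimal illustration on toy inputs; 1d targets are reshaped to 2d:
    >>> y_type, yt, yp, mo = _check_reg_targets([3, -0.5], [2.5, 0.0],
    ...                                         'raw_values')
    >>> y_type
    'continuous'
    >>> yt.shape
    (2, 1)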
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
multioutput_options = (None, 'raw_values', 'uniform_average',
'variance_weighted')
if multioutput not in multioutput_options:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
sample_weight=None,
multioutput=None):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average',
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
        Default value corresponds to 'variance_weighted', but
        will be changed to 'uniform_average' in future versions.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
0.938...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if multioutput is None and y_true.shape[1] != 1:
# @FIXME change in 0.18
warnings.warn("Default 'multioutput' behavior now corresponds to "
"'variance_weighted' value, it will be changed "
"to 'uniform_average' in 0.18.",
DeprecationWarning)
multioutput = 'variance_weighted'
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
        # passing None as weights to np.average() results in a uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
|
bsd-3-clause
|
roofit-dev/parallel-roofit-scripts
|
tensorflow_testing/tensorflow_roofit_demo.py
|
1
|
13599
|
# -*- coding: utf-8 -*-
# @Author: patrick
# @Date: 2016-09-01 17:04:53
# @Last Modified by: patrick
# @Last Modified time: 2016-10-04 15:44:41
import tensorflow as tf
import numpy as np
# import scipy as sc
import matplotlib.pyplot as plt
from timeit import default_timer as timer
def apply_constraint(var, constraints):
var_name = var.name[:var.name.find(':')]
# low = tf.constant(constraints[var_name][0], dtype=tf.float64)
# high = tf.constant(constraints[var_name][1], dtype=tf.float64)
low = constraints[var_name][0]
high = constraints[var_name][1]
return tf.assign(var, tf.clip_by_value(var, low, high),
name="assign_to_" + var_name)
# return tf.Variable(tf.clip_by_value(var, low, high), name=var_name + '_clipped')
project_dn = "/home/patrick/projects/apcocsm/"
# project_dn = "/home/pbos/apcocsm/"
m0_num = 5.291
argpar_num = -20.0
constraint = {}
constraint['sigmean'] = (5.20, 5.30)
constraint['sigwidth'] = (0.001, 1.)
constraint['argpar'] = (-100., -1.)
constraint['nsig'] = (0., 10000)
constraint['nbkg'] = (0., 10000)
constraint['mes'] = (5.20, 5.30)
# def gaussian_pdf(x, m, s):
# return sc.stats.norm.pdf(x, loc=m, scale=s)
pi = tf.constant(np.pi, dtype=tf.float64, name="pi")
sqrt2pi = tf.constant(np.sqrt(2 * np.pi), dtype=tf.float64, name="sqrt2pi")
two = tf.constant(2, dtype=tf.float64, name="two")
one = tf.constant(1, dtype=tf.float64, name="one")
zero = tf.constant(0, dtype=tf.float64, name="zero")
def gaussian_pdf(x, mean, std):
val = tf.div(tf.exp(-tf.pow((x - mean) / std, 2) / two), (sqrt2pi * std),
name="gaussian_pdf")
return val
def argus_pdf(m, m0, c, p=0.5):
t = m / m0
# if (t >= 1):
u = 1 - t * t
# return tf.select(tf.greater_equal(t, one),
# zero,
# m * tf.pow(u, p) * tf.exp(c * u))
return tf.cond(tf.greater_equal(t, one),
lambda: zero,
lambda: m * tf.pow(u, p) * tf.exp(c * u), name="argus_pdf")
    # N.B.: with tf.cond the branch arguments must be functions (taking no
    # arguments), so that tf only has to call / evaluate them once they are
    # actually needed. That is not possible with tf.select, where you
    # immediately get both full tensors.
# u = 1 - t * t
# return m * tf.pow(1 - t * t, p) * tf.exp(c * (1 - t * t))
# Double_t RooArgusBG::analyticalIntegral(Int_t code, const char* rangeName) const
# {
# R__ASSERT(code==1);
# // Formula for integration over m when p=0.5
# static const Double_t pi = atan2(0.0,-1.0);
# Double_t min = (m.min(rangeName) < m0) ? m.min(rangeName) : m0;
# Double_t max = (m.max(rangeName) < m0) ? m.max(rangeName) : m0;
# Double_t f1 = (1.-TMath::Power(min/m0,2));
# Double_t f2 = (1.-TMath::Power(max/m0,2));
# Double_t aLow, aHigh ;
# aLow = -0.5*m0*m0*(exp(c*f1)*sqrt(f1)/c + 0.5/TMath::Power(-c,1.5)*sqrt(pi)*RooMath::erf(sqrt(-c*f1)));
# aHigh = -0.5*m0*m0*(exp(c*f2)*sqrt(f2)/c + 0.5/TMath::Power(-c,1.5)*sqrt(pi)*RooMath::erf(sqrt(-c*f2)));
# Double_t area = aHigh - aLow;
# //cout << "c = " << c << "aHigh = " << aHigh << " aLow = " << aLow << " area = " << area << endl ;
# return area;
# }
def argus_integral_phalf(m_low, m_high, m0, c):
"""
Only valid for argus_pdf with p=0.5! Otherwise need to do numerical
integral.
"""
def F(x):
return -0.5 * m0 * m0 * (tf.exp(c * x) * tf.sqrt(x) / c + 0.5 / tf.pow(-c, 1.5) * tf.sqrt(pi) * tf.erf(tf.sqrt(-c * x)))
a = tf.minimum(m_low, m0)
b = tf.minimum(m_high, m0)
x1 = 1 - tf.pow(a / m0, 2)
x2 = 1 - tf.pow(b / m0, 2)
area = tf.sub(F(x2), F(x1), name="argus_integral_phalf")
return area
def argus_integral_phalf_numpy(m_low, m_high, m0, c):
"""
Only valid for argus_pdf with p=0.5! Otherwise need to do numerical
integral.
"""
import scipy.special
def F(x):
return -0.5 * m0 * m0 * (np.exp(c * x) * np.sqrt(x) / c + 0.5 / (-c)**1.5 * np.sqrt(np.pi) * scipy.special.erf(np.sqrt(-c * x)))
a = np.min([m_low, m0])
b = np.min([m_high, m0])
x1 = 1 - (a / m0)**2
x2 = 1 - (b / m0)**2
area = F(x2) - F(x1)
return area
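# Hypothetical helper (not called by the fit below): compare the closed-form
# ARGUS integral above against a direct numerical quadrature of the p=0.5
# ARGUS shape. The helper name and the use of scipy.integrate.quad are
# assumptions; the two results should agree to numerical precision.
def _check_argus_norm_against_quad(m_low=5.20, m_high=5.30, m0=5.291, c=-20.0):
    from scipy import integrate
    def argus_shape(m):
        # unnormalized ARGUS pdf with p=0.5; zero above the kinematic endpoint m0
        u = 1.0 - (m / m0) ** 2
        return m * np.sqrt(u) * np.exp(c * u) if u > 0 else 0.0
    numeric, _ = integrate.quad(argus_shape, m_low, min(m_high, m0))
    analytic = argus_integral_phalf_numpy(m_low, m_high, m0, c)
    return numeric, analytic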
argus_numerical_norm = tf.constant(argus_integral_phalf_numpy(constraint['mes'][0],
constraint['mes'][1],
m0_num, argpar_num),
dtype=tf.float64, name="argus_numerical_norm")
def argus_pdf_phalf_WN(m, m0, c, m_low, m_high, tf_norm=tf.constant(False)):
"""
WN: with normalization
tf_norm: use the tensorflow integral function (True) or the numpy one (False)
"""
norm = tf.cond(tf_norm,
lambda: argus_integral_phalf(m_low, m_high, m0, c),
lambda: argus_numerical_norm, name="argus_norm")
return argus_pdf(m, m0, c) / norm
# // --- Observable ---
# RooRealVar mes("mes","m_{ES} (GeV)",5.20,5.30) ;
# N.B.: tf has no bounds on a Variable!
# mes = tf.Variable("mes", 5.25)
# // --- Build Gaussian signal PDF ---
# RooRealVar sigmean("sigmean","B^{#pm} mass",5.28,5.20,5.30) ;
# RooRealVar sigwidth("sigwidth","B^{#pm} width",0.0027,0.001,1.) ;
sigmean = tf.Variable(np.float64(5.28), name="sigmean")
# sigmean_c = tf.clip_by_value(sigmean, 5.20, 5.30)
sigwidth = tf.Variable(np.float64(0.0027), name="sigwidth")
# sigwidth_c = tf.clip_by_value(sigwidth, 0.001, 1.)
# RooGaussian gauss("gauss","gaussian PDF",mes,sigmean,sigwidth) ;
# gauss = lambda mes: gaussian_pdf(mes, sigmean, sigwidth)
# // --- Build Argus background PDF ---
# RooRealVar argpar("argpar","argus shape parameter",-20.0,-100.,-1.) ;
# RooConstVar m0("m0", "resonant mass", 5.291);
argpar = tf.Variable(np.float64(argpar_num), name="argpar")
# argpar_c = tf.clip_by_value(argpar, -100., -1.)
m0 = tf.constant(np.float64(m0_num), name="m0")
# RooArgusBG argus("argus","Argus PDF",mes,m0,argpar) ;
# argus = lambda mes: argus_pdf(mes, m0, argpar)
# // --- Construct signal+background PDF ---
# RooRealVar nsig("nsig","#signal events",200,0.,10000) ;
# RooRealVar nbkg("nbkg","#background events",800,0.,10000) ;
nsig = tf.Variable(np.float64(200), name="nsig")
# nsig_c = tf.clip_by_value(nsig, 0., 10000)
nbkg = tf.Variable(np.float64(800), name="nbkg")
# nbkg_c = tf.clip_by_value(nbkg, 0., 10000)
# RooAddPdf sum("sum","g+a",RooArgList(gauss,argus),RooArgList(nsig,nbkg)) ;
# sum_pdf = lambda mes: nsig * gauss(mes) + nbkg * argus(mes)
# // --- Generate a toyMC sample from composite PDF ---
# RooDataSet *data = sum.generate(mes,2000) ;
def sum_pdf(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, mes_low, mes_high):
return tf.add(nsig * gaussian_pdf(mes, sigmean, sigwidth), nbkg * argus_pdf_phalf_WN(mes, m0, argpar, mes_low, mes_high), name="sum_pdf")
def sum_pdf_test(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, mes_low, mes_high):
print locals()
return sum_pdf(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, mes_low, mes_high)
sum_pdf_vec = np.vectorize(sum_pdf, otypes=[np.float])
# def sum_pdf_mes(mes):
# # mes_c = tf.clip_by_value(mes, 5.20, 5.30)
# # mes_c = apply_constraint(mes, constraint)
# # return nsig_c * gaussian_pdf(mes, sigmean_c, sigwidth_c) + nbkg_c * argus_pdf(mes, m0, argpar_c)
# return nsig * gaussian_pdf(mes, sigmean, sigwidth) + nbkg * argus_pdf(mes, m0, argpar)
# ok, generating this here is not trivial, so just generate the toy data in RooFit and import it
# run this in ROOT:
# data.write("roofit_demo_random_data_values.dat");
data_raw = np.loadtxt(project_dn + "roofit_demo_random_data_values.dat",
dtype=np.float64)
data = tf.constant(data_raw, name='event_data')
# // --- Perform extended ML fit of composite PDF to toy data ---
# sum.fitTo(*data,"Extended") ;
# convert to tf constants, otherwise you'll get complaints about float32s...
for key in constraint.keys():
low = constraint[key][0]
high = constraint[key][1]
constraint[key] = (tf.constant(low, dtype=tf.float64),
tf.constant(high, dtype=tf.float64))
# using:
# https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html#optimizers
# https://gist.github.com/ibab/45c3d886c182a1ea26d5
# http://stackoverflow.com/a/36267185/1199693
nll = tf.neg(tf.reduce_sum(tf.log(tf.map_fn(lambda mes: sum_pdf(mes, nsig, sigmean, sigwidth, nbkg, m0, argpar, constraint['mes'][0], constraint['mes'][1]), data))), name="nll")
# grad = tf.gradients(nll, [mu, sigma])
# def objective(params):
# mu_, sigma_ = params
# return sess.run(nll, feed_dict={mes: data})
# def gradient(params):
# mu_, sigma_ = params
# ret = sess.run(grad, feed_dict={ mu: mu_, sigma: sigma_ })
# return np.array(ret)
max_steps = 10
sigmean_c = apply_constraint(sigmean, constraint)
sigwidth_c = apply_constraint(sigwidth, constraint)
argpar_c = apply_constraint(argpar, constraint)
nsig_c = apply_constraint(nsig, constraint)
nbkg_c = apply_constraint(nbkg, constraint)
update_vars = [sigmean_c, sigwidth_c, argpar_c, nsig_c, nbkg_c]
variables = tf.all_variables()
# Create an optimizer with the desired parameters.
# opt = tf.train.GradientDescentOptimizer(learning_rate=0.001)
# opt = tf.train.AdagradOptimizer(learning_rate=0.1)
opt = tf.train.AdamOptimizer()
# opt_op = opt.minimize(nll, var_list=[sigmean, sigwidth, argpar, nsig, nbkg])
opt_op = opt.minimize(nll)
tf.scalar_summary('nll', nll)
init_op = tf.initialize_all_variables()
check_op = tf.report_uninitialized_variables()
# start session
with tf.Session() as sess:
# Merge all the summaries and write them out to /tmp/mnist_logs (by default)
summarize_merged = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter('./train', sess.graph)
# Run the init operation.
print sess.run(init_op)
print sess.run(check_op)
true_vars = {}
for v in variables:
key = v.name[:v.name.find(':')]
true_vars[key] = v.eval()
true_vars['m0'] = m0.eval()
print "name\t" + "\t".join([v.name.ljust(10) for v in variables]) + "\t | nll"
print "init\t" + "\t".join(["%6.4e" % v for v in sess.run(variables)])
print
start = timer()
for step in xrange(max_steps):
# print "variables 3:", sess.run(variables)
summary, _ = sess.run([summarize_merged, opt_op])
summary_writer.add_summary(summary, step)
var_values_opt = sess.run(variables)
nll_value_opt = sess.run(nll)
sess.run(update_vars)
var_values_clip = np.array(sess.run(variables))
nll_value_clip = np.array(sess.run(nll))
print "opt\t" + "\t".join(["%6.4e" % v for v in var_values_opt]) + "\t | %f" % nll_value_opt
clipped = np.where(var_values_opt == var_values_clip, [" "*10] * len(variables), ["%6.4e" % v for v in var_values_clip])
print "clip\t" + "\t".join(clipped) + "\t | %f" % nll_value_clip
# Compute the gradients for a list of variables.
# grads_and_vars = opt.compute_gradients(nll, [sigmean, sigwidth, argpar, nsig, nbkg])
# print grads_and_vars
# for gv in grads_and_vars:
# apply_constraint(gv[1], constraint)
# grads_and_vars is a list of tuples (gradient, variable). Do whatever you
# need to the 'gradient' part, for example cap them, etc.
# capped_grads_and_vars = [(gv[0], apply_constraint(gv[1], constraint)) for gv in grads_and_vars]
# Ask the optimizer to apply the capped gradients.
# out = opt.apply_gradients(capped_grads_and_vars)
# out = opt.apply_gradients(grads_and_vars)
# print sess.run([out, nll, sigmean, sigwidth, argpar, nsig, nbkg])
end = timer()
print("Loop took %f seconds" % (end - start))
raise Exception
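    # NOTE (added): execution stops at the raise above, so the plotting code
    # below never runs as written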
fit_vars = {}
for v in variables:
key = v.name[:v.name.find(':')]
fit_vars[key] = v.eval()
fit_vars['m0'] = m0.eval()
counts, bins = np.histogram(data.eval(), bins=100)
x_bins = (bins[:-1] + bins[1:]) / 2
y_fit = [sum_pdf(x, mes_low=constraint['mes'][0], mes_high=constraint['mes'][1], **fit_vars).eval() for x in x_bins]
argus_fit = [fit_vars['nbkg'] * argus_pdf_phalf_WN(x, fit_vars['m0'], fit_vars['argpar'], m_low=constraint['mes'][0], m_high=constraint['mes'][1]).eval() for x in x_bins]
y_true = [sum_pdf(x, mes_low=constraint['mes'][0], mes_high=constraint['mes'][1], **true_vars).eval() for x in x_bins]
# normalize fit values to data counts
y_fit_norm = np.sum(counts) / np.sum(y_fit)
y_fit = [y * y_fit_norm for y in y_fit]
# argus_fit_norm = np.sum(counts) / np.sum(argus_fit)
argus_fit = [a * y_fit_norm for a in argus_fit]
y_true_norm = np.sum(counts) / np.sum(y_true)
y_true = [y * y_true_norm for y in y_true]
plt.errorbar(x_bins, counts, yerr=np.sqrt(counts), fmt='.g')
plt.plot(x_bins, y_fit, '-b')
plt.plot(x_bins, argus_fit, '--b')
plt.plot(x_bins, y_true, ':k')
plt.show()
# tf.InteractiveSession()
# sess = tf.Session()
# sess.run(init_op)
# opt = tf.train.GradientDescentOptimizer(learning_rate=1)
# opt_op = opt.minimize(nll, var_list=[sigmean, sigwidth, argpar, nsig, nbkg])
# for step in xrange(10):
# out = sess.run([opt_op, nll, sigmean, sigwidth, argpar, nsig, nbkg])
# print out[1:]
# sess.close()
# // --- Plot toy data and composite PDF overlaid ---
# RooPlot* mesframe = mes.frame() ;
# data->plotOn(mesframe) ;
# sum.plotOn(mesframe) ;
# sum.plotOn(mesframe,Components(argus),LineStyle(kDashed)) ;
# mesframe->Draw();
|
apache-2.0
|
nmartensen/pandas
|
pandas/tests/indexes/timedeltas/test_setops.py
|
15
|
2556
|
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import TimedeltaIndex, timedelta_range, Int64Index
class TestTimedeltaIndex(object):
_multiprocess_can_split_ = True
def test_union(self):
i1 = timedelta_range('1day', periods=5)
i2 = timedelta_range('3day', periods=5)
result = i1.union(i2)
expected = timedelta_range('1day', periods=7)
tm.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(['3d', '1d', '2d'])
ordered = TimedeltaIndex(idx.sort_values(), freq='infer')
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = timedelta_range('1 day', periods=4, freq='3H')
rng_b = timedelta_range('1 day', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(['1 day 15:19:49.695000'])
right = TimedeltaIndex(['2 day 13:04:21.322000',
'1 day 15:27:24.873000',
'1 day 15:31:05.350000'])
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_intersection_bug_1708(self):
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
assert len(result) == 0
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00', periods=3, freq='h')
tm.assert_index_equal(result, expected)
|
bsd-3-clause
|
socrata/arcs
|
setup.py
|
1
|
2008
|
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
def read(fname):
"""Utility function to read the README file into the long_description."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
install_requires_list = ['pandas>=0.18.1',
'matplotlib>=1.5',
'numpy>=1.11.0',
'frozendict>=0.6',
'simplejson>=3.8.2',
'requests[security]>=2.10.0',
'psycopg2==2.6.1',
'langdetect>=1.0.6',
'scipy>=0.17.1',
'spacy>=0.100']
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
packages_list = [root for root, dirs, files in os.walk('arcs')]
setup(
include_package_data=True,
name="arcs",
version="0.0.1",
author="The Discovery Team",
author_email="discovery-l@socrata.com",
description=("A library for assessing relevance of Socrata's catalog "
"search"),
license = "TBD",
keywords = "search relevance",
url = "http://www.socrata.com",
packages=packages_list,
long_description=read('README.md'),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Socrata",
"Topic :: Software Development :: Libraries :: Python Modules",
],
install_requires=install_requires_list,
setup_requires=['pytest-runner'],
tests_require=["pytest==2.6.4"],
cmdclass={'test': PyTest})
|
mit
|
ldirer/scikit-learn
|
sklearn/metrics/cluster/tests/test_bicluster.py
|
394
|
1770
|
"""Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
|
bsd-3-clause
|
canast02/csci544_fall2016_project
|
yelp-sentiment/experiments/sentiment_decisiontree.py
|
1
|
2591
|
import numpy as np
from nltk import TweetTokenizer, accuracy
from nltk.stem.snowball import EnglishStemmer
from sklearn import tree
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from sentiment_util import load_datasets
def main():
# x, y = load_dataset("datasets/sentiment_uci/yelp_labelled.txt")
x, y = load_datasets(["../datasets/sentiment_uci/yelp_labelled.txt"])
stopwords = set()
with open('../stopwords.txt', 'r') as f:
for w in f:
            stopwords.add(w.strip())
tok = TweetTokenizer()
stemmer = EnglishStemmer()
vectorizer = TfidfVectorizer(sublinear_tf=True, use_idf=True, binary=True, preprocessor=stemmer.stem,
tokenizer=tok.tokenize, ngram_range=(1, 2))
accu_p = np.zeros(shape=(2,))
accu_r = np.zeros(shape=(2,))
accu_f = np.zeros(shape=(2,))
accu_a = 0.0
folds = 10
for train_idx, test_idx in StratifiedKFold(y=y, n_folds=folds, shuffle=True):
train_x, train_y = x[train_idx], y[train_idx]
test_x, test_y = x[test_idx], y[test_idx]
cls = tree.DecisionTreeClassifier()
# train
train_x = vectorizer.fit_transform(train_x).toarray()
cls.fit(train_x, train_y)
# test
test_x = vectorizer.transform(test_x).toarray()
pred_y = cls.predict(test_x)
# evaluate
p, r, f, _ = precision_recall_fscore_support(test_y, pred_y)
a = accuracy_score(test_y, pred_y)
accu_p += p
accu_r += r
accu_f += f
accu_a += a
print("Evaluating classifier:")
print("\tAccuracy: {}".format(a))
print("\tPrecision[0]: {}".format(p[0]))
print("\tPrecision[1]: {}".format(p[1]))
print("\tRecall[0]: {}".format(r[0]))
print("\tRecall[1]: {}".format(r[1]))
print("\tF1-score[0]: {}".format(f[0]))
print("\tF1-score[1]: {}".format(f[1]))
print("Average evaluation")
print("\tAccuracy: {}".format(accu_a / folds))
print("\tPrecision[0]: {}".format(accu_p[0] / folds))
print("\tPrecision[1]: {}".format(accu_p[1] / folds))
print("\tRecall[0]: {}".format(accu_r[0] / folds))
print("\tRecall[1]: {}".format(accu_r[1] / folds))
print("\tF1-score[0]: {}".format(accu_f[0] / folds))
print("\tF1-score[1]: {}".format(accu_f[1] / folds))
if __name__ == '__main__':
main()
|
gpl-3.0
|
GuessWhoSamFoo/pandas
|
pandas/tests/series/test_repr.py
|
1
|
14865
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import numpy as np
import pandas.compat as compat
from pandas.compat import lrange, range, u
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, Series, date_range, option_context,
period_range, timedelta_range)
from pandas.core.base import StringMixin
from pandas.core.index import MultiIndex
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData):
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second", "foo one 0",
" two 1", " three 2",
"bar one 3", " two 4",
"baz two 5", " three 6",
"qux one 7", " two 8",
" three 9", "Name: sth, dtype: int64"]
expected = "\n".join(expected)
assert repr(s) == expected
def test_name_printing(self):
# Test small Series.
s = Series([0, 1, 2])
s.name = "test"
assert "Name: test" in repr(s)
s.name = None
assert "Name:" not in repr(s)
# Test big Series (diff code path).
s = Series(lrange(0, 1000))
s.name = "test"
assert "Name: test" in repr(s)
s.name = None
assert "Name:" not in repr(s)
s = Series(index=date_range('20010101', '20020101'), name='test')
assert "Name: test" in repr(s)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'), (1, 2), ('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
assert "Name: 0" in rep_str
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
assert "Name: 0" in rep_str
ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"])
assert "\t" not in repr(ser)
assert "\r" not in repr(ser)
assert "a\n" not in repr(ser)
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
assert repr(s) == 'Series([], Name: foo, dtype: int64)'
s = Series([], dtype=np.int64, name=None)
assert repr(s) == 'Series([], dtype: int64)'
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
def test_repr_bool_fails(self, capsys):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
# It works (with no Cython exception barf)!
repr(s)
captured = capsys.readouterr()
assert captured.err == ''
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"), ) * 2
repr(s)
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# ...The return value must be a string object.
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
        assert type(df.__repr__()) == str  # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
assert repr(ts).splitlines()[-1].startswith('Freq:')
ts2 = ts.iloc[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
def test_latex_repr(self):
result = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & $\alpha$ \\
1 & b \\
2 & c \\
\bottomrule
\end{tabular}
"""
with option_context('display.latex.escape', False,
'display.latex.repr', True):
s = Series([r'$\alpha$', 'b', 'c'])
assert result == s._repr_latex_()
assert s._repr_latex_() is None
class TestCategoricalRepr(object):
def test_categorical_repr_unicode(self):
# GH#21002 if len(index) > 60, sys.getdefaultencoding()=='ascii',
# and we are working in PY2, then rendering a Categorical could raise
# UnicodeDecodeError by trying to decode when it shouldn't
class County(StringMixin):
name = u'San Sebastián'
state = u'PR'
def __unicode__(self):
return self.name + u', ' + self.state
cat = pd.Categorical([County() for n in range(61)])
idx = pd.Index(cat)
ser = idx.to_series()
if compat.PY3:
# no reloading of sys, just check that the default (utf8) works
# as expected
repr(ser)
str(ser)
else:
# set sys.defaultencoding to ascii, then change it back after
# the test
with tm.set_defaultencoding('ascii'):
repr(ser)
str(ser)
def test_categorical_repr(self):
a = Series(Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
assert exp == a.__unicode__()
a = Series(Categorical(["a", "b"] * 25))
exp = u("0 a\n1 b\n" + " ..\n" + "48 a\n49 b\n" +
"Length: 50, dtype: category\nCategories (2, object): [a, b]")
with option_context("display.max_rows", 5):
assert exp == repr(a)
levs = list("abcdefghijklmnopqrstuvwxyz")
a = Series(Categorical(["a", "b"], categories=levs, ordered=True))
exp = u("0 a\n1 b\n" + "dtype: category\n"
"Categories (26, object): [a < b < c < d ... w < x < y < z]")
assert exp == a.__unicode__()
def test_categorical_series_repr(self):
s = Series(Categorical([1, 2, 3]))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]"""
assert repr(s) == exp
s = Series(Categorical(np.arange(10)))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0, 1, 2, 3, ..., 6, 7, 8, 9]"""
assert repr(s) == exp
def test_categorical_series_repr_ordered(self):
s = Series(Categorical([1, 2, 3], ordered=True))
exp = """0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]"""
assert repr(s) == exp
s = Series(Categorical(np.arange(10), ordered=True))
exp = """0 0
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
9 9
dtype: category
Categories (10, int64): [0 < 1 < 2 < 3 ... 6 < 7 < 8 < 9]"""
assert repr(s) == exp
def test_categorical_series_repr_datetime(self):
idx = date_range('2011-01-01 09:00', freq='H', periods=5)
s = Series(Categorical(idx))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00,
2011-01-01 12:00:00, 2011-01-01 13:00:00]""" # noqa
assert repr(s) == exp
idx = date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = Series(Categorical(idx))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,
2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,
2011-01-01 13:00:00-05:00]""" # noqa
assert repr(s) == exp
def test_categorical_series_repr_datetime_ordered(self):
idx = date_range('2011-01-01 09:00', freq='H', periods=5)
s = Series(Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00
1 2011-01-01 10:00:00
2 2011-01-01 11:00:00
3 2011-01-01 12:00:00
4 2011-01-01 13:00:00
dtype: category
Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 <
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa
assert repr(s) == exp
idx = date_range('2011-01-01 09:00', freq='H', periods=5,
tz='US/Eastern')
s = Series(Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 2011-01-01 11:00:00-05:00
3 2011-01-01 12:00:00-05:00
4 2011-01-01 13:00:00-05:00
dtype: category
Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 <
2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 <
2011-01-01 13:00:00-05:00]""" # noqa
assert repr(s) == exp
def test_categorical_series_repr_period(self):
idx = period_range('2011-01-01 09:00', freq='H', periods=5)
s = Series(Categorical(idx))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00,
2011-01-01 13:00]""" # noqa
assert repr(s) == exp
idx = period_range('2011-01', freq='M', periods=5)
s = Series(Categorical(idx))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]"""
assert repr(s) == exp
def test_categorical_series_repr_period_ordered(self):
idx = period_range('2011-01-01 09:00', freq='H', periods=5)
s = Series(Categorical(idx, ordered=True))
exp = """0 2011-01-01 09:00
1 2011-01-01 10:00
2 2011-01-01 11:00
3 2011-01-01 12:00
4 2011-01-01 13:00
dtype: category
Categories (5, period[H]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 <
2011-01-01 13:00]""" # noqa
assert repr(s) == exp
idx = period_range('2011-01', freq='M', periods=5)
s = Series(Categorical(idx, ordered=True))
exp = """0 2011-01
1 2011-02
2 2011-03
3 2011-04
4 2011-05
dtype: category
Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]"""
assert repr(s) == exp
def test_categorical_series_repr_timedelta(self):
idx = timedelta_range('1 days', periods=5)
s = Series(Categorical(idx))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]"""
assert repr(s) == exp
idx = timedelta_range('1 hours', periods=10)
s = Series(Categorical(idx))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00,
3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00,
8 days 01:00:00, 9 days 01:00:00]""" # noqa
assert repr(s) == exp
def test_categorical_series_repr_timedelta_ordered(self):
idx = timedelta_range('1 days', periods=5)
s = Series(Categorical(idx, ordered=True))
exp = """0 1 days
1 2 days
2 3 days
3 4 days
4 5 days
dtype: category
Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa
assert repr(s) == exp
idx = timedelta_range('1 hours', periods=10)
s = Series(Categorical(idx, ordered=True))
exp = """0 0 days 01:00:00
1 1 days 01:00:00
2 2 days 01:00:00
3 3 days 01:00:00
4 4 days 01:00:00
5 5 days 01:00:00
6 6 days 01:00:00
7 7 days 01:00:00
8 8 days 01:00:00
9 9 days 01:00:00
dtype: category
Categories (10, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 <
3 days 01:00:00 ... 6 days 01:00:00 < 7 days 01:00:00 <
8 days 01:00:00 < 9 days 01:00:00]""" # noqa
assert repr(s) == exp
|
bsd-3-clause
|
gnagel/backtrader
|
backtrader/plot/multicursor.py
|
3
|
12203
|
# LICENSE AGREEMENT FOR MATPLOTLIB 1.2.0
# --------------------------------------
#
# 1. This LICENSE AGREEMENT is between John D. Hunter ("JDH"), and the
# Individual or Organization ("Licensee") accessing and otherwise using
# matplotlib software in source or binary form and its associated
# documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, JDH
# hereby grants Licensee a nonexclusive, royalty-free, world-wide license
# to reproduce, analyze, test, perform and/or display publicly, prepare
# derivative works, distribute, and otherwise use matplotlib 1.2.0
# alone or in any derivative version, provided, however, that JDH's
# License Agreement and JDH's notice of copyright, i.e., "Copyright (c)
# 2002-2011 John D. Hunter; All Rights Reserved" are retained in
# matplotlib 1.2.0 alone or in any derivative version prepared by
# Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on or
# incorporates matplotlib 1.2.0 or any part thereof, and wants to
# make the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to matplotlib 1.2.0.
#
# 4. JDH is making matplotlib 1.2.0 available to Licensee on an "AS
# IS" basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 1.2.0
# WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
# 1.2.0 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
# LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
# MATPLOTLIB 1.2.0, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
# THE POSSIBILITY THEREOF.
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between JDH and
# Licensee. This License Agreement does not grant permission to use JDH
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using matplotlib 1.2.0,
# Licensee agrees to be bound by the terms and conditions of this License
# Agreement.
# CHANGES
# The original MultiCursor plots all horizontal lines at the same time
# The modified version plots only the horizontal line in the axis in which the
# motion event takes place
#
# The original MultiCursor uses the y limits of the last passed axis to
# calculate the mid point of the axis, which creates a huge distortion if all
# axes don't have the same y dimensions
#
# The modified version uses the y limits of each axis to calculate the initial
# position of each line, avoiding the distortion (see the usage sketch appended
# at the end of this module)
class Widget(object):
"""
Abstract base class for GUI neutral widgets
"""
drawon = True
eventson = True
_active = True
def set_active(self, active):
"""Set whether the widget is active.
"""
self._active = active
def get_active(self):
"""Get whether the widget is active.
"""
return self._active
    # set_active is overridden by SelectorWidgets.
active = property(get_active, lambda self, active: self.set_active(active),
doc="Is the widget active?")
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
class MultiCursor(Widget):
"""
Provide a vertical (default) and/or horizontal line cursor shared between
multiple axes.
    For the cursor to remain responsive you must keep a reference to
it.
Example usage::
from matplotlib.widgets import MultiCursor
from pylab import figure, show, np
t = np.arange(0.0, 2.0, 0.01)
s1 = np.sin(2*np.pi*t)
s2 = np.sin(4*np.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
horizOn=False, vertOn=True)
show()
"""
def __init__(self, canvas, axes, useblit=True,
horizOn=False, vertOn=True,
horizMulti=False, vertMulti=True,
horizShared=True, vertShared=False,
**lineprops):
self.canvas = canvas
self.axes = axes
self.horizOn = horizOn
self.vertOn = vertOn
self.horizMulti = horizMulti
self.vertMulti = vertMulti
self.visible = True
self.useblit = useblit and self.canvas.supports_blit
self.background = None
self.needclear = False
if self.useblit:
lineprops['animated'] = True
self.vlines = []
if vertOn:
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5 * (xmin + xmax)
for ax in axes:
if not horizShared:
xmin, xmax = ax.get_xlim()
xmid = 0.5 * (xmin + xmax)
vline = ax.axvline(xmid, visible=False, **lineprops)
self.vlines.append(vline)
self.hlines = []
if horizOn:
ymin, ymax = axes[-1].get_ylim()
ymid = 0.5 * (ymin + ymax)
for ax in axes:
if not vertShared:
ymin, ymax = ax.get_ylim()
ymid = 0.5 * (ymin + ymax)
hline = ax.axhline(ymid, visible=False, **lineprops)
self.hlines.append(hline)
self.connect()
def connect(self):
"""connect events"""
self._cidmotion = self.canvas.mpl_connect('motion_notify_event',
self.onmove)
self._ciddraw = self.canvas.mpl_connect('draw_event', self.clear)
def disconnect(self):
"""disconnect events"""
self.canvas.mpl_disconnect(self._cidmotion)
self.canvas.mpl_disconnect(self._ciddraw)
def clear(self, event):
"""clear the cursor"""
if self.ignore(event):
return
if self.useblit:
self.background = (
self.canvas.copy_from_bbox(self.canvas.figure.bbox))
for line in self.vlines + self.hlines:
line.set_visible(False)
def onmove(self, event):
if self.ignore(event):
return
if event.inaxes is None:
return
if not self.canvas.widgetlock.available(self):
return
self.needclear = True
if not self.visible:
return
if self.vertOn:
for line in self.vlines:
visible = self.visible
if not self.vertMulti:
visible = visible and line.axes == event.inaxes
if visible:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(visible)
if self.horizOn:
for line in self.hlines:
visible = self.visible
if not self.horizMulti:
visible = visible and line.axes == event.inaxes
if visible:
line.set_ydata((event.ydata, event.ydata))
line.set_visible(self.visible)
self._update(event)
def _update(self, event):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
if self.vertOn:
for ax, line in zip(self.axes, self.vlines):
if self.vertMulti or event.inaxes == line.axes:
ax.draw_artist(line)
if self.horizOn:
for ax, line in zip(self.axes, self.hlines):
if self.horizMulti or event.inaxes == line.axes:
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class MultiCursor2(Widget):
"""
Provide a vertical (default) and/or horizontal line cursor shared between
multiple axes.
    For the cursor to remain responsive you must keep a reference to
it.
Example usage::
from matplotlib.widgets import MultiCursor
from pylab import figure, show, np
t = np.arange(0.0, 2.0, 0.01)
s1 = np.sin(2*np.pi*t)
s2 = np.sin(4*np.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
horizOn=False, vertOn=True)
show()
"""
def __init__(self, canvas, axes, useblit=True, horizOn=False, vertOn=True,
**lineprops):
self.canvas = canvas
self.axes = axes
self.horizOn = horizOn
self.vertOn = vertOn
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5 * (xmin + xmax)
self.visible = True
self.useblit = useblit and self.canvas.supports_blit
self.background = None
self.needclear = False
if self.useblit:
lineprops['animated'] = True
if vertOn:
self.vlines = [ax.axvline(xmid, visible=False, **lineprops)
for ax in axes]
else:
self.vlines = []
if horizOn:
self.hlines = []
for ax in axes:
ymin, ymax = ax.get_ylim()
ymid = 0.5 * (ymin + ymax)
hline = ax.axhline(ymid, visible=False, **lineprops)
self.hlines.append(hline)
else:
self.hlines = []
self.connect()
def connect(self):
"""connect events"""
self._cidmotion = self.canvas.mpl_connect('motion_notify_event',
self.onmove)
self._ciddraw = self.canvas.mpl_connect('draw_event', self.clear)
def disconnect(self):
"""disconnect events"""
self.canvas.mpl_disconnect(self._cidmotion)
self.canvas.mpl_disconnect(self._ciddraw)
def clear(self, event):
"""clear the cursor"""
if self.ignore(event):
return
if self.useblit:
self.background = (
self.canvas.copy_from_bbox(self.canvas.figure.bbox))
for line in self.vlines + self.hlines:
line.set_visible(False)
def onmove(self, event):
if self.ignore(event):
return
if event.inaxes is None:
return
if not self.canvas.widgetlock.available(self):
return
self.needclear = True
if not self.visible:
return
if self.vertOn:
for line in self.vlines:
visible = True or line.axes == event.inaxes
line.set_xdata((event.xdata, event.xdata))
line.set_visible(visible)
if self.horizOn:
for line in self.hlines:
visible = line.axes == event.inaxes
line.set_ydata((event.ydata, event.ydata))
line.set_visible(visible)
self._update(event)
def _update(self, event):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
if self.vertOn:
for ax, line in zip(self.axes, self.vlines):
ax.draw_artist(line)
if self.horizOn:
for ax, line in zip(self.axes, self.hlines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
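# --- Hedged usage sketch (added, not part of the original module) -----------
# Demonstrates the modified MultiCursor on two axes with very different y
# ranges, the case the CHANGES note at the top of this file addresses.  Runs
# only when this file is executed directly; the data below is made up.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt

    t = np.arange(0.0, 2.0, 0.01)
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    ax1.plot(t, np.sin(2 * np.pi * t))           # y range roughly [-1, 1]
    ax2.plot(t, 100.0 * np.sin(4 * np.pi * t))   # y range roughly [-100, 100]

    # horizontal lines start at each axis' own midpoint (no distortion) and
    # only the line in the hovered axis follows the mouse
    multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1,
                        horizOn=True, vertOn=True)
    plt.show()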
|
gpl-3.0
|
gbrammer/unicorn
|
object_examples.py
|
2
|
57686
|
import os
import pyfits
import numpy as np
import glob
import shutil
import matplotlib.pyplot as plt
USE_PLOT_GUI=False
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import threedhst
import threedhst.eazyPy as eazy
import threedhst.catIO as catIO
import unicorn
import unicorn.brown_dwarf
import re
root = None
left = 0.1
bottom = 0.13
dy2d = 0.67
aspect = 0.65
temp_color = (8/255.,47/255.,101/255.)
lrange = np.array([1.05e4,1.68e4])
spec_linewidth=2
pad_linewidth=2
import unicorn
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit, zsp
USE_TEX = True
def fainter_examples():
"""
AEGIS-15-G141_00120 lines, z=2
AEGIS-14-G141_00426, z=2.3, continuum break
AEGIS-1-G141_00891, z=1.6, continuum break
AEGIS-28-G141_00684, H=23, continuum break
COSMOS-17-G141_00451, H=22.1, continuum break
COSMOS-18-G141_00996, H=22.8 continuum break
COSMOS-2-G141_00335, H=22.6, faint continuum + line in massive, dusty galaxy
COSMOS-25-G141_00280, H=22.8, faint continuum break + OIII line, again line looks like comes from elsewhere
COSMOS-25-G141_01354, H=22, nice continuum break
COSMOS-6-G141_00325, H=22.9, high eqw OIII + Hb, xxxx lines come from nearby high eqw object
COSMOS-6-G141_0330, High eqw, H=24.03 (this is the object contaminating the object above)
GOODS-S-23-G141_00780, H=22.6, contamination removal, continuum break, OIII + OII
MARSHALL-225-G141_00356, H=22.9, morphology mess, IR excess, OIII
x = ['AEGIS-15-G141_00120', 'AEGIS-14-G141_00426', 'AEGIS-1-G141_00891','AEGIS-28-G141_00684', 'COSMOS-17-G141_00451','COSMOS-18-G141_00996']
"""
import unicorn.object_examples
unicorn.object_examples.lrange = np.array([1.08e4,1.75e4])
unicorn.object_examples.general_plot(object='MARSHALL-225-G141_00356', show_SED=True, sync=False, y0=14, y1=None, SED_voffset=0.40, SED_hoffset=0.05, plot_min=0.0, plot_max=9.5, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=-9, remove_contamination=True, vscale=0.1, vthumb=(-0.1,0.01), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, show_line_stats=True, line_stats_pos=(-0.2, 0.05))
unicorn.object_examples.lrange = np.array([1.08e4,1.75e4])
unicorn.object_examples.general_plot(object='GOODS-S-23-G141_00780', show_SED=True, sync=False, y0=13, y1=70, SED_voffset=0.07, SED_hoffset=0.05, plot_min=-0.1, plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=-2, remove_contamination=True, vscale=0.2, vthumb=(-0.2,0.02), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, show_line_stats=True, line_stats_pos=(-0.2, 0.05))
unicorn.object_examples.lrange = np.array([1.08e4,1.68e4])
unicorn.object_examples.general_plot(object='COSMOS-6-G141_00330', show_SED=False, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=4, yticks=[0,1,2,3,4], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.5, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-18, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-25-G141_01354', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=13, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.08e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-25-G141_00280', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.12, SED_hoffset=0.05, plot_min=0, plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.08e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-2-G141_00335', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=30, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-18-G141_00996', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object='AEGIS-28-G141_00684', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.08e4,1.8e4])
unicorn.object_examples.general_plot(object='AEGIS-15-G141_00120', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=3, plot_max=12, yticks=[4,6,8,10,12], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.8e4])
unicorn.object_examples.general_plot(object='AEGIS-1-G141_00891', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=8, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-14-G141_00426', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=-0.5, plot_max=5.8, yticks=[0,2,4], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object='COSMOS-17-G141_00451', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=14, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.4, vthumb=(-0.8,0.08), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True, line_stats_pos=(-0.2, 0.05))
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-28-G141_00684', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.07, SED_hoffset=0.05, plot_min=-0.5, plot_max=6.2, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.5,0.05), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
ids = ['AEGIS-15-G141_00120', 'AEGIS-14-G141_00426', 'AEGIS-1-G141_00891','AEGIS-28-G141_00684', 'COSMOS-17-G141_00451','COSMOS-18-G141_00996']
for id in ids:
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object=id, show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True)
xx = """
Line emitters:
AEGIS-12-G141_00566, H=23.05
AEGIS-12-G141_00702, H=23.29
AEGIS-28-G141_00159
"""
ids = ['AEGIS-12-G141_00566','AEGIS-12-G141_00702','AEGIS-28-G141_00159','AEGIS-4-G141_00202','COSMOS-11-G141_00650','COSMOS-13-G141_01167','COSMOS-15-G141_00275','COSMOS-15-G141_00284','COSMOS-18-G141_00556','COSMOS-23-G141_00521','COSMOS-4-G141_00596','COSMOS-9-G141_01078','GOODS-S-27-G141_00387','PRIMO-1026-G141_00196','AEGIS-4-G141_00432','PRIMO-1026-G141_00491','PRIMO-1101-G141_00280']
for id in ids:
unicorn.object_examples.lrange = np.array([1.0e4,1.75e4])
unicorn.object_examples.general_plot(object=id, show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.08, SED_hoffset=0.05, plot_min=0, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
#
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-4-G141_00202', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=14, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-4-G141_00432', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=18, yticks=[0,5,10,15], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-12-G141_00566', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=11, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-12-G141_00702', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='AEGIS-28-G141_00159', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-4-G141_00596', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=16, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.00e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-9-G141_01078', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-11-G141_00650', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0., plot_max=12, yticks=[0,2,4,6,8,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-13-G141_01167', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-15-G141_00275', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-18-G141_00556', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-2, plot_max=14, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='COSMOS-23-G141_00521', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=7, yticks=[0,2,4,6], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='GOODS-S-27-G141_00387', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=-0.5, plot_max=9, yticks=[0,2,4,6,8], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='PRIMO-1026-G141_00196', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=0, plot_max=14, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
unicorn.object_examples.lrange = np.array([1.01e4,1.79e4])
unicorn.object_examples.general_plot(object='PRIMO-1026-G141_00491', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.4, SED_hoffset=0.05, plot_min=0, plot_max=16, yticks=[0,5,10], fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=0.3, vthumb=(-0.3,0.03), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-19, scale_to_f140_mag=True, show_line_stats=True)
def run_all():
import unicorn.object_examples
unicorn.object_examples.agn_group()
unicorn.object_examples.z4_quasar()
unicorn.object_examples.big_dead_galaxy()
unicorn.object_examples.high_signal_to_noise_galaxy()
unicorn.object_examples.l_dwarf()
unicorn.object_examples.t_dwarf()
def agn_group():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
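    ## (added note) AB zeropoint -> f_nu via 10**(-0.4*(abzp+48.6)) in
    ## erg/s/cm^2/Hz, then f_nu -> f_lambda with c/lambda**2 (c = 3.e18
    ## Angstrom/s), quoted in units of 1e-18 erg/s/cm^2/Angstrom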
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### AGN group
####
###################################################
# GOODS-N-36-G141_00991 / 1005
# for object in ['GOODS-N-36-G141_00991','GOODS-N-36-G141_01005']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/GOODS-N-36-G141_00991_thumb.fits.gz')
twod = pyfits.open('DATA/GOODS-N-36-G141_00991_2d.fits.gz')
spec2d = twod[1].data
y0, y1 = 24, 79
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.2, vmax=0.025, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect*plot_aspect/pix_aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0:y1, y0+5:y1+5], vmin=-0.8, vmax=0.1, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='GOODS-N-36-G141_00991', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
## Secondary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='GOODS-N-36-G141_01005', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
#ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='orange', linewidth=1, alpha=0.7)
####
zspec = 1.773
mag = phot.mag_f1392w[phot.id == 'GOODS-N-36-G141_00991'][0]
ax.text(0.05,0.8,r'$a)\ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=ax.transAxes, fontsize=11)
lines = [4102, 4341, 4862, 4980*1.08]
y0 = [0.7, 0.7, 1, 1.5]
labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','[OIII]4959+5007']
for i in range(len(lines)):
ax.text(lines[i]*(1+zspec), 3*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1,ymax*1.1)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-18}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
print 'Savefig'
print os.getcwd()
fig.savefig('agn_group.pdf')
######## Brown dwarf
### AEGIS-3-G141_00195 T-type
### GOODS-N-24-G141_01148 L-type
####### Massive galaxies
### z=2.0, huge, old: COSMOS-26-G141_00725
### z=1.9, zspec, beautiful fit UDF: PRIMO-1101-G141_01022
def z4_quasar():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### z=4.6 quasar
####
###################################################
# for object in ['COSMOS-28-G141_00896']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/COSMOS-28-G141_00896_thumb.fits.gz')
twod = pyfits.open('DATA/COSMOS-28-G141_00896_2d.fits.gz')
spec2d = twod[1].data
y0, y1 = 10,32
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.2, vmax=0.025, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0-2:y1-2, y0-2:y1-2], vmin=-2.4, vmax=0.3, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='COSMOS-28-G141_00896', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert*1.15
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
####
zspec = 4.656
mag = phot.mag_f1392w[phot.id == 'COSMOS-28-G141_00896'][0]
ax.text(0.05,0.8,r'$b)\ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=ax.transAxes, fontsize=11)
lines = [2799, 2326, 2439.]
y0 = [1.5, 0.7, 0.7, 0.7]
labels = ['Mg II', 'C II', 'Ne IV']
for i in range(len(lines)):
ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1,ymax*1.1)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-18}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,1,2])
print 'Savefig'
print os.getcwd()
fig.savefig('z4_quasar.pdf')
def big_dead_galaxy():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
# for object in ['COSMOS-26-G141_00725']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/COSMOS-26-G141_00725_thumb.fits.gz')
twod = pyfits.open('DATA/COSMOS-26-G141_00725_2d.fits.gz')
spec2d = twod[1].data-twod[4].data
y0, y1 = 24, 60
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0+1:y1+1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*1.2, vmax=0.0125*1.2, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0:y1, y0:y1], vmin=-2.4, vmax=0.3, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='COSMOS-26-G141_00725', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-18*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
####
zspec = 2.0832
mag = phot.mag_f1392w[phot.id == 'COSMOS-26-G141_00725'][0]
ax.text(0.05,0.8,r'$c)\ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=ax.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1,ymax*1.2)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-18}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,1,2,3])
#### Inset full sed
ax = fig.add_axes((left+0.55, bottom+0.1, 0.99-left-0.6, dy2d*0.4))
ax.plot(lci[is_spec], fobs[is_spec], alpha=0.9, color='black', linewidth=2)
ax.plot(lambdaz,temp_sed, color='red', linewidth=1, alpha=0.3)
ax.plot(lci[~is_spec], fobs[~is_spec], marker='o', linestyle='None', alpha=0.3, color='black')
ax.semilogx()
ax.set_xlim(3000,9.e4)
ax.set_ylim(-0.1*ymax,ymax*1.2)
ax.set_yticklabels([])
ax.set_xticklabels([r'$10^4$',r'$5\times10^4$'])
xtick = ax.set_xticks([1.e4,5.e4]); ytick = ax.set_yticks([0,1,2,3])
#print os.getcwd()
fig.savefig('big_dead_galaxy.pdf')
def high_signal_to_noise_galaxy():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### z=1.9 high S/N galaxy (PRIMO-1101-G141_01022)
####
###################################################
#
# for object in ['PRIMO-1101-G141_01022']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/PRIMO-1101-G141_01022_thumb.fits.gz')
twod = pyfits.open('DATA/PRIMO-1101-G141_01022_2d.fits.gz')
spec2d = twod[1].data-twod[4].data
y0, y1 = 31, 56
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*0.8, vmax=0.0125*0.8, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0:y1, y0:y1], vmin=-1.4, vmax=0.15, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='PRIMO-1101-G141_01022', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-19*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-19*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
####
zspec = 1.905
mag = phot.mag_f1392w[phot.id == 'PRIMO-1101-G141_01022'][0]
#mass = mcat.lmass
ax.text(0.05,0.8,r'$d)\ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=ax.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1*ymax,ymax*1.3)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-19}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,1,2,3,4,5])
#### Inset full sed
ax = fig.add_axes((left+0.55, bottom+0.1, 0.99-left-0.6, dy2d*0.4))
ax.plot(lci[is_spec], fobs[is_spec], alpha=0.9, color='black', linewidth=2)
ax.plot(lambdaz,temp_sed, color='red', linewidth=1, alpha=0.3)
ax.plot(lci[~is_spec], fobs[~is_spec], marker='o', linestyle='None', alpha=0.3, color='black')
ax.semilogx()
ax.set_xlim(3000,9.e4)
ax.set_ylim(-0.1*ymax,ymax*1.1)
ax.set_yticklabels([])
ax.set_xticklabels([r'$10^4$',r'$5\times10^4$'])
xtick = ax.set_xticks([1.e4,5.e4]); ytick = ax.set_yticks([0,1,2,3,4,5])
print 'Savefig'
#print os.getcwd()
fig.savefig('high_signal_to_noise_galaxy.pdf')
#
def l_dwarf():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### L dwarf
####
###################################################
#
# for object in ['GOODS-N-24-G141_01148']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/GOODS-N-24-G141_01148_thumb.fits.gz')
twod = pyfits.open('DATA/GOODS-N-24-G141_01148_2d.fits.gz')
spec2d = twod[1].data-twod[4].data
y0, y1 = 10, 30
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*0.8, vmax=0.0125*0.8, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0:y1, y0:y1], vmin=-1.4, vmax=0.15, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='GOODS-N-24-G141_01148', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-19*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-19*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
bd = unicorn.brown_dwarf.BD_fit()
type = ['L4']
ii = 0
colors = ['green','blue','red','orange']
for temp in bd.templates:
if temp.type[0:2] in type:
if temp.type == 'L3+/-1':
continue
print temp.type
yint = np.interp(lci[is_spec], temp.wave, temp.flux)
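# Least-squares scale for the template: norm = sum(f*t)/sum(t^2) minimizes
# sum((f - norm*t)^2) over the spectral points.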
norm = np.sum(fobs[is_spec]*yint)/np.sum(yint**2)
ax.plot(temp.wave, temp.flux*norm, color='white', linewidth=2, alpha=0.4)
ax.plot(temp.wave, temp.flux*norm, color=colors[ii % 4], linewidth=2, alpha=0.7)
ax.text(0.9-ii*0.08, 0.83, temp.type, color=colors[ii % 4], transform=ax.transAxes)
ii = ii + 1
#ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
#ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.5)
####
zspec = 1.905
mag = phot.mag_f1392w[phot.id == 'GOODS-N-24-G141_01148'][0]
ax.text(0.05,0.8,r'$f)\ m_{140}=%.1f$' %(mag), transform=ax.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1*ymax,ymax*1.3)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-19}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,5,10,15])
print 'Savefig'
#print os.getcwd()
fig.savefig('l_dwarf.pdf')
def t_dwarf():
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
######## AGN/Quasars
### Binary quasar: GOODS-N-42-G141_00388/384
### z=2.2 quasar: COSMOS-1-G141_00206
### very broad H-a line, z=1.22: PRIMO-1101-G141_00993
### Even more interesting merger/quasar, z=1.778: GOODS-N-36-G141_00991
### Another mess: COSMOS-3-G141_01156, z=1.34
### Multiple components, z=1.27 GOODS-N-33-G141_01028/1073/1069/1055
### z=4.6, MgII: COSMOS-28-G141_00896
###################################################
####
#### T dwarf
####
###################################################
#
# for object in ['AEGIS-3-G141_00195']:
# os.system('rsync -avz $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
# os.system('rsync -avz $UNICORN:/3DHST/Spectra/Work/ANALYSIS/REDSHIFT_FITS_v1.6/OUTPUT/%s* DATA/' %(object))
thumb = pyfits.open('DATA/AEGIS-3-G141_00195_thumb.fits.gz')
twod = pyfits.open('DATA/AEGIS-3-G141_00195_2d.fits.gz')
spec2d = twod[1].data-twod[4].data
y0, y1 = 10, 30
if USE_TEX:
plt.rcParams['text.usetex'] = True
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times'
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12)
#### Twod
ax = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*0.8, vmax=0.0125*0.8, interpolation='nearest')
ax.set_yticklabels([]); ax.set_xticklabels([])
xtick = ax.set_xticks(tick_int); ytick = ax.set_yticks([0,y1-y0])
#### Thumb
ax = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
ax.imshow(0-thumb[0].data[y0-1:y1-1, y0-1:y1-1], vmin=-1.4, vmax=0.15, interpolation='nearest', zorder=2, aspect='auto')
ax.set_yticklabels([])
ax.set_xticklabels([])
xtick = ax.set_xticks([0,y1-y0]); ytick = ax.set_yticks([0,y1-y0])
#### Spectrum
ax = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(0, MAIN_OUTPUT_FILE='AEGIS-3-G141_00195', OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same')
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**-18*(lci/5500.)**2
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**-18*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
ax.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
bd = unicorn.brown_dwarf.BD_fit()
type = ['T6','T5']
ii = 0
colors = ['green','blue','red','orange']
for temp in bd.templates:
if temp.type[0:2] in type:
if temp.type == 'L3+/-1':
continue
print temp.type
yint = np.interp(lci[is_spec], temp.wave, temp.flux)
norm = np.sum(fobs[is_spec]*yint)/np.sum(yint**2)
ax.plot(temp.wave, temp.flux*norm, color='white', linewidth=2, alpha=0.4)
ax.plot(temp.wave, temp.flux*norm, color=colors[ii % 4], linewidth=2, alpha=0.7)
ax.text(0.9-ii*0.05, 0.83, temp.type, color=colors[ii % 4], transform=ax.transAxes, fontsize=11)
ii = ii + 1
#ax.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
#ax.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.5)
####
zspec = 1.905
mag = phot.mag_f1392w[phot.id == 'AEGIS-3-G141_00195'][0]
ax.text(0.05,0.8,r'$e)\ m_{140}=%.1f$' %(mag), transform=ax.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# ax.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
ax.set_ylim(-0.1*ymax,ymax*1.3)
ax.set_xlim(lrange[0], lrange[1])
ax.set_xlabel(r'$\lambda$')
ax.set_ylabel(r'$f_\lambda\ [10^{-18}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$')
ytick = ax.set_yticks([0,1,2])
print 'Savefig'
#print os.getcwd()
fig.savefig('t_dwarf.pdf')
######## Brown dwarf
### AEGIS-3-G141_00195 T-type
### GOODS-N-24-G141_01148 L-type
####### Massive galaxies
### z=2.0, huge, old: COSMOS-26-G141_00725
### z=1.9, zspec, beautiful fit UDF: PRIMO-1101-G141_01022
def get_tdwarf_mag():
"""
Get the broad / medium-band H magnitudes of the T dwarf
to compare to m140
"""
unicorn.catalogs.read_catalogs()
from unicorn.catalogs import zout, phot, mcat, lines, rest, gfit
object = 'AEGIS-3-G141_00195'
ra = phot.x_world[phot.id == object][0]
dec = phot.y_world[phot.id == object][0]
m140 = phot.mag_f1392w[phot.id == object][0]
nmbs_cat, nmbs_zout, nmbs_fout = unicorn.analysis.read_catalogs(root=object)
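# Small-angle separation in arcsec: flat-sky approximation, with a cos(dec)
# factor applied to the RA difference.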
dr = np.sqrt((nmbs_cat.ra-ra)**2*np.cos(dec/360*2*np.pi)**2+(nmbs_cat.dec-dec)**2)*3600.
h1mag = 25-2.5*np.log10((nmbs_cat.H1*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
h2mag = 25-2.5*np.log10((nmbs_cat.H2*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
hmag = 25-2.5*np.log10(((nmbs_cat.H1+nmbs_cat.H2)/2.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
jmag = 25-2.5*np.log10(((nmbs_cat.J2+nmbs_cat.J3)/2.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
jmag = 25-2.5*np.log10(((nmbs_cat.J3)/1.*nmbs_cat.Ktot/nmbs_cat.K)[dr == dr.min()][0])
wirds = catIO.Readfile('/Users/gbrammer/research/drg/PHOTZ/EAZY/WIRDS/WIRDS_D3-95_Ks_ugrizJHKs_141927+524056_T0002.cat.candels')
dr = np.sqrt((wirds.ra-ra)**2*np.cos(dec/360.*2*np.pi)**2+(wirds.dec-dec)**2)*3600.
jwirds = wirds.jtot[dr == dr.min()][0]
hwirds = wirds.htot[dr == dr.min()][0]
print ' J H J-H H1 H2'
print 'NMBS %5.2f %5.2f %5.2f %5.2f %5.2f' %(jmag, hmag, jmag-hmag, h1mag, h2mag)
print 'WIRDS %5.2f %5.2f %5.2f' %(jwirds, hwirds, jwirds-hwirds)
#### Vrba et al. (2004)
#absH = np.array([14.52,14.78,15.07])
#d =
def misc_objects():
unicorn.object_examples.general_plot('UDF-Full-G141_00624', flam_norm=-19, vscale=0.1, vthumb=(-0.08*0.3,0.01*0.3), SED_voffset=0.42, SED_hoffset=0.05, remove_contamination=False)
def general_plot(object='AEGIS-9-G141_00154', show_SED=True, sync=False, y0=None, y1=None, SED_voffset=0.1, SED_hoffset=0, plot_min=None, plot_max=None, yticks=None, fit_path='REDSHIFT_FITS_v1.6', dy_thumb=0, dx_thumb=0, remove_contamination=True, vscale=1, vthumb=(-1,0.1), fit_version=0, show_2D = True, show_Thumb=True, show_Fit=True, flam_norm=-18, scale_to_f140_mag=True, show_line_stats=False, line_stats_pos=(0.05, 0.05)):
import unicorn.catalogs
lines = unicorn.catalogs.lines
import unicorn.object_examples
dy2d = unicorn.object_examples.dy2d
os.chdir('/research/HST/GRISM/3DHST/ANALYSIS/SURVEY_PAPER/OBJECT_EXAMPLES')
### F_lambda
## obs_convert = 10**(-0.4*(abzp+48.6))*3.e18/lc**2/10.**-18
if not os.path.exists('DATA/%s.zout' %(object)):
sync=True
if sync:
os.system('rsync -avz --progress $UNICORN:/Users/gbrammer/Sites_GLOBAL/P/GRISM_v1.6/images/%s* DATA/' %(object))
os.system('rsync -avz --progress $UNICORN:/3DHST/Spectra/Work/ANALYSIS/%s/OUTPUT/%s* DATA/' %(fit_path, object))
zout_file = catIO.Readfile('DATA/%s.zout' %(object))
thumb = pyfits.open('DATA/%s_thumb.fits.gz' %(object))
twod = pyfits.open('DATA/%s_2d.fits.gz' %(object))
spec2d = twod[1].data
if remove_contamination:
spec2d -= twod[4].data
#y0, y1 = 24, 60
if y0 is None:
y0 = 0
if y1 is None:
y1 = spec2d.shape[0]
print 'NY: %d' %(spec2d.shape[0])
fig = unicorn.catalogs.plot_init(square=True, xs=5, aspect=aspect, left=0.12, use_tex=USE_TEX)
#### Twod
if show_2D:
ax2D = fig.add_axes((left, bottom+dy2d, 0.99-left, 0.99-bottom-dy2d))
ax2D.plot([0,1])
head = twod[1].header
lam_idx = np.arange(head['NAXIS1'])
lam = (lam_idx+1-head['CRPIX1'])*head['CDELT1']+head['CRVAL1']
lam_mima = np.cast[int](np.round(np.interp(lrange, lam, lam_idx)))
tick_int = np.interp(np.array([1.2,1.4,1.6])*1.e4, lam, lam_idx) - np.interp(lrange[0], lam, lam_idx)-0.75
plot_aspect = (bottom+dy2d)/(0.99-bottom-dy2d)/aspect
pix_aspect = (lam_mima[1]-lam_mima[0])*1./(y1-y0)
spec2d_sub = spec2d[y0:y1,lam_mima[0]:lam_mima[1]]
ax2D.imshow(0-spec2d_sub, aspect='auto', vmin=-0.1*1.2*vscale, vmax=0.0125*1.2*vscale, interpolation='nearest')
ax2D.set_yticklabels([]); ax2D.set_xticklabels([])
xtick = ax2D.set_xticks(tick_int); ytick = ax2D.set_yticks([0,y1-y0])
#### Thumb
if show_Thumb:
axThumb = fig.add_axes((left, bottom+dy2d, (0.99-bottom-dy2d)*aspect, 0.99-bottom-dy2d))
if dx_thumb is None:
dx_thumb = dy_thumb
axThumb.imshow(0-thumb[0].data[y0+dy_thumb:y1+dy_thumb, y0+dx_thumb:y1+dx_thumb], vmin=vthumb[0], vmax=vthumb[1], interpolation='nearest', zorder=2, aspect='auto')
axThumb.set_yticklabels([])
axThumb.set_xticklabels([])
xtick = axThumb.set_xticks([0,y1-y0]); ytick = axThumb.set_yticks([0,y1-y0])
else:
axThumb=None
else:
ax2D = None
axThumb=None
dy2d = 0.99-bottom
#### Spectrum
axSpec = fig.add_axes((left, bottom, 0.99-left, dy2d))
## Primary
lambdaz, temp_sed, lci, obs_sed, fobs, efobs = eazy.getEazySED(fit_version, MAIN_OUTPUT_FILE=object, OUTPUT_DIRECTORY='DATA', CACHE_FILE = 'Same', scale_flambda=False)
dlam_spec = lci[-1]-lci[-2]
is_spec = np.append(np.abs(1-np.abs(lci[1:]-lci[0:-1])/dlam_spec) < 0.05,True)
obs_convert = 10**(-0.4*(25+48.6))*3.e18/lci**2/10.**flam_norm*(lci/5500.)**2
#obs_convert = 10**-17/10**flam_norm # now comes out of getEazySED in units of 10**-17 flam
fobs, efobs, obs_sed = fobs*obs_convert, efobs*obs_convert, obs_sed*obs_convert
#### Try integrating the spectrum and comparing to mag
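# Synthetic AB magnitude through the F140W bandpass:
# m = -2.5*log10( int(T * f_nu dlam) / int(T dlam) ) - 48.6,
# with f_nu rebuilt from the plotted f_lambda (f_nu = f_lambda * lam^2 / c).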
fnu = fobs*lci**2/3.e18*10**(flam_norm)
xfilt, yfilt = np.loadtxt(os.getenv('iref')+'/F140W.dat', unpack=True)
yint = np.interp(lci[is_spec], xfilt, yfilt)
m140_int = -2.5*np.log10(np.trapz(yint*fnu[is_spec],lci[is_spec])/np.trapz(yint,lci[is_spec]))-48.6
try:
mag = phot.mag_f1392w[phot.id == object][0]
except:
mag = -1
#
print m140_int, mag
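# If a catalog F140W magnitude exists, rescale spectrum and fit so the
# synthetic magnitude matches it: flux ratio 10**(-0.4*(mag - m140_int)).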
if (mag > 0) & scale_to_f140_mag:
scale_to_f140 = 10**(-0.4*(mag-m140_int))
fobs, efobs, obs_sed = fobs*scale_to_f140, efobs*scale_to_f140, obs_sed*scale_to_f140
temp_sed = temp_sed * scale_to_f140
temp_sed *= 10**(-0.4*(25+48.6))*3.e18/lambdaz**2/10.**flam_norm*(lambdaz/5500.)**2
ymax = max(fobs[is_spec & (fobs > 0)])
axSpec.plot(lci[is_spec],fobs[is_spec], color='black', linewidth=spec_linewidth)
if show_Fit:
axSpec.plot(lci[is_spec],obs_sed[is_spec], color='white', alpha=0.8, linewidth=pad_linewidth)
axSpec.plot(lci[is_spec],obs_sed[is_spec], color='red', linewidth=1, alpha=0.7)
####
zspec = 2.0832
#zspec = zout.z_peak[0::3][zout.id[0::3] == object]
zspec = zout_file.z_peak[fit_version]
if USE_TEX:
object_str = object.replace('_','\_')
else:
object_str = object
axSpec.text(0.05,0.9, object_str, transform=axSpec.transAxes, fontsize=9, backgroundcolor='white')
if mag > 0:
axSpec.text(0.05,0.8,r'$ \ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=axSpec.transAxes, fontsize=11, color='white', backgroundcolor='white', alpha=0.2)
axSpec.text(0.05,0.8,r'$ \ z=%.3f,\ m_{140}=%.1f$' %(zspec, mag), transform=axSpec.transAxes, fontsize=11)
else:
axSpec.text(0.05,0.8,r'$z=%.3f$' %(zspec), transform=axSpec.transAxes, fontsize=11)
# lines = [4102, 4341, 4862, 4980]
# y0 = [0.7, 0.7, 1, 1.5]
# labels = [r'H$\delta$',r'H$\gamma$', r'H$\beta$','O III 4959+5007']
# for i in range(len(lines)):
# axSpec.text(lines[i]*(1+zspec), 0.5*y0[i], labels[i], horizontalalignment='center')
if plot_min is None:
plot_min = -0.1*ymax
if plot_max is None:
plot_max = 1.2*ymax
axSpec.set_ylim(plot_min,plot_max)
axSpec.set_xlim(lrange[0], lrange[1])
axSpec.set_xlabel(r'$\lambda\ [\mathrm{\AA}]$')
axSpec.set_ylabel(r'$f_\lambda\ [10^{%0d}\ \mathrm{erg\ s^{-1}\ cm^{-2}\ \AA^{-1}}]$' %(flam_norm))
if yticks is not None:
ytick = axSpec.set_yticks(yticks)
#### Inset full sed
if show_SED:
axInset = fig.add_axes((left+0.55+SED_hoffset, bottom+SED_voffset, 0.99-left-0.6, dy2d*0.4))
axInset.plot(lci[is_spec], fobs[is_spec], alpha=0.9, color='black', linewidth=1)
axInset.plot(lambdaz, temp_sed, color='red', linewidth=1, alpha=0.3)
axInset.plot(lci[~is_spec], fobs[~is_spec], marker='o', linestyle='None', alpha=0.5, color='white')
axInset.plot(lci[~is_spec], fobs[~is_spec], marker='o', linestyle='None', alpha=0.3, color='black')
axInset.semilogx()
axInset.set_xlim(3000,9.e4)
axInset.set_ylim(-0.1*ymax,ymax*1.2)
axInset.set_xticklabels([r'$10^4$',r'$5\times10^4$'])
xtick = axInset.set_xticks([1.e4,5.e4])
if yticks is not None:
axInset.set_yticklabels([])
ytick = axInset.set_yticks(yticks)
else:
axInset = None
#print os.getcwd()
#
mat = lines.id == object
print '%s %.4f %.1f %.1f %.1e %.1f' %(object, lines.z_grism[mat][0], lines.oiii_eqw[mat][0], lines.oiii_eqw_err[mat][0], lines.oiii_flux[mat][0], lines.hbeta_eqw[mat][0])
if show_line_stats:
if (lines.z_grism[mat][0] < 1.5) & (lines.halpha_eqw_err[mat][0] > 0):
axSpec.text(line_stats_pos[0], line_stats_pos[1], r'${\rm EW}_{\rm H\alpha}=%d\pm%d,\ f_{\rm H\alpha}=%.1f\pm%.1f$' %(lines.halpha_eqw[mat][0], lines.halpha_eqw_err[mat][0], lines.halpha_flux[mat][0]/1.e-17, lines.halpha_eqw_err[mat][0]/lines.halpha_eqw[mat][0]*lines.halpha_flux[mat][0]/1.e-17), horizontalalignment='left', transform=axSpec.transAxes, backgroundcolor='white', fontsize=9)
#
if (lines.z_grism[mat][0] > 1.19) & (lines.z_grism[mat][0] < 2.3) & (lines.oiii_eqw_err[mat][0] > 0):
axSpec.text(line_stats_pos[0]+0.45, line_stats_pos[1], r'${\rm EW}_{\rm OIII}=%d\pm%d,\ f_{\rm OIII}=%.1f\pm%.1f$' %(lines.oiii_eqw[mat][0], lines.oiii_eqw_err[mat][0], lines.oiii_flux[mat][0]/1.e-17, lines.oiii_eqw_err[mat][0]/lines.oiii_eqw[mat][0]*lines.oiii_flux[mat][0]/1.e-17), horizontalalignment='left', transform=axSpec.transAxes, backgroundcolor='white', fontsize=9)
unicorn.catalogs.savefig(fig, object+'_display.pdf')
return fig, ax2D, axThumb, axSpec, axInset
|
mit
|
AndreasMadsen/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/classifier_test.py
|
16
|
5175
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.session_bundle import manifest_pb2
def iris_input_fn(num_epochs=None):
iris = tf.contrib.learn.datasets.load_iris()
features = tf.train.limit_epochs(
tf.reshape(tf.constant(iris.data), [-1, 4]), num_epochs=num_epochs)
labels = tf.reshape(tf.constant(iris.target), [-1])
return features, labels
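# model_fn contract used by these tf.contrib.learn estimators: given
# (features, labels, mode) it returns a (predictions, loss, train_op) tuple.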
def logistic_model_fn(features, labels, unused_mode):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_params_fn(features, labels, unused_mode, params):
labels = tf.one_hot(labels, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, labels)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
class ClassifierTest(tf.test.TestCase):
def testIrisAll(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
self._runIrisAll(est)
def testIrisAllWithParams(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_params_fn,
n_classes=3,
params={'learning_rate': 0.01})
self._runIrisAll(est)
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(input_fn=iris_input_fn, steps=100)
est.evaluate(input_fn=iris_input_fn, steps=1, name='eval')
predict_input_fn = functools.partial(iris_input_fn, num_epochs=1)
predictions = list(est.predict(input_fn=predict_input_fn))
self.assertEqual(len(predictions), iris.target.shape[0])
def _runIrisAll(self, est):
iris = tf.contrib.learn.datasets.load_iris()
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(x=iris.data, y=iris.target, name='eval')
predictions = list(est.predict(x=iris.data))
predictions_proba = list(est.predict_proba(x=iris.data))
self.assertEqual(len(predictions), iris.target.shape[0])
self.assertAllEqual(predictions, np.argmax(predictions_proba, axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions)
self.assertAllClose(other_score, scores['accuracy'])
def _get_default_signature(self, export_meta_filename):
"""Gets the default signature from the export.meta file."""
with tf.Session():
save = tf.train.import_meta_graph(export_meta_filename)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
signatures_any = collection_def['serving_signatures'].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
return default_signature
# Disable this test case until b/31032996 is fixed.
def _testExportMonitorRegressionSignature(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
export_dir = tempfile.mkdtemp() + 'export/'
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=1,
signature_fn=tf.contrib.learn.classifier.classification_signature_fn)
est.fit(iris.data, iris.target, steps=2, monitors=[export_monitor])
self.assertTrue(tf.gfile.Exists(export_dir))
self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
self.assertTrue(tf.gfile.Exists(export_dir + '00000002/export'))
# Validate the signature
signature = self._get_default_signature(export_dir + '00000002/export.meta')
self.assertTrue(signature.HasField('classification_signature'))
if __name__ == '__main__':
tf.test.main()
|
apache-2.0
|
gandalf221553/CodeSection
|
kivy_matplotlib.py
|
1
|
25927
|
import kivy
kivy.require('1.9.1') # replace with your current kivy version !
############
#to install the garden components
#C:\Users\Von Braun\Downloads\WinPython-64bit-3.5.2.3Qt5\python-3.5.2.amd64\Scripts
#https://docs.scipy.org/doc/numpy/f2py/index.html
#!python garden install nomefile
############
from kivy.app import App
from kivy.lang import Builder
from kivy.config import Config
from kivy.core.window import Window
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ListProperty
from kivy.uix.widget import Widget
import numpy as np
np.set_printoptions(threshold=np.nan)
#from kivy.app import App
#from kivy.uix.floatlayout import FloatLayout
from kivy.factory import Factory
#from kivy.properties import ObjectProperty
from kivy.uix.popup import Popup
import os
#from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
"""Simple widget to display a matplolib figure in kivy"""
#from kivy.uix.widget import Widget
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backend_bases import NavigationToolbar2
from kivy.graphics.texture import Texture
from kivy.properties import ObjectProperty
from kivy.base import EventLoop
import math
from kivy.graphics import Mesh
Config.set('graphics', 'fullscreen', 1)
Window.size = (700,600)
Config.set('graphics','resizable',0)
printa=0
if printa:
print(Window.size)
if 0:
fullscreen=0
if fullscreen:
Window.size = (Window._get_width(),Window._get_height())
if printa:
print(os.getcwd())
class LblTxt(BoxLayout):
from kivy.properties import ObjectProperty
theTxt = ObjectProperty(None)
"""
class CheckLista(BoxLayout):
from kivy.uix.checkbox import CheckBox
CheckForm = CheckBox()
"""
#https://github.com/jeysonmc/kivy_matplotlib/blob/master/README.md
#http://pythonmobile.blogspot.it/2014/06/21-checkboxes.html
class MatplotFigure(Widget):
"""Widget to show a matplotlib figure in kivy.
The figure is rendered internally in an AGG backend then
the rgb data is obtained and blitted into a kivy texture.
"""
figure = ObjectProperty(None)
_box_pos = ListProperty([0, 0])
_box_size = ListProperty([0, 0])
_img_texture = ObjectProperty(None)
_bitmap = None
_pressed = False
figcanvas = ObjectProperty(None)
# I chose composition over multiple inheritance because of name clashes
def on_figure(self, obj, value):
self.figcanvas = _FigureCanvas(self.figure, self)
self.figcanvas._isDrawn = False
l, b, w, h = self.figure.bbox.bounds
#print(l,b,w,h)
w = int(math.ceil(w))
h = int(math.ceil(h))
self.width = w
self.height = h
# Texture
self._img_texture = Texture.create(size=(w, h))
def __init__(self, figure=None, *args, **kwargs):
super(MatplotFigure, self).__init__(*args, **kwargs)
self.figure = figure
# Event binding
EventLoop.window.bind(mouse_pos=self.on_mouse_move)
self.bind(size=self._onSize)
def _draw_bitmap(self):
if self._bitmap is None:
print("No bitmap!")
return
self._img_texture = Texture.create(size=(self.bt_w, self.bt_h))
self._img_texture.blit_buffer(
self._bitmap, colorfmt="rgb", bufferfmt='ubyte')
self._img_texture.flip_vertical()
def on_mouse_move(self, window, mouse_pos):
""" Mouse move """
if self._pressed: # Do not process this event if there's a touch_move
return
x, y = mouse_pos
if self.collide_point(x, y):
real_x, real_y = x - self.pos[0], y - self.pos[1]
self.figcanvas.motion_notify_event(real_x, real_y, guiEvent=None)
def on_touch_down(self, event):
x, y = event.x, event.y
if self.collide_point(x, y):
self._pressed = True
real_x, real_y = x - self.pos[0], y - self.pos[1]
self.figcanvas.button_press_event(real_x, real_y, 1, guiEvent=event)
def on_touch_move(self, event):
""" Mouse move while pressed """
x, y = event.x, event.y
if self.collide_point(x, y):
real_x, real_y = x - self.pos[0], y - self.pos[1]
self.figcanvas.motion_notify_event(real_x, real_y, guiEvent=event)
def on_touch_up(self, event):
x, y = event.x, event.y
if self._box_size[0] > 1 or self._box_size[1] > 1:
self.reset_box()
if self.collide_point(x, y):
pos_x, pos_y = self.pos
real_x, real_y = x - pos_x, y - pos_y
self.figcanvas.button_release_event(real_x, real_y, 1, guiEvent=event)
self._pressed = False
def new_timer(self, *args, **kwargs):
pass # TODO
def _onSize(self, o, size):
if self.figure is None:
return
# Create a new, correctly sized bitmap
self._width, self._height = size
self._isDrawn = False
if self._width <= 1 or self._height <= 1:
return
dpival = self.figure.dpi
winch = self._width / dpival
hinch = self._height / dpival
self.figure.set_size_inches(winch, hinch)
self.figcanvas.resize_event()
self.figcanvas.draw()
def reset_box(self):
self._box_size = 0, 0
self._box_pos = 0, 0
def draw_box(self, event, x0, y0, x1, y1):
pos_x, pos_y = self.pos
# Kivy coords
y0 = pos_y + y0
y1 = pos_y + y1
self._box_pos = x0, y0
self._box_size = x1 - x0, y1 - y0
class _FigureCanvas(FigureCanvasAgg):
"""Internal AGG Canvas"""
def __init__(self, figure, widget, *args, **kwargs):
self.widget = widget
super(_FigureCanvas, self).__init__(figure, *args, **kwargs)
def draw(self):
"""
Render the figure using agg.
"""
super(_FigureCanvas, self).draw()
agg = self.get_renderer()
w, h = agg.width, agg.height
self._isDrawn = True
self.widget.bt_w = w
self.widget.bt_h = h
self.widget._bitmap = agg.tostring_rgb()
self.widget._draw_bitmap()
def blit(self, bbox=None):
# TODO bbox
agg = self.get_renderer()
w, h = agg.width, agg.height
self.widget._bitmap = agg.tostring_rgb()
self.widget.bt_w = w
self.widget.bt_h = h
self.widget._draw_bitmap()
#def print_figure(self,filename, *args, **kwargs):
#http://stackoverflow.com/questions/17538235/unable-to-save-matplotlib-figure-figure-canvas-is-none
#http://answers.elteacher.info/questions/post/229454/plot-radec-polygons-with-astropy-wcs-aplpy-fitsfigure-ask-question.html
#https://www.google.it/search?q=kivy+super+print_figure&ie=utf-8&oe=utf-8&client=firefox-b-ab&gfe_rd=cr&ei=jHGxWO2YK_CEygWStrPADQ
#https://github.com/dbuscombe-usgs/lobos/blob/master/kivy_matplotlib.py
"""
finchenonlomettiapposto=0
if finchenonlomettiapposto:
super(self.print_figure, self).print_figure(filename, *args, **kwargs)
if self._isDrawn:
self.draw()
"""
class MatplotNavToolbar(BoxLayout):
"""Figure Toolbar"""
pan_btn = ObjectProperty(None)
zoom_btn = ObjectProperty(None)
home_btn = ObjectProperty(None)
info_lbl = ObjectProperty(None)
_navtoolbar = None # Internal NavToolbar logic
figure_widget = ObjectProperty(None)
def __init__(self, figure_widget=None, *args, **kwargs):
super(MatplotNavToolbar, self).__init__(*args, **kwargs)
self.figure_widget = figure_widget
def on_figure_widget(self, obj, value):
self.figure_widget.bind(figcanvas=self._canvas_ready)
def _canvas_ready(self, obj, value):
self._navtoolbar = _NavigationToolbar(value, self)
self._navtoolbar.figure_widget = obj
class _NavigationToolbar(NavigationToolbar2):
figure_widget = None
def __init__(self, canvas, widget):
self.widget = widget
super(_NavigationToolbar, self).__init__(canvas)
def _init_toolbar(self):
self.widget.home_btn.bind(on_press=self.home)
self.widget.pan_btn.bind(on_press=self.pan)
self.widget.zoom_btn.bind(on_press=self.zoom)
def dynamic_update(self):
self.canvas.draw()
def draw_rubberband(self, event, x0, y0, x1, y1):
self.figure_widget.draw_box(event, x0, y0, x1, y1)
def set_message(self, s):
self.widget.info_lbl.text = s
class LoadDialog(BoxLayout):
load = ObjectProperty(None)
cancel = ObjectProperty(None)
class SaveDialog(BoxLayout):
save = ObjectProperty(None)
cancel = ObjectProperty(None)
Factory.register('LoadDialog', cls=LoadDialog)
Factory.register('SaveDialog', cls=SaveDialog)
Factory.register('MatplotFigure', cls=MatplotFigure)
Factory.register('MatplotNavToolbar', cls=MatplotNavToolbar)
if __name__ == '__main__':
# Example
import matplotlib as mpl
import numpy as np
class CalcolatriceApp(App):
##########################################################################
loadfile = ObjectProperty(None)
savefile = ObjectProperty(None)
text_input = ObjectProperty(None)
def build_mesh(self):
from math import sin, cos, pi
""" returns a Mesh of a rough circle. """
vertices = []
indices = []
step = 10
istep = (pi * 2) / float(step)
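# Each Mesh vertex is packed as (x, y, u, v); with mode='triangle_fan' the
# points are joined into a filled polygon approximating a circle.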
for i in range(step):
x = 300 + cos(istep * i) * 100
y = 300 + sin(istep * i) * 100
vertices.extend([x, y, 0, 0])
indices.append(i)
"""
Mesh:
vertices: (x1, y1, s1, v1, x2, y2, s2, v2, x3, y3, s3, v3...)
indices: (1, 2, 3...)
texture: some_texture
rgba: 1,1,1,1
mode: some_mode
"""
#returns a closed, filled area
return Mesh(vertices=vertices, indices=indices, mode='triangle_fan')
#return Mesh(vertices=vertices, indices=indices, mode='line_loop')
def dismiss_popup(self):
self._popup.dismiss()
def show_load(self):
content = LoadDialog(load=self.load, cancel=self.dismiss_popup)
self._popup = Popup(title="Carica File", content=content,
size_hint=(0.9, 0.9))
self._popup.open()
def show_save(self):
content = SaveDialog(save=self.save, cancel=self.dismiss_popup)
self._popup = Popup(title="Salva File", content=content,
size_hint=(0.9, 0.9))
self._popup.open()
def load(self, path, filename):
self.stringa=np.asmatrix(np.genfromtxt(os.path.join(path, filename[0]),delimiter=","))
print(self.stringa)
print(filename)
self.vada=np.size(self.stringa,0)-1
#print(self.vada)
self.root.ids.nomArch.theTxt.text=filename[0]
fig = mpl.figure.Figure(figsize=(self.mmma, self.mmmb))
axes = fig.gca()
from calcolatrice.stampafigura import disegna
disegna(self,self.stringa)
figure_wgt = self.root.ids['figure_wgt'] # MatplotFigure
figure_wgt.figure = fig
#with open(os.path.join(path, filename[0])) as stream:
#self.text_input.text = stream.read()
self.dismiss_popup()
def save(self, path, filename):
#with open(, 'w') as stream:
nome=self.root.ids.nomArch.theTxt.text
#print("dd"+nome+"dd")
strada=os.getcwd()+"\\" + nome
#print(os.getcwd())
#print(os.path.join(path, filename[0]))
#stream.write(self.stringa)
#print(strada)
np.savetxt(strada, self.stringa, delimiter=',', newline='\n')
self.dismiss_popup()
def salvaauto(self,*args):
if self.vada>0:
nome=self.root.ids.nomArch.theTxt.text
estensione=".csv"
strada=os.getcwd()+"\\" + nome
nomeTemp=nome
if nome=="":
k=0
nomeTemp="ciccione"+"0"+str(k)+str(estensione)
strada=os.getcwd()+"\\"+nomeTemp
while os.path.isfile(strada)==True:
nomeTemp="ciccione"+"0"+str(k)+str(estensione)
strada=os.getcwd()+"\\"+nomeTemp
k=k+1
#print(strada)
np.savetxt(strada, self.stringa, delimiter=',', newline='\n')
self.root.ids.nomArch.theTxt.text=nomeTemp
##########################################################################
title = "Disegnatore di Biancardi"
#stringa= MatrixProperty()
#Status=StringProperty()
def UndoZZZZ(self,*args):
if self.vada>0:
self.vada=self.vada-1
self.stringa=self.stringa[:-1,:]
fig = mpl.figure.Figure(figsize=(self.mmma, self.mmmb))
axes = fig.gca()
figure_wgt = self.root.ids['figure_wgt'] # MatplotFigure
figure_wgt.figure = fig
from calcolatrice.stampafigura import disegna
disegna(self,self.stringa)
self.root.ids.risoluzione.text="figure inserite %d"%self.vada
#self.stringa=np.matrix("42015.,3.,1.,48.,0.,0.,0.,0.,0.,0.,0.;4.,1.,0.,0.,0.,0.,6.,10.,6.,10.,0.;2.,-1.,0.,4.,0.,3.,0.,3.1415,0.,0.,0.")
def Resetta(self,*args):
if self.vada>0:
self.stringa=self.iniziale
#self.root.ids.schifo.text=print(self.stringa)
#print(self.stringa)
self.vada=0
#self.root.ids.schifo.text=""
self.root.ids.risoluzione.text="resettato"
fig = mpl.figure.Figure(figsize=(self.mmma, self.mmmb))
fig.clf()
figure_wgt = self.root.ids['figure_wgt'] # MatplotFigure
figure_wgt.figure = fig
self.root.ids.risoluzione.text="figure inserite %d"%self.vada
def SalvaDisegno(self,*args):
if self.vada>0:
#print(self.root.ids.figure_wgt.figure.figure)
#print(self.root.ids.figure_wgt.figure.bbox.bounds)
#print(self.root.ids.figure_wgt.figure.dpi)
#self.root.ids.figure_wgt.figure.savefig(filename)
nome=self.root.ids.nomArch.theTxt.text
estensione=".png"
strada=os.getcwd()+"\\" + nome
nomeTemp=nome
if nome=="":
k=0
nomeTemp="ciccione"+"0"+str(k)+estensione
strada=os.getcwd()+"\\"+nomeTemp
while os.path.isfile(strada)==True:
nomeTemp="ciccione"+"0"+str(k)+estensione
strada=os.getcwd()+"\\"+nomeTemp
k=k+1
#print(strada)
self.root.ids.nomArch.theTxt.text=nomeTemp
self.root.ids.figure_wgt.export_to_png(self.root.ids.nomArch.theTxt.text)
#from matplotlib.backends.backend_pdf import PdfPages
#with PdfPages('multipage_pdf.pdf') as pdf:
#pdf.savefig(self.root.ids.figure_wgt.figure)
def BottonePremutoNocciolo(self,*args):
if self.vuoifareancheilnocciolo==0:
self.vuoifareancheilnocciolo=1
self.iniziale[0,2]=1
elif self.vuoifareancheilnocciolo==1:
self.vuoifareancheilnocciolo=0
self.iniziale[0,2]=0
print('The checkbox is active')
def build(self):
# Matplotlib stuff, figure and plot
fig = mpl.figure.Figure(figsize=(self.mmma, self.mmmb))
axes = fig.gca()
#axes.set_xlim(0, 50)
#axes.grid(True)
#fig.clf()
axes.axis("on")
#axes.set_xlim(0, 50)
#axes.set_aspect('equal')
# Kivy stuff
root = Builder.load_file("nuovaForma.kv")
figure_wgt = root.ids['figure_wgt'] # MatplotFigure
figure_wgt.figure = fig
self.root=root
self.root.ids.risoluzione.text="figure inserite %d"%self.vada
return root
def __init__(self, **kwargs):
super(CalcolatriceApp, self).__init__(**kwargs)
self.mmma=2
self.mmmb=2
self.vada=0
self.scalatore=1000
self.kk=3
self.discretizzaarchicerchi=80
self.vuoifareancheilnocciolo=1
self.contorna=1
self.vuota=np.matrix("0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.")
self.iniziale=np.matrix("0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.")
self.iniziale[0,0]=42015.
self.iniziale[0,1]=self.kk
self.iniziale[0,2]=self.vuoifareancheilnocciolo
self.iniziale[0,3]=self.discretizzaarchicerchi
self.iniziale[0,4]=self.contorna
self.stringa=self.iniziale
self.vada=0
#print(self.stringa)
def Internet(*args):
"""
For documentation of the webbrowser module,
see http://docs.python.org/library/webbrowser.html
"""
import webbrowser
new = 2 # open in a new tab, if possible
# open a public URL, in this case, the webbrowser docs
url = "https://www.facebook.com/francescoscodesection\n"
webbrowser.open(url,new=new)
# open an HTML file on my own (Windows) computer
#url = "file://X:/MiscDev/language_links.html"
#webbrowser.open(url,new=new)
def Calcolalo(self,*args):
#import os
#os.chdir("C:\Users\Von Braun\Google Drive\mat2pylab\calcolatrice")
noncidevonoessereaste=1
from calcolatrice.misuras import guardasecisonoaste
noncidevonoessereaste=guardasecisonoaste(self)
if noncidevonoessereaste and 0:
self.stringa[0,2]=0
self.vuoifareancheilnocciolo=0
if self.vada>0:
import time
b0=time.clock()
self.salvaauto(self)
fig = mpl.figure.Figure(figsize=(self.mmma, self.mmmb))
axes = fig.gca()
figure_wgt = self.root.ids['figure_wgt'] # MatplotFigure
figure_wgt.figure = fig
#print(self.root.ids.figure_wgt.figure)
if self.root.ids.quantidiscreti.theTxt.text!="":
self.stringa[0,3]=int(self.root.ids.quantidiscreti.theTxt.text)
from calcolatrice.principale import codicesezione
self.root.ids.risoluzione.text=codicesezione(self)
#filename=self.root.ids.nomArch.theTxt.text
#mpl.draw()
#self.print_figure(self, filename)
b=time.clock()
print("tempo",round(b-b0,4))
#self.uscita="cacca"+"\n"
#self.root.ids.risoluzione.text=self.uscita
else:
self.root.ids.risoluzione.text="figure inserite %d"%self.vada
def CreaSalvataggio(self,*args):
if args[1]==1:
val2=0. if args[2]=="" else str(args[2])
val3=0. if args[3]=="" else str(args[3])
val4=0. if args[4]=="" else str(args[4])
val5=0. if args[5]=="" else str(args[5])
if val2+val3+val4+val5!=0:
#val="1 "+val2+" 0 "+val3+" "+val4+" "+val5
from calcolatrice.misuras import UnireMatriciCol
self.stringa=UnireMatriciCol(self.stringa,self.vuota)
self.vada=self.vada+1
self.stringa[self.vada,0]=1.
self.stringa[self.vada,1]=float(val2)
self.stringa[self.vada,3]=float(val3)
self.stringa[self.vada,4]=float(val4)
self.stringa[self.vada,5]=float(val5)
#print("10000",val)
#stringa=stringa+args[1]+" "+args[2]+" "+args[3]+" "+args[4]+";"
if args[1]==2:
val2=0. if args[2]=="" else float(args[2])
val3=0. if args[3]=="" else float(args[3])
val4=0. if args[4]=="" else float(args[4])
val5=0. if args[5]=="" else float(args[5])
val6=0. if args[6]=="" else float(args[6])
val7=0. if args[7]=="" else float(args[7])
if val2+val3+val4+val5+val6+val7!=0:
self.vada=self.vada+1
#val="2 "+val2+" 0 "+val3+" "+val4+" "+val5+" "+val6+" "+val7
from calcolatrice.misuras import UnireMatriciColNon
self.stringa=UnireMatriciColNon(self.stringa,self.vuota)
self.stringa[self.vada,0]=2.
self.stringa[self.vada,1]=float(val2)
self.stringa[self.vada,3]=float(val3)
self.stringa[self.vada,4]=float(val4)
self.stringa[self.vada,5]=float(val5)
self.stringa[self.vada,6]=float(val6)*np.pi/180
self.stringa[self.vada,7]=float(val7)*np.pi/180
#print("20000",val)
#stringa=stringa+args[1]+" "+args[2]+" "+args[3]+" "+args[4]+" "+args[5]+" "+args[6]+";"
if args[1]==2.5:
val2=0. if args[2]=="" else str(args[2])
val3=0. if args[3]=="" else str(args[3])
val4=0. if args[4]=="" else str(args[4])
val5=0. if args[5]=="" else str(args[5])
val6=0. if args[6]=="" else str(args[6])
val7=0. if args[7]=="" else str(args[7])
if val2+val3+val4+val5+val6+val7!=0:
self.vada=self.vada+1
#val="2 "+val2+" 0 "+val3+" "+val4+" "+val5+" "+val6+" "+val7
from calcolatrice.misuras import UnireMatriciColNon
self.stringa=UnireMatriciColNon(self.stringa,self.vuota)
self.stringa[self.vada,0]=2.5
self.stringa[self.vada,1]=float(val2)
self.stringa[self.vada,3]=float(val3)
self.stringa[self.vada,4]=float(val4)
self.stringa[self.vada,5]=float(val5)
self.stringa[self.vada,6]=float(val6)
self.stringa[self.vada,7]=float(val7)
if args[1]=="":
inter=0
else:
inter=int(args[1])
if printa:
print("inter",inter)
if inter>=3:
#print(*args)
#print(args[0])
#print(args[1])
#print(args[2])
#print(args[3])
val1=0. if args[1]=="" else float(args[1])
val2=0. if args[2]=="" else float(args[2])
val3=0. if args[3]=="" else str(args[3])
from calcolatrice.misuras import UnireMatriciCol,UnireMatriciRig
self.vada=self.vada+1
#val=val1 +" "+val2+" 0 "+val3
from calcolatrice.misuras import StringaToMatrix
val4=StringaToMatrix(val3)
if np.size(val4)==2*val1:
print("ok")
else:
val1=np.size(val4)
print("non sono giuste")
#self.root.ids.n
val5=np.matrix("0. 0. 0.")
val5[0,0]=float(val1)
val5[0,1]=float(val2)
#print(val4)
val6=UnireMatriciRig(val5,val4)
#self.stringa[self.vada,3]=float(val3)
#print(val6,type(val6))
from calcolatrice.misuras import UnireMatriciColNon
self.stringa=UnireMatriciColNon(self.stringa,val6)
#if printa:
#print(self.stringa)
#print("30000",val)
#print("fine",self.stringa)
#self.stringa=self.stringa+val+";"
#print("finish ",self.stxringa)
fig = mpl.figure.Figure(figsize=(self.mmma, self.mmmb))
axes = fig.gca()
figure_wgt = self.root.ids['figure_wgt'] # MatplotFigure
figure_wgt.figure = fig
from calcolatrice.stampafigura import disegna
disegna(self,self.stringa)
self.root.ids.risoluzione.text="figure inserite %d"%self.vada
#mpl.savefig("ciccio.png")
#stampa=self.stringa
#self.root.ids.schifo.text="booooooooh"
#self.root.ids.schifo.text=print(stampa)
#str.replace(stampa,";",";\n")
#disegna(self,numero1,kk,discretizzaarchicerchi)
return self
CalcolatriceApp().run()
"""
There are three keywords specific to Kv language:
app: always refers to the instance of your application.
root: refers to the base widget/template in the current rule
self: always refer to the current widget
"""
#print(Window._get_width())
#print(Window._get_height())
#Builder.unload_file(root,file1)
#Builder._clear_matchcache(self)
#unbind_property(self, widget, name)
#password=True
#multiline=False
#save drawing button
#read external file button
#read dxf button
#save file button
#uncomment the line below to include another kv file
#":include load.kv"
|
mit
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/contrib/learn/python/learn/estimators/linear_test.py
|
58
|
71789
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
      # For binary classification, the 2nd column of "predictions" holds the
      # probability of the positive class.
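      # For example, predictions of [[0.3, 0.7], [0.8, 0.2]] would be sliced
      # to [[0.7], [0.2]] below, then multiplied with the labels and summed.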
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
    # input_fn is the same as in testSdcaOptimizerRealValuedFeatures, except
    # that the two 1-dimensional dense features have been replaced by a single
    # 2-dimensional feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
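    # Each 'country' id is weighted by the corresponding value in the 'price'
    # sparse tensor (2., 3. and 1. in this input_fn).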
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
          # LinearRegressor requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
        [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
      Until b/29339026 is resolved, the bias gets regularized with the same
      global value as the other columns, so the expected weights are shifted
      to:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
|
mit
|
vybstat/scikit-learn
|
examples/text/hashing_vs_dict_vectorizer.py
|
284
|
3265
|
"""
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
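# Small hedged illustration of the hash-collision remark in the module docstring
# (the helper below is not part of the original example): with a deliberately
# tiny hash space, distinct tokens can land in the same column, so the reported
# number of "unique terms" undercounts the vocabulary.
def demo_hash_collisions():
    tiny_hasher = FeatureHasher(n_features=4, input_type="string")
    X_tiny = tiny_hasher.transform([["alpha", "beta", "gamma", "delta"]])
    # at most 4 distinct columns exist, so some of the 4 tokens may collide
    return n_nonzero_columns(X_tiny)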
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
|
bsd-3-clause
|
eWaterCycle/ewatercycle
|
ewatercycle/config/_validators.py
|
1
|
5383
|
"""List of config validators."""
import warnings
from collections.abc import Iterable
from functools import lru_cache
from pathlib import Path
class ValidationError(ValueError):
"""Custom validation error."""
# The code for this function was taken from matplotlib (v3.3) and modified
# to fit the needs of eWaterCycle. Matplotlib is licensed under the terms of
# the 'Python Software Foundation License'
# (https://www.python.org/psf/license)
def _make_type_validator(cls, *, allow_none=False):
"""Construct a type validator for `cls`.
Return a validator that converts inputs to *cls* or raises (and
possibly allows ``None`` as well).
"""
def validator(inp):
looks_like_none = isinstance(inp, str) and (inp.lower() == "none")
if allow_none and (inp is None or looks_like_none):
return None
try:
return cls(inp)
except ValueError as err:
if isinstance(cls, type):
raise ValidationError(
f"Could not convert {repr(inp)} to {cls.__name__}"
) from err
raise
validator.__name__ = f"validate_{cls.__name__}"
if allow_none:
validator.__name__ += "_or_None"
validator.__qualname__ = (
validator.__qualname__.rsplit(".", 1)[0] + "." + validator.__name__
)
return validator
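# Illustrative sketch (not part of the original module): how a validator built
# by _make_type_validator behaves. The demo name below is hypothetical.
def _demo_make_type_validator():
    """Show coercion and allow_none handling of a generated validator."""
    validate_int_demo = _make_type_validator(int, allow_none=True)
    assert validate_int_demo("3") == 3        # strings are coerced to the target type
    assert validate_int_demo("none") is None  # accepted because allow_none=True
    try:
        validate_int_demo("abc")              # not convertible -> ValidationError
    except ValidationError:
        pass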
# The code for this function was taken from matplotlib (v3.3) and modified
# to fit the needs of eWaterCycle. Matplotlib is licensed under the terms of
# the 'Python Software Foundation License'
# (https://www.python.org/psf/license)
@lru_cache()
def _listify_validator(
scalar_validator, allow_stringlist=False, *, n_items=None, docstring=None
):
"""Apply the validator to a list."""
def func(inp):
if isinstance(inp, str):
try:
inp = [
scalar_validator(val.strip())
for val in inp.split(",")
if val.strip()
]
except Exception:
if allow_stringlist:
# Sometimes, a list of colors might be a single string
# of single-letter colornames. So give that a shot.
inp = [
scalar_validator(val.strip())
for val in inp
if val.strip()
]
else:
raise
# Allow any ordered sequence type -- generators, np.ndarray, pd.Series
# -- but not sets, whose iteration order is non-deterministic.
elif isinstance(inp, Iterable) and not isinstance(
inp, (set, frozenset)
):
# The condition on this list comprehension will preserve the
# behavior of filtering out any empty strings (behavior was
# from the original validate_stringlist()), while allowing
# any non-string/text scalar values such as numbers and arrays.
inp = [
scalar_validator(val)
for val in inp
if not isinstance(val, str) or val
]
else:
raise ValidationError(
f"Expected str or other non-set iterable, but got {inp}"
)
if n_items is not None and len(inp) != n_items:
raise ValidationError(
f"Expected {n_items} values, "
f"but there are {len(inp)} values in {inp}"
)
return inp
try:
func.__name__ = "{}list".format(scalar_validator.__name__)
except AttributeError: # class instance.
func.__name__ = "{}List".format(type(scalar_validator).__name__)
func.__qualname__ = (
func.__qualname__.rsplit(".", 1)[0] + "." + func.__name__
)
    if docstring is None:
        docstring = scalar_validator.__doc__
func.__doc__ = docstring
return func
def validate_path(value, allow_none=False):
"""Return a `Path` object."""
if (value is None) and allow_none:
return value
try:
path = Path(value).expanduser().absolute()
except TypeError as err:
raise ValidationError(f"Expected a path, but got {value}") from err
else:
return path
validate_string = _make_type_validator(str)
validate_string_or_none = _make_type_validator(str, allow_none=True)
validate_stringlist = _listify_validator(
validate_string, docstring="Return a list of strings."
)
validate_int = _make_type_validator(int)
validate_int_or_none = _make_type_validator(int, allow_none=True)
validate_float = _make_type_validator(float)
validate_floatlist = _listify_validator(
validate_float, docstring="Return a list of floats."
)
validate_path_or_none = _make_type_validator(validate_path, allow_none=True)
validate_pathlist = _listify_validator(
validate_path, docstring="Return a list of paths."
)
validate_dict_parameterset = _make_type_validator(dict, allow_none=True)
_validators = {
"esmvaltool_config": validate_path_or_none,
"grdc_location": validate_path_or_none,
"container_engine": validate_string_or_none,
"singularity_dir": validate_path_or_none,
"output_dir": validate_path_or_none,
"parameterset_dir": validate_path_or_none,
"parameter_sets": validate_dict_parameterset,
"ewatercycle_config": validate_path_or_none,
}
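# Hedged usage sketch (the calling convention below is an assumption, not taken
# from the eWaterCycle sources): each entry of _validators maps a config key to
# the validator applied to its raw value.
def _demo_validate_config(raw_config):
    """Validate a plain dict of raw config values against _validators."""
    validated = {}
    for key, value in raw_config.items():
        validator = _validators.get(key)
        validated[key] = validator(value) if validator is not None else value
    return validated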
|
apache-2.0
|
karstenw/nodebox-pyobjc
|
examples/Extended Application/matplotlib/examples/subplots_axes_and_figures/fahrenheit_celsius_scales.py
|
1
|
1776
|
"""
=================================
Different scales on the same axes
=================================
Demo of how to display two scales on the left and right y axis.
This example uses the Fahrenheit and Celsius scales.
"""
import matplotlib.pyplot as plt
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
def fahrenheit2celsius(temp):
"""
Returns temperature in Celsius.
"""
return (5. / 9.) * (temp - 32)
def convert_ax_c_to_celsius(ax_f):
"""
Update second axis according with first axis.
"""
y1, y2 = ax_f.get_ylim()
ax_c.set_ylim(fahrenheit2celsius(y1), fahrenheit2celsius(y2))
ax_c.figure.canvas.draw()
fig, ax_f = plt.subplots()
ax_c = ax_f.twinx()
# automatically update ylim of ax2 when ylim of ax1 changes.
ax_f.callbacks.connect("ylim_changed", convert_ax_c_to_celsius)
ax_f.plot(np.linspace(-40, 120, 100))
ax_f.set_xlim(0, 100)
ax_f.set_title('Two scales: Fahrenheit and Celsius')
ax_f.set_ylabel('Fahrenheit')
ax_c.set_ylabel('Celsius')
pltshow(plt)
|
mit
|
tmhm/scikit-learn
|
sklearn/utils/tests/test_multiclass.py
|
128
|
12853
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
msg_regex = 'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
|
bsd-3-clause
|
sposs/DIRAC
|
Core/Utilities/Graphs/Legend.py
|
11
|
7713
|
########################################################################
# $HeadURL$
########################################################################
""" Legend encapsulates a graphical plot legend drawing tool
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
from matplotlib.patches import Rectangle
from matplotlib.text import Text
from DIRAC.Core.Utilities.Graphs.GraphUtilities import *
from DIRAC.Core.Utilities.Graphs.Palette import Palette
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
import types
class Legend:
def __init__(self,data=None,axes=None,*aw,**kw):
self.labels = {}
if type(data) == types.DictType:
for label,ddict in data.items():
#self.labels[label] = pretty_float(max([ float(x) for x in ddict.values() if x ]) )
self.labels[label] = "%.1f" % max([ float(x) for x in ddict.values() if x ])
elif type(data) == types.InstanceType and data.__class__ == GraphData:
self.labels = data.getLabels()
else:
self.labels = data
#self.labels.reverse()
self.ax = axes
self.canvas = None
if self.ax:
self.canvas = self.ax.figure.canvas
self.ax.set_axis_off()
self.prefs = evalPrefs(*aw,**kw)
self.palette = Palette()
if self.labels and self.labels[0][0] != 'NoLabels':
percent_flag = self.prefs.get('legend_unit','')
if percent_flag == "%":
sum_value = sum(data.label_values)
if sum_value > 0.:
self.labels = [(l,v/sum_value*100.) for l,v in self.labels ]
self.__get_column_width()
def dumpPrefs(self):
for key in self.prefs:
print key.rjust(20),':',str(self.prefs[key]).ljust(40)
def setLabels(self,labels):
self.labels = labels
def setAxes(self,axes):
self.ax = axes
self.canvas = self.ax.figure.canvas
self.ax.set_axis_off()
def getLegendSize(self):
self.__get_column_width()
legend_position = self.prefs['legend_position']
legend_width = float(self.prefs['legend_width'])
legend_height = float(self.prefs['legend_height'])
legend_padding = float(self.prefs['legend_padding'])
legend_text_size = self.prefs.get('legend_text_size',self.prefs['text_size'])
legend_text_padding = self.prefs.get('legend_text_padding',self.prefs['text_padding'])
if legend_position in ['right','left']:
# One column in case of vertical legend
legend_width = self.column_width+legend_padding
nLabels = len(self.labels)
legend_max_height = nLabels*(legend_text_size+legend_text_padding)
elif legend_position == 'bottom':
nColumns = min(self.prefs['legend_max_columns'],int(legend_width/self.column_width))
nLabels = len(self.labels)
maxRows = self.prefs['legend_max_rows']
nRows_ax = int(legend_height/1.6/self.prefs['text_size'])
nRows_label = nLabels/nColumns + (nLabels%nColumns != 0)
nRows = max(1,min(min(nRows_label,maxRows),nRows_ax ))
text_padding = self.prefs['text_padding']
text_padding = pixelToPoint(text_padding,self.prefs['dpi'])
legend_height = min(legend_height,(nRows*(self.text_size+text_padding)+text_padding))
legend_max_height = nLabels*(self.text_size+text_padding)
return legend_width,legend_height,legend_max_height
def __get_legend_text_size(self):
dpi = self.prefs['dpi']
text_size = self.prefs['text_size']
text_padding = self.prefs['text_padding']
legend_text_size = self.prefs.get('legend_text_size',text_size)
legend_text_padding = self.prefs.get('legend_text_padding',text_padding)
return legend_text_size,legend_text_padding
def __get_column_width(self):
max_length = 0
max_column_text = ''
flag = self.prefs.get('legend_numbers',True)
unit = self.prefs.get('legend_unit',False)
for label,num in self.labels:
if not flag: num = None
if num is not None:
column_length = len(str(label)+str(num)) + 1
else:
column_length = len(str(label)) + 1
if column_length > max_length:
max_length = column_length
if flag:
if type(num) == types.IntType or type(num) == types.LongType:
numString = str(num)
else:
numString = "%.1f" % float(num)
max_column_text = '%s %s' % (str(label),numString)
if unit:
max_column_text += "%"
else:
max_column_text = '%s ' % str(label)
figure = Figure()
canvas = FigureCanvasAgg(figure)
dpi = self.prefs['dpi']
figure.set_dpi( dpi )
l_size,l_padding = self.__get_legend_text_size()
self.text_size = pixelToPoint(l_size,dpi)
text = Text(0.,0.,text=max_column_text,size=self.text_size)
text.set_figure(figure)
bbox = text.get_window_extent(canvas.get_renderer())
self.column_width = bbox.width+6*l_size
def draw(self):
dpi = self.prefs['dpi']
ax_xsize = self.ax.get_window_extent().width
ax_ysize = self.ax.get_window_extent().height
nLabels = len(self.labels)
nColumns = min(self.prefs['legend_max_columns'],int(ax_xsize/self.column_width))
maxRows = self.prefs['legend_max_rows']
nRows_ax = int(ax_ysize/1.6/self.prefs['text_size'])
nRows_label = nLabels/nColumns + (nLabels%nColumns != 0)
nRows = max(1,min(min(nRows_label,maxRows),nRows_ax ))
maxLabels = nColumns*nRows - 1
self.ax.set_xlim(0.,float(ax_xsize))
self.ax.set_ylim(-float(ax_ysize),0.)
legend_text_size,legend_text_padding = self.__get_legend_text_size()
legend_text_size_point = pixelToPoint(legend_text_size,dpi)
box_width = legend_text_size
legend_offset = (ax_xsize - nColumns*self.column_width)/2
nc = 0
#self.labels.reverse()
for label,num in self.labels:
num_flag = self.prefs.get('legend_numbers',True)
percent_flag = self.prefs.get('legend_unit','')
if num_flag:
if percent_flag == "%":
num = "%.1f" % num +'%'
else:
num = "%.1f" % num
else:
num = None
color = self.palette.getColor(label)
row = nc%nRows
column = nc/nRows
if row == nRows-1 and column == nColumns-1 and nc != nLabels-1:
last_text = '... plus %d more' % (nLabels-nc)
self.ax.text(float(column*self.column_width)+legend_offset,-float(row*1.6*box_width),
last_text,horizontalalignment='left',
verticalalignment='top',size=legend_text_size_point)
break
else:
self.ax.text(float(column*self.column_width)+2.*box_width+legend_offset,-row*1.6*box_width,
str(label),horizontalalignment='left',
verticalalignment='top',size=legend_text_size_point)
if num is not None:
self.ax.text(float((column+1)*self.column_width)-2*box_width+legend_offset,-float(row*1.6*box_width),
str(num),horizontalalignment='right',
verticalalignment='top',size=legend_text_size_point)
box = Rectangle((float(column*self.column_width)+legend_offset,-float(row*1.6*box_width)-box_width),
box_width,box_width)
box.set_ec('black')
box.set_linewidth(pixelToPoint(0.5,dpi))
box.set_fc(color)
self.ax.add_patch(box)
nc += 1
|
gpl-3.0
|
hrjn/scikit-learn
|
examples/linear_model/plot_ard.py
|
32
|
3912
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
We also plot predictions and uncertainties for ARD
for one dimensional regression using polynomial feature expansion.
Note the uncertainty starts going up on the right side of the plot.
This is because these test samples are outside of the range of the training
samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights, the histogram of the
# weights, and predictions with standard deviations
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
# Plotting some predictions for polynomial regression
def f(x, noise_amount):
y = np.sqrt(x) * np.sin(x)
noise = np.random.normal(0, 1, len(x))
return y + noise_amount * noise
degree = 10
X = np.linspace(0, 10, 100)
y = f(X, noise_amount=1)
clf_poly = ARDRegression(threshold_lambda=1e5)
clf_poly.fit(np.vander(X, degree), y)
X_plot = np.linspace(0, 11, 25)
y_plot = f(X_plot, noise_amount=0)
y_mean, y_std = clf_poly.predict(np.vander(X_plot, degree), return_std=True)
plt.figure(figsize=(6, 5))
plt.errorbar(X_plot, y_mean, y_std, color='navy',
label="Polynomial ARD", linewidth=2)
plt.plot(X_plot, y_plot, color='gold', linewidth=2,
label="Ground Truth")
plt.ylabel("Output y")
plt.xlabel("Feature X")
plt.legend(loc="lower left")
plt.show()
|
bsd-3-clause
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/IPython/lib/tests/test_latextools.py
|
8
|
3869
|
# encoding: utf-8
"""Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import nose.tools as nt
from IPython.lib import latextools
from IPython.testing.decorators import onlyif_cmds_exist, skipif_not_matplotlib
from IPython.utils.process import FindCmdError
def test_latex_to_png_dvipng_fails_when_no_cmd():
"""
`latex_to_png_dvipng` should return None when there is no required command
"""
for command in ['latex', 'dvipng']:
yield (check_latex_to_png_dvipng_fails_when_no_cmd, command)
def check_latex_to_png_dvipng_fails_when_no_cmd(command):
def mock_find_cmd(arg):
if arg == command:
raise FindCmdError
with patch.object(latextools, "find_cmd", mock_find_cmd):
nt.assert_equal(latextools.latex_to_png_dvipng("whatever", True),
None)
@onlyif_cmds_exist('latex', 'dvipng')
def test_latex_to_png_dvipng_runs():
"""
Test that latex_to_png_dvipng just runs without error.
"""
def mock_kpsewhich(filename):
nt.assert_equal(filename, "breqn.sty")
return None
for (s, wrap) in [(u"$$x^2$$", False), (u"x^2", True)]:
yield (latextools.latex_to_png_dvipng, s, wrap)
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
yield (latextools.latex_to_png_dvipng, s, wrap)
@skipif_not_matplotlib
def test_latex_to_png_mpl_runs():
"""
Test that latex_to_png_mpl just runs without error.
"""
def mock_kpsewhich(filename):
nt.assert_equal(filename, "breqn.sty")
return None
for (s, wrap) in [("$x^2$", False), ("x^2", True)]:
yield (latextools.latex_to_png_mpl, s, wrap)
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
yield (latextools.latex_to_png_mpl, s, wrap)
@skipif_not_matplotlib
def test_latex_to_html():
img = latextools.latex_to_html("$x^2$")
nt.assert_in("data:image/png;base64,iVBOR", img)
def test_genelatex_no_wrap():
"""
Test genelatex with wrap=False.
"""
def mock_kpsewhich(filename):
assert False, ("kpsewhich should not be called "
"(called with {0})".format(filename))
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equal(
'\n'.join(latextools.genelatex("body text", False)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
body text
\end{document}''')
def test_genelatex_wrap_with_breqn():
"""
Test genelatex with wrap=True for the case breqn.sty is installed.
"""
def mock_kpsewhich(filename):
nt.assert_equal(filename, "breqn.sty")
return "path/to/breqn.sty"
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equal(
'\n'.join(latextools.genelatex("x^2", True)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{breqn}
\pagestyle{empty}
\begin{document}
\begin{dmath*}
x^2
\end{dmath*}
\end{document}''')
def test_genelatex_wrap_without_breqn():
"""
Test genelatex with wrap=True for the case breqn.sty is not installed.
"""
def mock_kpsewhich(filename):
nt.assert_equal(filename, "breqn.sty")
return None
with patch.object(latextools, "kpsewhich", mock_kpsewhich):
nt.assert_equal(
'\n'.join(latextools.genelatex("x^2", True)),
r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
$$x^2$$
\end{document}''')
|
mit
|
pratapvardhan/scikit-learn
|
sklearn/datasets/svmlight_format.py
|
19
|
16759
|
"""This module implements a loader and dumper for the svmlight format
This format is a text-based format, with one sample per line. It does
not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable to
predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
"""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from contextlib import closing
import io
import os.path
import numpy as np
import scipy.sparse as sp
from ._svmlight_format import _load_svmlight_file
from .. import __version__
from ..externals import six
from ..externals.six import u, b
from ..externals.six.moves import range, zip
from ..utils import check_array
from ..utils.fixes import frombuffer_empty
def load_svmlight_file(f, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load datasets in the svmlight / libsvm format into sparse CSR matrix
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
This format is used as the default format for both svmlight and the
libsvm command line programs.
    Parsing a text-based source can be expensive. When working
    repeatedly on the same dataset, it is recommended to wrap this
    loader with joblib.Memory.cache to store a memmapped backup of the
CSR results of the first call and benefit from the near instantaneous
loading of memmapped structures for the subsequent calls.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
This implementation is written in Cython and is reasonably fast.
However, a faster API-compatible loader is also available at:
https://github.com/mblondel/svmlight-loader
Parameters
----------
f : {str, file-like, int}
(Path to) a file to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. A file-like or file descriptor will not be closed
by this function. A file-like object must be opened in binary mode.
n_features : int or None
The number of features to use. If None, it will be inferred. This
argument is useful to load several files that are subsets of a
bigger sliced dataset: each subset might not have examples of
every feature, hence the inferred shape might vary from one
slice to another.
multilabel : boolean, optional, default False
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based : boolean or "auto", optional, default "auto"
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id : boolean, default False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
X: scipy.sparse matrix of shape (n_samples, n_features)
    y: ndarray of shape (n_samples,), or, in the multilabel case, a list of
tuples of length n_samples.
query_id: array of shape (n_samples,)
query_id for each sample. Only returned when query_id is set to
True.
See also
--------
load_svmlight_files: similar function for loading multiple files in this
format, enforcing the same number of features/columns on all of them.
Examples
--------
To use joblib.Memory to cache the svmlight file::
from sklearn.externals.joblib import Memory
from sklearn.datasets import load_svmlight_file
mem = Memory("./mycache")
@mem.cache
def get_data():
data = load_svmlight_file("mysvmlightfile")
return data[0], data[1]
X, y = get_data()
"""
return tuple(load_svmlight_files([f], n_features, dtype, multilabel,
zero_based, query_id))
def _gen_open(f):
if isinstance(f, int): # file descriptor
return io.open(f, "rb", closefd=False)
elif not isinstance(f, six.string_types):
raise TypeError("expected {str, int, file-like}, got %s" % type(f))
_, ext = os.path.splitext(f)
if ext == ".gz":
import gzip
return gzip.open(f, "rb")
elif ext == ".bz2":
from bz2 import BZ2File
return BZ2File(f, "rb")
else:
return open(f, "rb")
def _open_and_load(f, dtype, multilabel, zero_based, query_id):
if hasattr(f, "read"):
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# XXX remove closing when Python 2.7+/3.1+ required
else:
with closing(_gen_open(f)) as f:
actual_dtype, data, ind, indptr, labels, query = \
_load_svmlight_file(f, dtype, multilabel, zero_based, query_id)
# convert from array.array, give data the right dtype
if not multilabel:
labels = frombuffer_empty(labels, np.float64)
data = frombuffer_empty(data, actual_dtype)
indices = frombuffer_empty(ind, np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc) # never empty
query = frombuffer_empty(query, np.intc)
data = np.asarray(data, dtype=dtype) # no-op for float{32,64}
return data, indices, indptr, labels, query
def load_svmlight_files(files, n_features=None, dtype=np.float64,
multilabel=False, zero_based="auto", query_id=False):
"""Load dataset from multiple files in SVMlight format
This function is equivalent to mapping load_svmlight_file over a list of
files, except that the results are concatenated into a single, flat list
    and the sample vectors are constrained to all have the same number of
features.
In case the file contains a pairwise preference constraint (known
as "qid" in the svmlight format) these are ignored unless the
query_id parameter is set to True. These pairwise preference
constraints can be used to constraint the combination of samples
when using pairwise loss functions (as is the case in some
learning to rank problems) so that only pairs with the same
query_id value are considered.
Parameters
----------
files : iterable over {str, file-like, int}
(Paths of) files to load. If a path ends in ".gz" or ".bz2", it will
be uncompressed on the fly. If an integer is passed, it is assumed to
be a file descriptor. File-likes and file descriptors will not be
closed by this function. File-like objects must be opened in binary
mode.
n_features: int or None
The number of features to use. If None, it will be inferred from the
maximum column index occurring in any of the files.
This can be set to a higher value than the actual number of features
in any of the input files, but setting it to a lower value will cause
an exception to be raised.
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
zero_based: boolean or "auto", optional
Whether column indices in f are zero-based (True) or one-based
(False). If column indices are one-based, they are transformed to
zero-based to match Python/NumPy conventions.
If set to "auto", a heuristic check is applied to determine this from
the file contents. Both kinds of files occur "in the wild", but they
are unfortunately not self-identifying. Using "auto" or True should
always be safe.
query_id: boolean, defaults to False
If True, will return the query_id array for each file.
dtype : numpy data type, default np.float64
Data type of dataset to be loaded. This will be the data type of the
output numpy arrays ``X`` and ``y``.
Returns
-------
[X1, y1, ..., Xn, yn]
where each (Xi, yi) pair is the result from load_svmlight_file(files[i]).
If query_id is set to True, this will return instead [X1, y1, q1,
..., Xn, yn, qn] where (Xi, yi, qi) is the result from
load_svmlight_file(files[i])
Notes
-----
When fitting a model to a matrix X_train and evaluating it against a
matrix X_test, it is essential that X_train and X_test have the same
number of features (X_train.shape[1] == X_test.shape[1]). This may not
be the case if you load the files individually with load_svmlight_file.
See also
--------
load_svmlight_file
"""
r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id))
for f in files]
if (zero_based is False
or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)):
for ind in r:
indices = ind[1]
indices -= 1
n_f = max(ind[1].max() for ind in r) + 1
if n_features is None:
n_features = n_f
elif n_features < n_f:
raise ValueError("n_features was set to {},"
" but input file contains {} features"
.format(n_features, n_f))
result = []
for data, indices, indptr, y, query_values in r:
shape = (indptr.shape[0] - 1, n_features)
X = sp.csr_matrix((data, indices, indptr), shape)
X.sort_indices()
result += X, y
if query_id:
result.append(query_values)
return result
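# Hedged sketch of the Notes in the docstring above (the file names are
# hypothetical): loading the train and test files together guarantees that both
# matrices end up with the same n_features.
def _demo_joint_load(train_path="train.svmlight", test_path="test.svmlight"):
    X_train, y_train, X_test, y_test = load_svmlight_files([train_path, test_path])
    assert X_train.shape[1] == X_test.shape[1]
    return X_train, y_train, X_test, y_test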
def _dump_svmlight(X, y, f, multilabel, one_based, comment, query_id):
X_is_sp = int(hasattr(X, "tocsr"))
y_is_sp = int(hasattr(y, "tocsr"))
if X.dtype.kind == 'i':
value_pattern = u("%d:%d")
else:
value_pattern = u("%d:%.16g")
if y.dtype.kind == 'i':
label_pattern = u("%d")
else:
label_pattern = u("%.16g")
line_pattern = u("%s")
if query_id is not None:
line_pattern += u(" qid:%d")
line_pattern += u(" %s\n")
if comment:
f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n"
% __version__))
f.write(b("# Column indices are %s-based\n"
% ["zero", "one"][one_based]))
f.write(b("#\n"))
f.writelines(b("# %s\n" % line) for line in comment.splitlines())
for i in range(X.shape[0]):
if X_is_sp:
span = slice(X.indptr[i], X.indptr[i + 1])
row = zip(X.indices[span], X.data[span])
else:
nz = X[i] != 0
row = zip(np.where(nz)[0], X[i, nz])
s = " ".join(value_pattern % (j + one_based, x) for j, x in row)
if multilabel:
if y_is_sp:
nz_labels = y[i].nonzero()[1]
else:
nz_labels = np.where(y[i] != 0)[0]
labels_str = ",".join(label_pattern % j for j in nz_labels)
else:
if y_is_sp:
labels_str = label_pattern % y.data[i]
else:
labels_str = label_pattern % y[i]
if query_id is not None:
feat = (labels_str, query_id[i], s)
else:
feat = (labels_str, s)
f.write((line_pattern % feat).encode('ascii'))
def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None,
multilabel=False):
"""Dump the dataset in svmlight / libsvm file format.
This format is a text-based format, with one sample per line. It does
    not store zero-valued features and hence is suitable for sparse datasets.
The first element of each line can be used to store a target variable
to predict.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : {array-like, sparse matrix}, shape = [n_samples (, n_labels)]
Target values. Class labels must be an
integer or float, or array-like objects of integer or float for
multilabel classifications.
f : string or file-like in binary mode
If string, specifies the path that will contain the data.
If file-like, data will be written to f. f should be opened in binary
mode.
zero_based : boolean, optional
Whether column indices should be written zero-based (True) or one-based
(False).
comment : string, optional
Comment to insert at the top of the file. This should be either a
Unicode string, which will be encoded as UTF-8, or an ASCII byte
string.
If a comment is given, then it will be preceded by one that identifies
the file as having been dumped by scikit-learn. Note that not all
tools grok comments in SVMlight files.
query_id : array-like, shape = [n_samples]
Array containing pairwise preference constraints (qid in svmlight
format).
multilabel: boolean, optional
Samples may have several labels each (see
http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html)
.. versionadded:: 0.17
parameter *multilabel* to support multilabel datasets.
"""
if comment is not None:
# Convert comment string to list of lines in UTF-8.
# If a byte string is passed, then check whether it's ASCII;
# if a user wants to get fancy, they'll have to decode themselves.
# Avoid mention of str and unicode types for Python 3.x compat.
if isinstance(comment, bytes):
comment.decode("ascii") # just for the exception
else:
comment = comment.encode("utf-8")
if six.b("\0") in comment:
raise ValueError("comment string contains NUL byte")
yval = check_array(y, accept_sparse='csr', ensure_2d=False)
if sp.issparse(yval):
if yval.shape[1] != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples, 1),"
" got %r" % (yval.shape,))
else:
if yval.ndim != 1 and not multilabel:
raise ValueError("expected y of shape (n_samples,), got %r"
% (yval.shape,))
Xval = check_array(X, accept_sparse='csr')
if Xval.shape[0] != yval.shape[0]:
raise ValueError("X.shape[0] and y.shape[0] should be the same, got"
" %r and %r instead." % (Xval.shape[0], yval.shape[0]))
# We had some issues with CSR matrices with unsorted indices (e.g. #1501),
# so sort them here, but first make sure we don't modify the user's X.
# TODO We can do this cheaper; sorted_indices copies the whole matrix.
if yval is y and hasattr(yval, "sorted_indices"):
y = yval.sorted_indices()
else:
y = yval
if hasattr(y, "sort_indices"):
y.sort_indices()
if Xval is X and hasattr(Xval, "sorted_indices"):
X = Xval.sorted_indices()
else:
X = Xval
if hasattr(X, "sort_indices"):
X.sort_indices()
if query_id is not None:
query_id = np.asarray(query_id)
if query_id.shape[0] != y.shape[0]:
raise ValueError("expected query_id of shape (n_samples,), got %r"
% (query_id.shape,))
one_based = not zero_based
if hasattr(f, "write"):
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
else:
with open(f, "wb") as f:
_dump_svmlight(X, y, f, multilabel, one_based, comment, query_id)
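# Hedged roundtrip sketch (illustrative only; the temporary file name is an
# assumption): dump a tiny dense matrix and read it back as a sparse CSR matrix.
def _demo_svmlight_roundtrip(path="/tmp/svmlight_demo.txt"):
    X_in = np.array([[0.0, 1.5], [2.0, 0.0]])
    y_in = np.array([1, -1])
    dump_svmlight_file(X_in, y_in, path, zero_based=True)
    X_out, y_out = load_svmlight_file(path, zero_based=True)
    # zero entries are not stored on disk, so X_out comes back sparse
    assert (X_out.toarray() == X_in).all()
    assert (y_out == y_in).all()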
|
bsd-3-clause
|