repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values) |
---|---|---|---|---|---|
sssllliang/BuildingMachineLearningSystemsWithPython | ch05/log_reg_example.py | 24 | 3203 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from data import CHART_DIR
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot
np.random.seed(3)
num_per_class = 40
X = np.hstack((norm.rvs(2, size=num_per_class, scale=2),
norm.rvs(8, size=num_per_class, scale=3)))
y = np.hstack((np.zeros(num_per_class),
np.ones(num_per_class)))
def lr_model(clf, X):
return 1.0 / (1.0 + np.exp(-(clf.intercept_ + clf.coef_ * X)))
from sklearn.linear_model import LogisticRegression
logclf = LogisticRegression()
print(logclf)
logclf.fit(X.reshape(num_per_class * 2, 1), y)
print(np.exp(logclf.intercept_), np.exp(logclf.coef_.ravel()))
print("P(x=-1)=%.2f\tP(x=7)=%.2f" %
(lr_model(logclf, -1), lr_model(logclf, 7)))
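# Added illustration (not part of the original example): the decision boundary
# of the fitted model is the feature value where the predicted probability is
# 0.5, i.e. where intercept_ + coef_ * x = 0. The name `x_boundary` is
# introduced here purely for this sketch.
x_boundary = -logclf.intercept_[0] / logclf.coef_.ravel()[0]
print("decision boundary at x=%.2f, P=%.2f" %
      (x_boundary, lr_model(logclf, x_boundary).ravel()[0]))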
X_test = np.arange(-5, 20, 0.1)
pyplot.figure(figsize=(10, 4))
pyplot.xlim((-5, 20))
pyplot.scatter(X, y, c=y)
pyplot.xlabel("feature value")
pyplot.ylabel("class")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.savefig(
os.path.join(CHART_DIR, "log_reg_example_data.png"), bbox_inches="tight")
def lin_model(clf, X):
return clf.intercept_ + clf.coef_ * X
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
print(clf)
clf.fit(X.reshape(num_per_class * 2, 1), y)
X_odds = np.arange(0, 1, 0.001)
pyplot.figure(figsize=(10, 4))
pyplot.subplot(1, 2, 1)
pyplot.scatter(X, y, c=y)
pyplot.plot(X_test, lin_model(clf, X_test))
pyplot.xlabel("feature value")
pyplot.ylabel("class")
pyplot.title("linear fit on original data")
pyplot.grid(True, linestyle='-', color='0.75')
X_ext = np.hstack((X, norm.rvs(20, size=100, scale=5)))
y_ext = np.hstack((y, np.ones(100)))
clf = LinearRegression()
clf.fit(X_ext.reshape(num_per_class * 2 + 100, 1), y_ext)
pyplot.subplot(1, 2, 2)
pyplot.scatter(X_ext, y_ext, c=y_ext)
pyplot.plot(X_ext, lin_model(clf, X_ext))
pyplot.xlabel("feature value")
pyplot.ylabel("class")
pyplot.title("linear fit on additional data")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.savefig(
os.path.join(CHART_DIR, "log_reg_log_linear_fit.png"), bbox_inches="tight")
pyplot.figure(figsize=(10, 4))
pyplot.xlim((-5, 20))
pyplot.scatter(X, y, c=y)
pyplot.plot(X_test, lr_model(logclf, X_test).ravel())
pyplot.plot(X_test, np.ones(X_test.shape[0]) * 0.5, "--")
pyplot.xlabel("feature value")
pyplot.ylabel("class")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.savefig(
os.path.join(CHART_DIR, "log_reg_example_fitted.png"), bbox_inches="tight")
X = np.arange(0, 1, 0.001)
pyplot.figure(figsize=(10, 4))
pyplot.subplot(1, 2, 1)
pyplot.xlim((0, 1))
pyplot.ylim((0, 10))
pyplot.plot(X, X / (1 - X))
pyplot.xlabel("P")
pyplot.ylabel("odds = P / (1-P)")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.subplot(1, 2, 2)
pyplot.xlim((0, 1))
pyplot.plot(X, np.log(X / (1 - X)))
pyplot.xlabel("P")
pyplot.ylabel("log(odds) = log(P / (1-P))")
pyplot.grid(True, linestyle='-', color='0.75')
pyplot.savefig(
os.path.join(CHART_DIR, "log_reg_log_odds.png"), bbox_inches="tight")
| mit |
Tong-Chen/scikit-learn | examples/decomposition/plot_image_denoising.py | 8 | 5773 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using online :ref:`DictionaryLearning` and
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is to look
at the difference between the reconstruction and the original image. If the
reconstruction is perfect, this difference will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is, in addition, closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import pylab as pl
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
pl.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
pl.subplot(10, 10, i + 1)
pl.imshow(comp.reshape(patch_size), cmap=pl.cm.gray_r,
interpolation='nearest')
pl.xticks(())
pl.yticks(())
pl.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
pl.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
pl.figure(figsize=(5, 3.3))
pl.subplot(1, 2, 1)
pl.title('Image')
pl.imshow(image, vmin=0, vmax=1, cmap=pl.cm.gray, interpolation='nearest')
pl.xticks(())
pl.yticks(())
pl.subplot(1, 2, 2)
difference = image - reference
pl.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
pl.imshow(difference, vmin=-0.5, vmax=0.5, cmap=pl.cm.PuOr,
interpolation='nearest')
pl.xticks(())
pl.yticks(())
pl.suptitle(title, size=16)
pl.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
pl.show()
| bsd-3-clause |
Unidata/MetPy | v1.0/_downloads/7b1d8e864fd4783fdaff1a83cdf9c52f/Find_Natural_Neighbors_Verification.py | 6 | 2521 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Find Natural Neighbors Verification
===================================
Finding natural neighbors in a triangulation
A triangle is a natural neighbor of a point if that point is within a circumscribed
circle ("circumcircle") containing the triangle.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Delaunay
from metpy.interpolate.geometry import circumcircle_radius, find_natural_neighbors
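# Added helper (for illustration only; not part of the original example): a
# standard computational-geometry in-circumcircle test matching the definition
# in the docstring above. For a triangle with counter-clockwise vertices
# a, b, c, the point p lies inside the circumcircle exactly when the 3x3
# determinant below is positive.
def point_in_circumcircle(a, b, c, p):
    """Return True if p is inside the circumcircle of the CCW triangle (a, b, c)."""
    m = np.array([
        [a[0] - p[0], a[1] - p[1], (a[0] - p[0]) ** 2 + (a[1] - p[1]) ** 2],
        [b[0] - p[0], b[1] - p[1], (b[0] - p[0]) ** 2 + (b[1] - p[1]) ** 2],
        [c[0] - p[0], c[1] - p[1], (c[0] - p[0]) ** 2 + (c[1] - p[1]) ** 2],
    ])
    return np.linalg.det(m) > 0

# Quick sanity check with made-up points: (0.5, 0.5) lies inside the
# circumcircle of the unit right triangle, while (5, 5) does not.
assert point_in_circumcircle((0, 0), (1, 0), (0, 1), (0.5, 0.5))
assert not point_in_circumcircle((0, 0), (1, 0), (0, 1), (5.0, 5.0))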
# Create test observations, test points, and plot the triangulation and points.
gx, gy = np.meshgrid(np.arange(0, 20, 4), np.arange(0, 20, 4))
pts = np.vstack([gx.ravel(), gy.ravel()]).T
tri = Delaunay(pts)
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
test_points = np.array([[2, 2], [5, 10], [12, 13.4], [12, 8], [20, 20]])
for i, (x, y) in enumerate(test_points):
ax.plot(x, y, 'k.', markersize=6)
ax.annotate('test ' + str(i), xy=(x, y))
###########################################
# Since finding natural neighbors already calculates circumcenters, return
# that information for later use.
#
# The key of the neighbors dictionary refers to the test point index, and the list of integers
# are the triangles that are natural neighbors of that particular test point.
#
# Since point 4 is far away from the triangulation, it has no natural neighbors.
# Point 3 is at the confluence of several triangles so it has many natural neighbors.
neighbors, circumcenters = find_natural_neighbors(tri, test_points)
print(neighbors)
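# Added illustration (not part of the original example): the dictionary maps
# each test-point index to the triangles whose circumcircles contain that
# point, so an empty entry means the point has no natural neighbors (as for
# test point 4 above).
for test_idx, tri_indices in sorted(neighbors.items()):
    print('test point {}: {} natural neighbor triangle(s)'.format(test_idx, len(tri_indices)))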
###########################################
# We can plot all of the triangles as well as the circles representing the circumcircles
#
fig, ax = plt.subplots(figsize=(15, 10))
for i, inds in enumerate(tri.simplices):
pts = tri.points[inds]
x, y = np.vstack((pts, pts[0])).T
ax.plot(x, y)
ax.annotate(i, xy=(np.mean(x), np.mean(y)))
# Using circumcenters and calculated circumradii, plot the circumcircles
for idx, cc in enumerate(circumcenters):
ax.plot(cc[0], cc[1], 'k.', markersize=5)
circ = plt.Circle(cc, circumcircle_radius(*tri.points[tri.simplices[idx]]),
edgecolor='k', facecolor='none', transform=fig.axes[0].transData)
ax.add_artist(circ)
ax.set_aspect('equal', 'datalim')
plt.show()
| bsd-3-clause |
toobaz/pandas | pandas/tests/io/test_stata.py | 2 | 69256 | from collections import OrderedDict
import datetime as dt
from datetime import datetime
import gzip
import io
import os
import struct
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas.core.frame import DataFrame, Series
import pandas.util.testing as tm
from pandas.io.parsers import read_csv
from pandas.io.stata import (
InvalidColumnName,
PossiblePrecisionLoss,
StataMissingValue,
StataReader,
read_stata,
)
@pytest.fixture
def dirpath(datapath):
return datapath("io", "data")
@pytest.fixture
def parsed_114(dirpath):
dta14_114 = os.path.join(dirpath, "stata5_114.dta")
parsed_114 = read_stata(dta14_114, convert_dates=True)
parsed_114.index.name = "index"
return parsed_114
class TestStata:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "data")
self.dta1_114 = os.path.join(self.dirpath, "stata1_114.dta")
self.dta1_117 = os.path.join(self.dirpath, "stata1_117.dta")
self.dta2_113 = os.path.join(self.dirpath, "stata2_113.dta")
self.dta2_114 = os.path.join(self.dirpath, "stata2_114.dta")
self.dta2_115 = os.path.join(self.dirpath, "stata2_115.dta")
self.dta2_117 = os.path.join(self.dirpath, "stata2_117.dta")
self.dta3_113 = os.path.join(self.dirpath, "stata3_113.dta")
self.dta3_114 = os.path.join(self.dirpath, "stata3_114.dta")
self.dta3_115 = os.path.join(self.dirpath, "stata3_115.dta")
self.dta3_117 = os.path.join(self.dirpath, "stata3_117.dta")
self.csv3 = os.path.join(self.dirpath, "stata3.csv")
self.dta4_113 = os.path.join(self.dirpath, "stata4_113.dta")
self.dta4_114 = os.path.join(self.dirpath, "stata4_114.dta")
self.dta4_115 = os.path.join(self.dirpath, "stata4_115.dta")
self.dta4_117 = os.path.join(self.dirpath, "stata4_117.dta")
self.dta_encoding = os.path.join(self.dirpath, "stata1_encoding.dta")
self.dta_encoding_118 = os.path.join(self.dirpath, "stata1_encoding_118.dta")
self.csv14 = os.path.join(self.dirpath, "stata5.csv")
self.dta14_113 = os.path.join(self.dirpath, "stata5_113.dta")
self.dta14_114 = os.path.join(self.dirpath, "stata5_114.dta")
self.dta14_115 = os.path.join(self.dirpath, "stata5_115.dta")
self.dta14_117 = os.path.join(self.dirpath, "stata5_117.dta")
self.csv15 = os.path.join(self.dirpath, "stata6.csv")
self.dta15_113 = os.path.join(self.dirpath, "stata6_113.dta")
self.dta15_114 = os.path.join(self.dirpath, "stata6_114.dta")
self.dta15_115 = os.path.join(self.dirpath, "stata6_115.dta")
self.dta15_117 = os.path.join(self.dirpath, "stata6_117.dta")
self.dta16_115 = os.path.join(self.dirpath, "stata7_115.dta")
self.dta16_117 = os.path.join(self.dirpath, "stata7_117.dta")
self.dta17_113 = os.path.join(self.dirpath, "stata8_113.dta")
self.dta17_115 = os.path.join(self.dirpath, "stata8_115.dta")
self.dta17_117 = os.path.join(self.dirpath, "stata8_117.dta")
self.dta18_115 = os.path.join(self.dirpath, "stata9_115.dta")
self.dta18_117 = os.path.join(self.dirpath, "stata9_117.dta")
self.dta19_115 = os.path.join(self.dirpath, "stata10_115.dta")
self.dta19_117 = os.path.join(self.dirpath, "stata10_117.dta")
self.dta20_115 = os.path.join(self.dirpath, "stata11_115.dta")
self.dta20_117 = os.path.join(self.dirpath, "stata11_117.dta")
self.dta21_117 = os.path.join(self.dirpath, "stata12_117.dta")
self.dta22_118 = os.path.join(self.dirpath, "stata14_118.dta")
self.dta23 = os.path.join(self.dirpath, "stata15.dta")
self.dta24_111 = os.path.join(self.dirpath, "stata7_111.dta")
self.dta25_118 = os.path.join(self.dirpath, "stata16_118.dta")
self.stata_dates = os.path.join(self.dirpath, "stata13_dates.dta")
def read_dta(self, file):
# Legacy default reader configuration
return read_stata(file, convert_dates=True)
def read_csv(self, file):
return read_csv(file, parse_dates=True)
@pytest.mark.parametrize("version", [114, 117])
def test_read_empty_dta(self, version):
empty_ds = DataFrame(columns=["unit"])
# GH 7369, make sure can read a 0-obs dta file
with tm.ensure_clean() as path:
empty_ds.to_stata(path, write_index=False, version=version)
empty_ds2 = read_stata(path)
tm.assert_frame_equal(empty_ds, empty_ds2)
def test_data_method(self):
# Minimal testing of legacy data method
with StataReader(self.dta1_114) as rdr:
with tm.assert_produces_warning(UserWarning):
parsed_114_data = rdr.data()
with StataReader(self.dta1_114) as rdr:
parsed_114_read = rdr.read()
tm.assert_frame_equal(parsed_114_data, parsed_114_read)
@pytest.mark.parametrize("file", ["dta1_114", "dta1_117"])
def test_read_dta1(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
# Pandas uses np.nan as missing value.
# Thus, all columns will be of type float, regardless of their name.
expected = DataFrame(
[(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
)
# This is an oddity: the NaN really should be float64, but
# the casting doesn't fail, so we need to match Stata here.
expected["float_miss"] = expected["float_miss"].astype(np.float32)
tm.assert_frame_equal(parsed, expected)
def test_read_dta2(self):
expected = DataFrame.from_records(
[
(
datetime(2006, 11, 19, 23, 13, 20),
1479596223000,
datetime(2010, 1, 20),
datetime(2010, 1, 8),
datetime(2010, 1, 1),
datetime(1974, 7, 1),
datetime(2010, 1, 1),
datetime(2010, 1, 1),
),
(
datetime(1959, 12, 31, 20, 3, 20),
-1479590,
datetime(1953, 10, 2),
datetime(1948, 6, 10),
datetime(1955, 1, 1),
datetime(1955, 7, 1),
datetime(1955, 1, 1),
datetime(2, 1, 1),
),
(pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT),
],
columns=[
"datetime_c",
"datetime_big_c",
"date",
"weekly_date",
"monthly_date",
"quarterly_date",
"half_yearly_date",
"yearly_date",
],
)
expected["yearly_date"] = expected["yearly_date"].astype("O")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed_114 = self.read_dta(self.dta2_114)
parsed_115 = self.read_dta(self.dta2_115)
parsed_117 = self.read_dta(self.dta2_117)
# 113 is buggy due to limits of date format support in Stata
# parsed_113 = self.read_dta(self.dta2_113)
# Remove resource warnings
w = [x for x in w if x.category is UserWarning]
# should get warning for each call to read_dta
assert len(w) == 3
# buggy test because of the NaT comparison on certain platforms
# Format 113 test fails since it does not support tc and tC formats
# tm.assert_frame_equal(parsed_113, expected)
tm.assert_frame_equal(parsed_114, expected, check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_115, expected, check_datetimelike_compat=True)
tm.assert_frame_equal(parsed_117, expected, check_datetimelike_compat=True)
@pytest.mark.parametrize("file", ["dta3_113", "dta3_114", "dta3_115", "dta3_117"])
def test_read_dta3(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
# match stata here
expected = self.read_csv(self.csv3)
expected = expected.astype(np.float32)
expected["year"] = expected["year"].astype(np.int16)
expected["quarter"] = expected["quarter"].astype(np.int8)
tm.assert_frame_equal(parsed, expected)
@pytest.mark.parametrize("file", ["dta4_113", "dta4_114", "dta4_115", "dta4_117"])
def test_read_dta4(self, file):
file = getattr(self, file)
parsed = self.read_dta(file)
expected = DataFrame.from_records(
[
["one", "ten", "one", "one", "one"],
["two", "nine", "two", "two", "two"],
["three", "eight", "three", "three", "three"],
["four", "seven", 4, "four", "four"],
["five", "six", 5, np.nan, "five"],
["six", "five", 6, np.nan, "six"],
["seven", "four", 7, np.nan, "seven"],
["eight", "three", 8, np.nan, "eight"],
["nine", "two", 9, np.nan, "nine"],
["ten", "one", "ten", np.nan, "ten"],
],
columns=[
"fully_labeled",
"fully_labeled2",
"incompletely_labeled",
"labeled_with_missings",
"float_labelled",
],
)
# these are all categoricals
expected = pd.concat(
[expected[col].astype("category") for col in expected], axis=1
)
# stata doesn't save .category metadata
tm.assert_frame_equal(parsed, expected, check_categorical=False)
# File containing strls
def test_read_dta12(self):
parsed_117 = self.read_dta(self.dta21_117)
expected = DataFrame.from_records(
[
[1, "abc", "abcdefghi"],
[3, "cba", "qwertywertyqwerty"],
[93, "", "strl"],
],
columns=["x", "y", "z"],
)
tm.assert_frame_equal(parsed_117, expected, check_dtype=False)
def test_read_dta18(self):
parsed_118 = self.read_dta(self.dta22_118)
parsed_118["Bytes"] = parsed_118["Bytes"].astype("O")
expected = DataFrame.from_records(
[
["Cat", "Bogota", "Bogotá", 1, 1.0, "option b Ünicode", 1.0],
["Dog", "Boston", "Uzunköprü", np.nan, np.nan, np.nan, np.nan],
["Plane", "Rome", "Tromsø", 0, 0.0, "option a", 0.0],
["Potato", "Tokyo", "Elâzığ", -4, 4.0, 4, 4],
["", "", "", 0, 0.3332999, "option a", 1 / 3.0],
],
columns=[
"Things",
"Cities",
"Unicode_Cities_Strl",
"Ints",
"Floats",
"Bytes",
"Longs",
],
)
expected["Floats"] = expected["Floats"].astype(np.float32)
for col in parsed_118.columns:
tm.assert_almost_equal(parsed_118[col], expected[col])
with StataReader(self.dta22_118) as rdr:
vl = rdr.variable_labels()
vl_expected = {
"Unicode_Cities_Strl": "Here are some strls with Ünicode chars",
"Longs": "long data",
"Things": "Here are some things",
"Bytes": "byte data",
"Ints": "int data",
"Cities": "Here are some cities",
"Floats": "float data",
}
tm.assert_dict_equal(vl, vl_expected)
assert rdr.data_label == "This is a Ünicode data label"
def test_read_write_dta5(self):
original = DataFrame(
[(np.nan, np.nan, np.nan, np.nan, np.nan)],
columns=["float_miss", "double_miss", "byte_miss", "int_miss", "long_miss"],
)
original.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), original)
def test_write_dta6(self):
original = self.read_csv(self.csv3)
original.index.name = "index"
original.index = original.index.astype(np.int32)
original["year"] = original["year"].astype(np.int32)
original["quarter"] = original["quarter"].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
original,
check_index_type=False,
)
@pytest.mark.parametrize("version", [114, 117])
def test_read_write_dta10(self, version):
original = DataFrame(
data=[["string", "object", 1, 1.1, np.datetime64("2003-12-25")]],
columns=["string", "object", "integer", "floating", "datetime"],
)
original["object"] = Series(original["object"], dtype=object)
original.index.name = "index"
original.index = original.index.astype(np.int32)
original["integer"] = original["integer"].astype(np.int32)
with tm.ensure_clean() as path:
original.to_stata(path, {"datetime": "tc"}, version=version)
written_and_read_again = self.read_dta(path)
# original.index is np.int32, read index is np.int64
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
original,
check_index_type=False,
)
def test_stata_doc_examples(self):
with tm.ensure_clean() as path:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.to_stata(path)
def test_write_preserves_original(self):
# 9795
np.random.seed(423)
df = pd.DataFrame(np.random.randn(5, 4), columns=list("abcd"))
df.loc[2, "a":"c"] = np.nan
df_copy = df.copy()
with tm.ensure_clean() as path:
df.to_stata(path, write_index=False)
tm.assert_frame_equal(df, df_copy)
@pytest.mark.parametrize("version", [114, 117])
def test_encoding(self, version):
# GH 4626, proper encoding handling
raw = read_stata(self.dta_encoding)
with tm.assert_produces_warning(FutureWarning):
encoded = read_stata(self.dta_encoding, encoding="latin-1")
result = encoded.kreis1849[0]
expected = raw.kreis1849[0]
assert result == expected
assert isinstance(result, str)
with tm.ensure_clean() as path:
with tm.assert_produces_warning(FutureWarning):
encoded.to_stata(
path, write_index=False, version=version, encoding="latin-1"
)
reread_encoded = read_stata(path)
tm.assert_frame_equal(encoded, reread_encoded)
def test_read_write_dta11(self):
original = DataFrame(
[(1, 2, 3, 4)],
columns=[
"good",
"b\u00E4d",
"8number",
"astringwithmorethan32characters______",
],
)
formatted = DataFrame(
[(1, 2, 3, 4)],
columns=["good", "b_d", "_8number", "astringwithmorethan32characters_"],
)
formatted.index.name = "index"
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with tm.assert_produces_warning(pd.io.stata.InvalidColumnName):
original.to_stata(path, None)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
@pytest.mark.parametrize("version", [114, 117])
def test_read_write_dta12(self, version):
original = DataFrame(
[(1, 2, 3, 4, 5, 6)],
columns=[
"astringwithmorethan32characters_1",
"astringwithmorethan32characters_2",
"+",
"-",
"short",
"delete",
],
)
formatted = DataFrame(
[(1, 2, 3, 4, 5, 6)],
columns=[
"astringwithmorethan32characters_",
"_0astringwithmorethan32character",
"_",
"_1_",
"_short",
"_delete",
],
)
formatted.index.name = "index"
formatted = formatted.astype(np.int32)
with tm.ensure_clean() as path:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", InvalidColumnName)
original.to_stata(path, None, version=version)
# should get a warning for that format.
assert len(w) == 1
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
def test_read_write_dta13(self):
s1 = Series(2 ** 9, dtype=np.int16)
s2 = Series(2 ** 17, dtype=np.int32)
s3 = Series(2 ** 33, dtype=np.int64)
original = DataFrame({"int16": s1, "int32": s2, "int64": s3})
original.index.name = "index"
formatted = original
formatted["int64"] = formatted["int64"].astype(np.float64)
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), formatted)
@pytest.mark.parametrize("version", [114, 117])
@pytest.mark.parametrize(
"file", ["dta14_113", "dta14_114", "dta14_115", "dta14_117"]
)
def test_read_write_reread_dta14(self, file, parsed_114, version):
file = getattr(self, file)
parsed = self.read_dta(file)
parsed.index.name = "index"
expected = self.read_csv(self.csv14)
cols = ["byte_", "int_", "long_", "float_", "double_"]
for col in cols:
expected[col] = expected[col]._convert(datetime=True, numeric=True)
expected["float_"] = expected["float_"].astype(np.float32)
expected["date_td"] = pd.to_datetime(expected["date_td"], errors="coerce")
tm.assert_frame_equal(parsed_114, parsed)
with tm.ensure_clean() as path:
parsed_114.to_stata(path, {"date_td": "td"}, version=version)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), parsed_114)
@pytest.mark.parametrize(
"file", ["dta15_113", "dta15_114", "dta15_115", "dta15_117"]
)
def test_read_write_reread_dta15(self, file):
expected = self.read_csv(self.csv15)
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
file = getattr(self, file)
parsed = self.read_dta(file)
tm.assert_frame_equal(expected, parsed)
@pytest.mark.parametrize("version", [114, 117])
def test_timestamp_and_label(self, version):
original = DataFrame([(1,)], columns=["variable"])
time_stamp = datetime(2000, 2, 29, 14, 21)
data_label = "This is a data file."
with tm.ensure_clean() as path:
original.to_stata(
path, time_stamp=time_stamp, data_label=data_label, version=version
)
with StataReader(path) as reader:
assert reader.time_stamp == "29 Feb 2000 14:21"
assert reader.data_label == data_label
@pytest.mark.parametrize("version", [114, 117])
def test_invalid_timestamp(self, version):
original = DataFrame([(1,)], columns=["variable"])
time_stamp = "01 Jan 2000, 00:00:00"
with tm.ensure_clean() as path:
msg = "time_stamp should be datetime type"
with pytest.raises(ValueError, match=msg):
original.to_stata(path, time_stamp=time_stamp, version=version)
def test_numeric_column_names(self):
original = DataFrame(np.reshape(np.arange(25.0), (5, 5)))
original.index.name = "index"
with tm.ensure_clean() as path:
# should get a warning for that format.
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
columns = list(written_and_read_again.columns)
convert_col_name = lambda x: int(x[1])
written_and_read_again.columns = map(convert_col_name, columns)
tm.assert_frame_equal(original, written_and_read_again)
@pytest.mark.parametrize("version", [114, 117])
def test_nan_to_missing_value(self, version):
s1 = Series(np.arange(4.0), dtype=np.float32)
s2 = Series(np.arange(4.0), dtype=np.float64)
s1[::2] = np.nan
s2[1::2] = np.nan
original = DataFrame({"s1": s1, "s2": s2})
original.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
tm.assert_frame_equal(written_and_read_again, original)
def test_no_index(self):
columns = ["x", "y"]
original = DataFrame(np.reshape(np.arange(10.0), (5, 2)), columns=columns)
original.index.name = "index_not_written"
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
written_and_read_again = self.read_dta(path)
with pytest.raises(KeyError, match=original.index.name):
written_and_read_again["index_not_written"]
def test_string_no_dates(self):
s1 = Series(["a", "A longer string"])
s2 = Series([1.0, 2.0], dtype=np.float64)
original = DataFrame({"s1": s1, "s2": s2})
original.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), original)
def test_large_value_conversion(self):
s0 = Series([1, 99], dtype=np.int8)
s1 = Series([1, 127], dtype=np.int8)
s2 = Series([1, 2 ** 15 - 1], dtype=np.int16)
s3 = Series([1, 2 ** 63 - 1], dtype=np.int64)
original = DataFrame({"s0": s0, "s1": s1, "s2": s2, "s3": s3})
original.index.name = "index"
with tm.ensure_clean() as path:
with tm.assert_produces_warning(PossiblePrecisionLoss):
original.to_stata(path)
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified["s1"] = Series(modified["s1"], dtype=np.int16)
modified["s2"] = Series(modified["s2"], dtype=np.int32)
modified["s3"] = Series(modified["s3"], dtype=np.float64)
tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)
def test_dates_invalid_column(self):
original = DataFrame([datetime(2006, 11, 19, 23, 13, 20)])
original.index.name = "index"
with tm.ensure_clean() as path:
with tm.assert_produces_warning(InvalidColumnName):
original.to_stata(path, {0: "tc"})
written_and_read_again = self.read_dta(path)
modified = original.copy()
modified.columns = ["_0"]
tm.assert_frame_equal(written_and_read_again.set_index("index"), modified)
def test_105(self):
# Data obtained from:
# http://go.worldbank.org/ZXY29PVJ21
dpath = os.path.join(self.dirpath, "S4_EDUC1.dta")
df = pd.read_stata(dpath)
df0 = [[1, 1, 3, -2], [2, 1, 2, -2], [4, 1, 1, -2]]
df0 = pd.DataFrame(df0)
df0.columns = ["clustnum", "pri_schl", "psch_num", "psch_dis"]
df0["clustnum"] = df0["clustnum"].astype(np.int16)
df0["pri_schl"] = df0["pri_schl"].astype(np.int8)
df0["psch_num"] = df0["psch_num"].astype(np.int8)
df0["psch_dis"] = df0["psch_dis"].astype(np.float32)
tm.assert_frame_equal(df.head(3), df0)
def test_value_labels_old_format(self):
# GH 19417
#
# Test that value_labels() returns an empty dict if the file format
# predates supporting value labels.
dpath = os.path.join(self.dirpath, "S4_EDUC1.dta")
reader = StataReader(dpath)
assert reader.value_labels() == {}
reader.close()
def test_date_export_formats(self):
columns = ["tc", "td", "tw", "tm", "tq", "th", "ty"]
conversions = {c: c for c in columns}
data = [datetime(2006, 11, 20, 23, 13, 20)] * len(columns)
original = DataFrame([data], columns=columns)
original.index.name = "index"
expected_values = [
datetime(2006, 11, 20, 23, 13, 20), # Time
datetime(2006, 11, 20), # Day
datetime(2006, 11, 19), # Week
datetime(2006, 11, 1), # Month
datetime(2006, 10, 1), # Quarter year
datetime(2006, 7, 1), # Half year
datetime(2006, 1, 1),  # Year
]
expected = DataFrame([expected_values], columns=columns)
expected.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, conversions)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
def test_write_missing_strings(self):
original = DataFrame([["1"], [None]], columns=["foo"])
expected = DataFrame([["1"], [""]], columns=["foo"])
expected.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(written_and_read_again.set_index("index"), expected)
@pytest.mark.parametrize("version", [114, 117])
@pytest.mark.parametrize("byteorder", [">", "<"])
def test_bool_uint(self, byteorder, version):
s0 = Series([0, 1, True], dtype=np.bool)
s1 = Series([0, 1, 100], dtype=np.uint8)
s2 = Series([0, 1, 255], dtype=np.uint8)
s3 = Series([0, 1, 2 ** 15 - 100], dtype=np.uint16)
s4 = Series([0, 1, 2 ** 16 - 1], dtype=np.uint16)
s5 = Series([0, 1, 2 ** 31 - 100], dtype=np.uint32)
s6 = Series([0, 1, 2 ** 32 - 1], dtype=np.uint32)
original = DataFrame(
{"s0": s0, "s1": s1, "s2": s2, "s3": s3, "s4": s4, "s5": s5, "s6": s6}
)
original.index.name = "index"
expected = original.copy()
expected_types = (
np.int8,
np.int8,
np.int16,
np.int16,
np.int32,
np.int32,
np.float64,
)
for c, t in zip(expected.columns, expected_types):
expected[c] = expected[c].astype(t)
with tm.ensure_clean() as path:
original.to_stata(path, byteorder=byteorder, version=version)
written_and_read_again = self.read_dta(path)
written_and_read_again = written_and_read_again.set_index("index")
tm.assert_frame_equal(written_and_read_again, expected)
def test_variable_labels(self):
with StataReader(self.dta16_115) as rdr:
sr_115 = rdr.variable_labels()
with StataReader(self.dta16_117) as rdr:
sr_117 = rdr.variable_labels()
keys = ("var1", "var2", "var3")
labels = ("label1", "label2", "label3")
for k, v in sr_115.items():
assert k in sr_117
assert v == sr_117[k]
assert k in keys
assert v in labels
def test_minimal_size_col(self):
str_lens = (1, 100, 244)
s = {}
for str_len in str_lens:
s["s" + str(str_len)] = Series(
["a" * str_len, "b" * str_len, "c" * str_len]
)
original = DataFrame(s)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
with StataReader(path) as sr:
typlist = sr.typlist
variables = sr.varlist
formats = sr.fmtlist
for variable, fmt, typ in zip(variables, formats, typlist):
assert int(variable[1:]) == int(fmt[1:-1])
assert int(variable[1:]) == typ
def test_excessively_long_string(self):
str_lens = (1, 244, 500)
s = {}
for str_len in str_lens:
s["s" + str(str_len)] = Series(
["a" * str_len, "b" * str_len, "c" * str_len]
)
original = DataFrame(s)
msg = (
r"Fixed width strings in Stata \.dta files are limited to 244"
r" \(or fewer\)\ncharacters\. Column 's500' does not satisfy"
r" this restriction\. Use the\n'version=117' parameter to write"
r" the newer \(Stata 13 and later\) format\."
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_missing_value_generator(self):
types = ("b", "h", "l")
df = DataFrame([[0.0]], columns=["float_"])
with tm.ensure_clean() as path:
df.to_stata(path)
with StataReader(path) as rdr:
valid_range = rdr.VALID_RANGE
expected_values = ["." + chr(97 + i) for i in range(26)]
expected_values.insert(0, ".")
for t in types:
offset = valid_range[t][1]
for i in range(0, 27):
val = StataMissingValue(offset + 1 + i)
assert val.string == expected_values[i]
# Test extremes for floats
val = StataMissingValue(struct.unpack("<f", b"\x00\x00\x00\x7f")[0])
assert val.string == "."
val = StataMissingValue(struct.unpack("<f", b"\x00\xd0\x00\x7f")[0])
assert val.string == ".z"
# Test extremes for floats
val = StataMissingValue(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x00\xe0\x7f")[0]
)
assert val.string == "."
val = StataMissingValue(
struct.unpack("<d", b"\x00\x00\x00\x00\x00\x1a\xe0\x7f")[0]
)
assert val.string == ".z"
@pytest.mark.parametrize("file", ["dta17_113", "dta17_115", "dta17_117"])
def test_missing_value_conversion(self, file):
columns = ["int8_", "int16_", "int32_", "float32_", "float64_"]
smv = StataMissingValue(101)
keys = sorted(smv.MISSING_VALUES.keys())
data = []
for i in range(27):
row = [StataMissingValue(keys[i + (j * 27)]) for j in range(5)]
data.append(row)
expected = DataFrame(data, columns=columns)
parsed = read_stata(getattr(self, file), convert_missing=True)
tm.assert_frame_equal(parsed, expected)
def test_big_dates(self):
yr = [1960, 2000, 9999, 100, 2262, 1677]
mo = [1, 1, 12, 1, 4, 9]
dd = [1, 1, 31, 1, 22, 23]
hr = [0, 0, 23, 0, 0, 0]
mm = [0, 0, 59, 0, 0, 0]
ss = [0, 0, 59, 0, 0, 0]
expected = []
for i in range(len(yr)):
row = []
for j in range(7):
if j == 0:
row.append(datetime(yr[i], mo[i], dd[i], hr[i], mm[i], ss[i]))
elif j == 6:
row.append(datetime(yr[i], 1, 1))
else:
row.append(datetime(yr[i], mo[i], dd[i]))
expected.append(row)
expected.append([pd.NaT] * 7)
columns = [
"date_tc",
"date_td",
"date_tw",
"date_tm",
"date_tq",
"date_th",
"date_ty",
]
# Fixes for weekly, quarterly, half, year
expected[2][2] = datetime(9999, 12, 24)
expected[2][3] = datetime(9999, 12, 1)
expected[2][4] = datetime(9999, 10, 1)
expected[2][5] = datetime(9999, 7, 1)
expected[4][2] = datetime(2262, 4, 16)
expected[4][3] = expected[4][4] = datetime(2262, 4, 1)
expected[4][5] = expected[4][6] = datetime(2262, 1, 1)
expected[5][2] = expected[5][3] = expected[5][4] = datetime(1677, 10, 1)
expected[5][5] = expected[5][6] = datetime(1678, 1, 1)
expected = DataFrame(expected, columns=columns, dtype=np.object)
parsed_115 = read_stata(self.dta18_115)
parsed_117 = read_stata(self.dta18_117)
tm.assert_frame_equal(expected, parsed_115, check_datetimelike_compat=True)
tm.assert_frame_equal(expected, parsed_117, check_datetimelike_compat=True)
date_conversion = {c: c[-2:] for c in columns}
with tm.ensure_clean() as path:
expected.index.name = "index"
expected.to_stata(path, date_conversion)
written_and_read_again = self.read_dta(path)
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
expected,
check_datetimelike_compat=True,
)
def test_dtype_conversion(self):
expected = self.read_csv(self.csv15)
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
no_conversion = read_stata(self.dta15_117, convert_dates=True)
tm.assert_frame_equal(expected, no_conversion)
conversion = read_stata(
self.dta15_117, convert_dates=True, preserve_dtypes=False
)
# read_csv types are the same
expected = self.read_csv(self.csv15)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
tm.assert_frame_equal(expected, conversion)
def test_drop_column(self):
expected = self.read_csv(self.csv15)
expected["byte_"] = expected["byte_"].astype(np.int8)
expected["int_"] = expected["int_"].astype(np.int16)
expected["long_"] = expected["long_"].astype(np.int32)
expected["float_"] = expected["float_"].astype(np.float32)
expected["double_"] = expected["double_"].astype(np.float64)
expected["date_td"] = expected["date_td"].apply(
datetime.strptime, args=("%Y-%m-%d",)
)
columns = ["byte_", "int_", "long_"]
expected = expected[columns]
dropped = read_stata(self.dta15_117, convert_dates=True, columns=columns)
tm.assert_frame_equal(expected, dropped)
# See PR 10757
columns = ["int_", "long_", "byte_"]
expected = expected[columns]
reordered = read_stata(self.dta15_117, convert_dates=True, columns=columns)
tm.assert_frame_equal(expected, reordered)
msg = "columns contains duplicate entries"
with pytest.raises(ValueError, match=msg):
columns = ["byte_", "byte_"]
read_stata(self.dta15_117, convert_dates=True, columns=columns)
msg = "The following columns were not found in the Stata data set: not_found"
with pytest.raises(ValueError, match=msg):
columns = ["byte_", "int_", "long_", "not_found"]
read_stata(self.dta15_117, convert_dates=True, columns=columns)
@pytest.mark.parametrize("version", [114, 117])
@pytest.mark.filterwarnings(
"ignore:\\nStata value:pandas.io.stata.ValueLabelTypeMismatch"
)
def test_categorical_writing(self, version):
original = DataFrame.from_records(
[
["one", "ten", "one", "one", "one", 1],
["two", "nine", "two", "two", "two", 2],
["three", "eight", "three", "three", "three", 3],
["four", "seven", 4, "four", "four", 4],
["five", "six", 5, np.nan, "five", 5],
["six", "five", 6, np.nan, "six", 6],
["seven", "four", 7, np.nan, "seven", 7],
["eight", "three", 8, np.nan, "eight", 8],
["nine", "two", 9, np.nan, "nine", 9],
["ten", "one", "ten", np.nan, "ten", 10],
],
columns=[
"fully_labeled",
"fully_labeled2",
"incompletely_labeled",
"labeled_with_missings",
"float_labelled",
"unlabeled",
],
)
expected = original.copy()
# these are all categoricals
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
expected["incompletely_labeled"] = expected["incompletely_labeled"].apply(str)
expected["unlabeled"] = expected["unlabeled"].apply(str)
expected = pd.concat(
[expected[col].astype("category") for col in expected], axis=1
)
expected.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index("index")
tm.assert_frame_equal(res, expected, check_categorical=False)
def test_categorical_warnings_and_errors(self):
# Warning for non-string labels
# Error for labels too long
original = pd.DataFrame.from_records(
[["a" * 10000], ["b" * 10000], ["c" * 10000], ["d" * 10000]],
columns=["Too_long"],
)
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
with tm.ensure_clean() as path:
msg = (
"Stata value labels for a single variable must have"
r" a combined length less than 32,000 characters\."
)
with pytest.raises(ValueError, match=msg):
original.to_stata(path)
original = pd.DataFrame.from_records(
[["a"], ["b"], ["c"], ["d"], [1]], columns=["Too_long"]
)
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
with tm.assert_produces_warning(pd.io.stata.ValueLabelTypeMismatch):
original.to_stata(path)
# should get a warning for mixed content
@pytest.mark.parametrize("version", [114, 117])
def test_categorical_with_stata_missing_values(self, version):
values = [["a" + str(i)] for i in range(120)]
values.append([np.nan])
original = pd.DataFrame.from_records(values, columns=["many_labels"])
original = pd.concat(
[original[col].astype("category") for col in original], axis=1
)
original.index.name = "index"
with tm.ensure_clean() as path:
original.to_stata(path, version=version)
written_and_read_again = self.read_dta(path)
res = written_and_read_again.set_index("index")
tm.assert_frame_equal(res, original, check_categorical=False)
@pytest.mark.parametrize("file", ["dta19_115", "dta19_117"])
def test_categorical_order(self, file):
# Directly construct using expected codes
# Format is is_cat, col_name, labels (in order), underlying data
expected = [
(True, "ordered", ["a", "b", "c", "d", "e"], np.arange(5)),
(True, "reverse", ["a", "b", "c", "d", "e"], np.arange(5)[::-1]),
(True, "noorder", ["a", "b", "c", "d", "e"], np.array([2, 1, 4, 0, 3])),
(True, "floating", ["a", "b", "c", "d", "e"], np.arange(0, 5)),
(True, "float_missing", ["a", "d", "e"], np.array([0, 1, 2, -1, -1])),
(False, "nolabel", [1.0, 2.0, 3.0, 4.0, 5.0], np.arange(5)),
(True, "int32_mixed", ["d", 2, "e", "b", "a"], np.arange(5)),
]
cols = []
for is_cat, col, labels, codes in expected:
if is_cat:
cols.append((col, pd.Categorical.from_codes(codes, labels)))
else:
cols.append((col, pd.Series(labels, dtype=np.float32)))
expected = DataFrame.from_dict(OrderedDict(cols))
# Read with and without categoricals, ensure order is identical
file = getattr(self, file)
parsed = read_stata(file)
tm.assert_frame_equal(expected, parsed, check_categorical=False)
# Check identity of codes
for col in expected:
if is_categorical_dtype(expected[col]):
tm.assert_series_equal(expected[col].cat.codes, parsed[col].cat.codes)
tm.assert_index_equal(
expected[col].cat.categories, parsed[col].cat.categories
)
@pytest.mark.parametrize("file", ["dta20_115", "dta20_117"])
def test_categorical_sorting(self, file):
parsed = read_stata(getattr(self, file))
# Sort based on codes, not strings
parsed = parsed.sort_values("srh", na_position="first")
# Don't sort index
parsed.index = np.arange(parsed.shape[0])
codes = [-1, -1, 0, 1, 1, 1, 2, 2, 3, 4]
categories = ["Poor", "Fair", "Good", "Very good", "Excellent"]
cat = pd.Categorical.from_codes(codes=codes, categories=categories)
expected = pd.Series(cat, name="srh")
tm.assert_series_equal(expected, parsed["srh"], check_categorical=False)
@pytest.mark.parametrize("file", ["dta19_115", "dta19_117"])
def test_categorical_ordering(self, file):
file = getattr(self, file)
parsed = read_stata(file)
parsed_unordered = read_stata(file, order_categoricals=False)
for col in parsed:
if not is_categorical_dtype(parsed[col]):
continue
assert parsed[col].cat.ordered
assert not parsed_unordered[col].cat.ordered
@pytest.mark.parametrize(
"file",
[
"dta1_117",
"dta2_117",
"dta3_117",
"dta4_117",
"dta14_117",
"dta15_117",
"dta16_117",
"dta17_117",
"dta18_117",
"dta19_117",
"dta20_117",
],
)
@pytest.mark.parametrize("chunksize", [1, 2])
@pytest.mark.parametrize("convert_categoricals", [False, True])
@pytest.mark.parametrize("convert_dates", [False, True])
def test_read_chunks_117(
self, file, chunksize, convert_categoricals, convert_dates
):
fname = getattr(self, file)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
)
itr = read_stata(
fname,
iterator=True,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos : pos + chunksize, :]
tm.assert_frame_equal(
from_frame,
chunk,
check_dtype=False,
check_datetimelike_compat=True,
check_categorical=False,
)
pos += chunksize
itr.close()
def test_iterator(self):
fname = self.dta3_117
parsed = read_stata(fname)
with read_stata(fname, iterator=True) as itr:
chunk = itr.read(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = list(itr)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk[0])
with read_stata(fname, iterator=True) as itr:
chunk = itr.get_chunk(5)
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
with read_stata(fname, chunksize=5) as itr:
chunk = itr.get_chunk()
tm.assert_frame_equal(parsed.iloc[0:5, :], chunk)
# GH12153
with read_stata(fname, chunksize=4) as itr:
from_chunks = pd.concat(itr)
tm.assert_frame_equal(parsed, from_chunks)
@pytest.mark.parametrize(
"file",
[
"dta2_115",
"dta3_115",
"dta4_115",
"dta14_115",
"dta15_115",
"dta16_115",
"dta17_115",
"dta18_115",
"dta19_115",
"dta20_115",
],
)
@pytest.mark.parametrize("chunksize", [1, 2])
@pytest.mark.parametrize("convert_categoricals", [False, True])
@pytest.mark.parametrize("convert_dates", [False, True])
def test_read_chunks_115(
self, file, chunksize, convert_categoricals, convert_dates
):
fname = getattr(self, file)
# Read the whole file
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
parsed = read_stata(
fname,
convert_categoricals=convert_categoricals,
convert_dates=convert_dates,
)
# Compare to what we get when reading by chunk
itr = read_stata(
fname,
iterator=True,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
)
pos = 0
for j in range(5):
with warnings.catch_warnings(record=True) as w: # noqa
warnings.simplefilter("always")
try:
chunk = itr.read(chunksize)
except StopIteration:
break
from_frame = parsed.iloc[pos : pos + chunksize, :]
tm.assert_frame_equal(
from_frame,
chunk,
check_dtype=False,
check_datetimelike_compat=True,
check_categorical=False,
)
pos += chunksize
itr.close()
def test_read_chunks_columns(self):
fname = self.dta3_117
columns = ["quarter", "cpi", "m1"]
chunksize = 2
parsed = read_stata(fname, columns=columns)
with read_stata(fname, iterator=True) as itr:
pos = 0
for j in range(5):
chunk = itr.read(chunksize, columns=columns)
if chunk is None:
break
from_frame = parsed.iloc[pos : pos + chunksize, :]
tm.assert_frame_equal(from_frame, chunk, check_dtype=False)
pos += chunksize
@pytest.mark.parametrize("version", [114, 117])
def test_write_variable_labels(self, version):
# GH 13631, add support for writing variable labels
original = pd.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [1.0, 3.0, 27.0, 81.0],
"c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
}
)
original.index.name = "index"
variable_labels = {"a": "City Rank", "b": "City Exponent", "c": "City"}
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels, version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
expected_labels = {
"index": "",
"a": "City Rank",
"b": "City Exponent",
"c": "City",
}
assert read_labels == expected_labels
variable_labels["index"] = "The Index"
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels, version=version)
with StataReader(path) as sr:
read_labels = sr.variable_labels()
assert read_labels == variable_labels
@pytest.mark.parametrize("version", [114, 117])
def test_invalid_variable_labels(self, version):
original = pd.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [1.0, 3.0, 27.0, 81.0],
"c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
}
)
original.index.name = "index"
variable_labels = {"a": "very long" * 10, "b": "City Exponent", "c": "City"}
with tm.ensure_clean() as path:
msg = "Variable labels must be 80 characters or fewer"
with pytest.raises(ValueError, match=msg):
original.to_stata(
path, variable_labels=variable_labels, version=version
)
variable_labels["a"] = "invalid character Œ"
with tm.ensure_clean() as path:
msg = (
"Variable labels must contain only characters that can be"
" encoded in Latin-1"
)
with pytest.raises(ValueError, match=msg):
original.to_stata(
path, variable_labels=variable_labels, version=version
)
def test_write_variable_label_errors(self):
original = pd.DataFrame(
{
"a": [1, 2, 3, 4],
"b": [1.0, 3.0, 27.0, 81.0],
"c": ["Atlanta", "Birmingham", "Cincinnati", "Detroit"],
}
)
values = ["\u03A1", "\u0391", "\u039D", "\u0394", "\u0391", "\u03A3"]
variable_labels_utf8 = {
"a": "City Rank",
"b": "City Exponent",
"c": "".join(values),
}
msg = (
"Variable labels must contain only characters that can be"
" encoded in Latin-1"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels_utf8)
variable_labels_long = {
"a": "City Rank",
"b": "City Exponent",
"c": "A very, very, very long variable label "
"that is too long for Stata which means "
"that it has more than 80 characters",
}
msg = "Variable labels must be 80 characters or fewer"
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path, variable_labels=variable_labels_long)
def test_default_date_conversion(self):
# GH 12259
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = pd.DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
with tm.ensure_clean() as path:
original.to_stata(path, write_index=False)
reread = read_stata(path, convert_dates=True)
tm.assert_frame_equal(original, reread)
original.to_stata(path, write_index=False, convert_dates={"dates": "tc"})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
dates_idx = original.columns.tolist().index("dates")
original.to_stata(path, write_index=False, convert_dates={dates_idx: "tc"})
direct = read_stata(path, convert_dates=True)
tm.assert_frame_equal(reread, direct)
def test_unsupported_type(self):
original = pd.DataFrame({"a": [1 + 2j, 2 + 4j]})
msg = "Data type complex128 not supported"
with pytest.raises(NotImplementedError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_unsupported_datetype(self):
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = pd.DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
msg = "Format %tC not implemented"
with pytest.raises(NotImplementedError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path, convert_dates={"dates": "tC"})
dates = pd.date_range("1-1-1990", periods=3, tz="Asia/Hong_Kong")
original = pd.DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
with pytest.raises(NotImplementedError):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_repeated_column_labels(self):
# GH 13923, 25772
msg = """
Value labels for column ethnicsn are not unique. These cannot be converted to
pandas categoricals.
Either read the file with `convert_categoricals` set to False or use the
low level interface in `StataReader` to separately read the values and the
value_labels.
The repeated labels are:\n-+\nwolof
"""
with pytest.raises(ValueError, match=msg):
read_stata(self.dta23, convert_categoricals=True)
def test_stata_111(self):
# 111 is an old version but still used by current versions of
# SAS when exporting to Stata format. We do not know of any
# on-line documentation for this version.
df = read_stata(self.dta24_111)
original = pd.DataFrame(
{
"y": [1, 1, 1, 1, 1, 0, 0, np.NaN, 0, 0],
"x": [1, 2, 1, 3, np.NaN, 4, 3, 5, 1, 6],
"w": [2, np.NaN, 5, 2, 4, 4, 3, 1, 2, 3],
"z": ["a", "b", "c", "d", "e", "", "g", "h", "i", "j"],
}
)
original = original[["y", "x", "w", "z"]]
tm.assert_frame_equal(original, df)
def test_out_of_range_double(self):
# GH 14618
df = DataFrame(
{
"ColumnOk": [0.0, np.finfo(np.double).eps, 4.49423283715579e307],
"ColumnTooBig": [0.0, np.finfo(np.double).eps, np.finfo(np.double).max],
}
)
msg = (
r"Column ColumnTooBig has a maximum value \(.+\)"
r" outside the range supported by Stata \(.+\)"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
df.to_stata(path)
df.loc[2, "ColumnTooBig"] = np.inf
msg = (
"Column ColumnTooBig has a maximum value of infinity which"
" is outside the range supported by Stata"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
df.to_stata(path)
def test_out_of_range_float(self):
original = DataFrame(
{
"ColumnOk": [
0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max / 10.0,
],
"ColumnTooBig": [
0.0,
np.finfo(np.float32).eps,
np.finfo(np.float32).max,
],
}
)
original.index.name = "index"
for col in original:
original[col] = original[col].astype(np.float32)
with tm.ensure_clean() as path:
original.to_stata(path)
reread = read_stata(path)
original["ColumnTooBig"] = original["ColumnTooBig"].astype(np.float64)
tm.assert_frame_equal(original, reread.set_index("index"))
original.loc[2, "ColumnTooBig"] = np.inf
msg = (
"Column ColumnTooBig has a maximum value of infinity which"
" is outside the range supported by Stata"
)
with pytest.raises(ValueError, match=msg):
with tm.ensure_clean() as path:
original.to_stata(path)
def test_path_pathlib(self):
df = tm.makeDataFrame()
df.index.name = "index"
reader = lambda x: read_stata(x).set_index("index")
result = tm.round_trip_pathlib(df.to_stata, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
df = tm.makeDataFrame()
df.index.name = "index"
reader = lambda x: read_stata(x).set_index("index")
result = tm.round_trip_localpath(df.to_stata, reader)
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize("write_index", [True, False])
def test_value_labels_iterator(self, write_index):
# GH 16923
d = {"A": ["B", "E", "C", "A", "E"]}
df = pd.DataFrame(data=d)
df["A"] = df["A"].astype("category")
with tm.ensure_clean() as path:
df.to_stata(path, write_index=write_index)
with pd.read_stata(path, iterator=True) as dta_iter:
value_labels = dta_iter.value_labels()
assert value_labels == {"A": {0: "A", 1: "B", 2: "C", 3: "E"}}
def test_set_index(self):
# GH 17328
df = tm.makeDataFrame()
df.index.name = "index"
with tm.ensure_clean() as path:
df.to_stata(path)
reread = pd.read_stata(path, index_col="index")
tm.assert_frame_equal(df, reread)
@pytest.mark.parametrize(
"column", ["ms", "day", "week", "month", "qtr", "half", "yr"]
)
def test_date_parsing_ignores_format_details(self, column):
# GH 17797
#
# Test that display formats are ignored when determining if a numeric
# column is a date value.
#
# All date types are stored as numbers and format associated with the
# column denotes both the type of the date and the display format.
#
# STATA supports 9 date types which each have distinct units. We test 7
# of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that
        # accounts for leap seconds and %tb relies on STATA's business calendar.
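        # Illustration (not taken from the test data): %td and a masked variant
        # such as %tdCCYY-NN-DD encode the same underlying daily count, so both
        # should parse to the same datetime regardless of the display mask.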
df = read_stata(self.stata_dates)
unformatted = df.loc[0, column]
formatted = df.loc[0, column + "_fmt"]
assert unformatted == formatted
def test_writer_117(self):
original = DataFrame(
data=[
[
"string",
"object",
1,
1,
1,
1.1,
1.1,
np.datetime64("2003-12-25"),
"a",
"a" * 2045,
"a" * 5000,
"a",
],
[
"string-1",
"object-1",
1,
1,
1,
1.1,
1.1,
np.datetime64("2003-12-26"),
"b",
"b" * 2045,
"",
"",
],
],
columns=[
"string",
"object",
"int8",
"int16",
"int32",
"float32",
"float64",
"datetime",
"s1",
"s2045",
"srtl",
"forced_strl",
],
)
original["object"] = Series(original["object"], dtype=object)
original["int8"] = Series(original["int8"], dtype=np.int8)
original["int16"] = Series(original["int16"], dtype=np.int16)
original["int32"] = original["int32"].astype(np.int32)
original["float32"] = Series(original["float32"], dtype=np.float32)
original.index.name = "index"
original.index = original.index.astype(np.int32)
copy = original.copy()
with tm.ensure_clean() as path:
original.to_stata(
path,
convert_dates={"datetime": "tc"},
convert_strl=["forced_strl"],
version=117,
)
written_and_read_again = self.read_dta(path)
# original.index is np.int32, read index is np.int64
tm.assert_frame_equal(
written_and_read_again.set_index("index"),
original,
check_index_type=False,
)
tm.assert_frame_equal(original, copy)
def test_convert_strl_name_swap(self):
original = DataFrame(
[["a" * 3000, "A", "apple"], ["b" * 1000, "B", "banana"]],
columns=["long1" * 10, "long", 1],
)
original.index.name = "index"
with tm.assert_produces_warning(pd.io.stata.InvalidColumnName):
with tm.ensure_clean() as path:
original.to_stata(path, convert_strl=["long", 1], version=117)
reread = self.read_dta(path)
reread = reread.set_index("index")
reread.columns = original.columns
tm.assert_frame_equal(reread, original, check_index_type=False)
def test_invalid_date_conversion(self):
# GH 12259
dates = [
dt.datetime(1999, 12, 31, 12, 12, 12, 12000),
dt.datetime(2012, 12, 21, 12, 21, 12, 21000),
dt.datetime(1776, 7, 4, 7, 4, 7, 4000),
]
original = pd.DataFrame(
{
"nums": [1.0, 2.0, 3.0],
"strs": ["apple", "banana", "cherry"],
"dates": dates,
}
)
with tm.ensure_clean() as path:
msg = "convert_dates key must be a column or an integer"
with pytest.raises(ValueError, match=msg):
original.to_stata(path, convert_dates={"wrong_name": "tc"})
@pytest.mark.parametrize("version", [114, 117])
def test_nonfile_writing(self, version):
# GH 21041
bio = io.BytesIO()
df = tm.makeDataFrame()
df.index.name = "index"
with tm.ensure_clean() as path:
df.to_stata(bio, version=version)
bio.seek(0)
with open(path, "wb") as dta:
dta.write(bio.read())
reread = pd.read_stata(path, index_col="index")
tm.assert_frame_equal(df, reread)
def test_gzip_writing(self):
# writing version 117 requires seek and cannot be used with gzip
df = tm.makeDataFrame()
df.index.name = "index"
with tm.ensure_clean() as path:
with gzip.GzipFile(path, "wb") as gz:
df.to_stata(gz, version=114)
with gzip.GzipFile(path, "rb") as gz:
reread = pd.read_stata(gz, index_col="index")
tm.assert_frame_equal(df, reread)
def test_unicode_dta_118(self):
unicode_df = self.read_dta(self.dta25_118)
columns = ["utf8", "latin1", "ascii", "utf8_strl", "ascii_strl"]
values = [
["ραηδας", "PÄNDÄS", "p", "ραηδας", "p"],
["ƤĀńĐąŜ", "Ö", "a", "ƤĀńĐąŜ", "a"],
["ᴘᴀᴎᴅᴀS", "Ü", "n", "ᴘᴀᴎᴅᴀS", "n"],
[" ", " ", "d", " ", "d"],
[" ", "", "a", " ", "a"],
["", "", "s", "", "s"],
["", "", " ", "", " "],
]
expected = pd.DataFrame(values, columns=columns)
tm.assert_frame_equal(unicode_df, expected)
def test_mixed_string_strl(self):
# GH 23633
output = [{"mixed": "string" * 500, "number": 0}, {"mixed": None, "number": 1}]
output = pd.DataFrame(output)
output.number = output.number.astype("int32")
with tm.ensure_clean() as path:
output.to_stata(path, write_index=False, version=117)
reread = read_stata(path)
expected = output.fillna("")
tm.assert_frame_equal(reread, expected)
# Check strl supports all None (null)
output.loc[:, "mixed"] = None
output.to_stata(
path, write_index=False, convert_strl=["mixed"], version=117
)
reread = read_stata(path)
expected = output.fillna("")
tm.assert_frame_equal(reread, expected)
@pytest.mark.parametrize("version", [114, 117])
def test_all_none_exception(self, version):
output = [{"none": "none", "number": 0}, {"none": None, "number": 1}]
output = pd.DataFrame(output)
output.loc[:, "none"] = None
with tm.ensure_clean() as path:
msg = (
r"Column `none` cannot be exported\.\n\n"
"Only string-like object arrays containing all strings or a"
r" mix of strings and None can be exported\. Object arrays"
r" containing only null values are prohibited\. Other"
" object typescannot be exported and must first be"
r" converted to one of the supported types\."
)
with pytest.raises(ValueError, match=msg):
output.to_stata(path, version=version)
@pytest.mark.parametrize("version", [114, 117])
def test_invalid_file_not_written(self, version):
content = "Here is one __�__ Another one __·__ Another one __½__"
df = DataFrame([content], columns=["invalid"])
with tm.ensure_clean() as path:
msg1 = (
r"'latin-1' codec can't encode character '\\ufffd'"
r" in position 14: ordinal not in range\(256\)"
)
msg2 = (
"'ascii' codec can't decode byte 0xef in position 14:"
r" ordinal not in range\(128\)"
)
with pytest.raises(UnicodeEncodeError, match=r"{}|{}".format(msg1, msg2)):
with tm.assert_produces_warning(ResourceWarning):
df.to_stata(path)
def test_strl_latin1(self):
# GH 23573, correct GSO data to reflect correct size
output = DataFrame(
[["pandas"] * 2, ["þâÑÐŧ"] * 2], columns=["var_str", "var_strl"]
)
with tm.ensure_clean() as path:
output.to_stata(path, version=117, convert_strl=["var_strl"])
with open(path, "rb") as reread:
content = reread.read()
expected = "þâÑÐŧ"
assert expected.encode("latin-1") in content
assert expected.encode("utf-8") in content
gsos = content.split(b"strls")[1][1:-2]
for gso in gsos.split(b"GSO")[1:]:
val = gso.split(b"\x00")[-2]
size = gso[gso.find(b"\x82") + 1]
assert len(val) == size - 1
def test_encoding_latin1_118(self):
# GH 25960
msg = """
One or more strings in the dta file could not be decoded using utf-8, and
so the fallback encoding of latin-1 is being used. This can happen when a file
has been incorrectly encoded by Stata or some other software. You should verify
the string values returned are correct."""
with tm.assert_produces_warning(UnicodeWarning) as w:
encoded = read_stata(self.dta_encoding_118)
assert len(w) == 151
assert w[0].message.args[0] == msg
expected = pd.DataFrame([["Düsseldorf"]] * 151, columns=["kreis1849"])
tm.assert_frame_equal(encoded, expected)
| bsd-3-clause |
Achuth17/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 272 | 6972 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
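# As described in the module docstring, sigmoid calibration pulls the predicted
# probability vectors towards the centre of the simplex, so sig_score is
# expected to be lower (better) than the over-confident uncalibrated score;
# both values are printed further below.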
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
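# Each per-class sigmoid calibrator is applied independently, so the calibrated
# vector is renormalized above to sum to one before plotting.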
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
Fenugreek/tamarind | density.py | 1 | 5813 | """
Find areas of high density in 2D data.
API follows that of scikit learn (e.g. sklearn.cluster.Kmeans).
"""
import numpy
from scipy.signal import convolve2d, get_window
import cPickle
def _get_window(desc, shape, size=None):
window_shape = list(shape)
for i in range(2):
if shape[i] < 1: window_shape[i] = int(shape[i] * size)
# round up to nearest odd integer
if (window_shape[i] + 1) % 2: window_shape[i] = int(window_shape[i]) + 1
    if isinstance(desc, str): return get_window(desc, window_shape)
# desc is like ('gaussian', 0.3).
# In these cases, scipy.signal.get_window() doesn't handle 2D shapes.
# So compute 1D windows first, and then multiply them together.
if desc[1] < 1:
desc = [(desc[0], int(desc[1] * s)) for s in window_shape]
else: desc = [desc, desc]
window = [get_window(d, s) for d, s in zip(desc, window_shape)]
return numpy.array(numpy.mat(window[1]).T * numpy.mat(window[0]))
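# Minimal usage sketch for _get_window (not part of the original module): a
# fractional shape is scaled by `size` and rounded up to an odd length, so
#     w = _get_window(('gaussian', 0.3), (0.1, 0.1), size=100)
# is expected to yield an 11x11 separable Gaussian window built as the outer
# product of two 1D windows.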
class KWindows(object):
"""
Find centers of high density, using local averaging and finding peaks (cf. heatmaps).
Currently implemented only for 2D data, with inefficiencies in recomputing the
convolution in some cases when unnecessary, and not using FFT.
"""
def __init__(self, K=100, min_count=0.0005, bins=100,
window=('gaussian', 0.3), shape=(0.1, 0.1), circular=True):
self.params = {'bins': bins,
'window': window,
'shape': shape,
'K': K,
'circular': circular,
'min_count': min_count}
self.window_ = _get_window(window, shape, bins)
if circular:
dist = [numpy.arange(s + 0.0) - (s - 1) / 2 for s in self.window_.shape]
dist = [d / d[-1] for d in dist]
dist = dist[0]**2 + dist[1][:, numpy.newaxis]**2
self.window_mask_ = dist <= 1.0
else: self.window_mask_ = numpy.ones(self.window_.shape, dtype=bool)
self.window_[~self.window_mask_] = 0.0
# normalize so average value inside mask is 1.0
self.window_ /= numpy.sum(self.window_) / numpy.sum(self.window_mask_)
def fit(self, x1, x2, range=None):
params = self.params
bins = params['bins']
window_center = [(s - 1) / 2 for s in self.window_.shape]
self.histogram2d_ = numpy.histogram2d(x1, x2, bins=bins, range=range)
bin_counts, bin_edges = self.histogram2d_[0], self.histogram2d_[1:]
min_count = params['min_count'] if params['min_count'] >= 1 else \
numpy.sum(bin_counts) * params['min_count']
self.first_convolution_ = convolve2d(bin_counts, self.window_, mode='valid')
max_idx = numpy.unravel_index(self.first_convolution_.argmax(),
self.first_convolution_.shape)
self.weights_ = [self.first_convolution_[max_idx]]
binX, binY = max_idx[0] + window_center[0], max_idx[1] + window_center[1]
self.bins_ = [(binX, binY)]
self.centers_ = [(numpy.mean(bin_edges[0][binX : binX + 2]),
numpy.mean(bin_edges[1][binY : binY + 2]))]
self.last_convolution_ = self.first_convolution_
self.dense_mask_ = numpy.zeros(bin_counts.shape, dtype=bool)
self.dense_mask_[max_idx[0] : binX + window_center[0] + 1,
max_idx[1] : binY + window_center[1] + 1] |= \
self.window_mask_
self.counts_ = [numpy.sum(bin_counts[self.dense_mask_])]
fill_size = bin_counts.size - numpy.sum(self.window_mask_)
while len(self.centers_) < (params['K'] or bin_counts.size) and \
self.counts_[-1] > min_count and \
(numpy.sum(self.dense_mask_) < fill_size):
bin_counts = self.histogram2d_[0].copy()
bin_counts[self.dense_mask_] = 0
convolution = convolve2d(bin_counts, self.window_, mode='valid')
# Don't find a center that's in a previously determined dense area
masked = numpy.ma.array(convolution,
mask=self.dense_mask_[window_center[0]:-window_center[0],
window_center[1]:-window_center[1]])
max_idx = numpy.unravel_index(masked.argmax(), masked.shape)
self.weights_.append(convolution[max_idx])
binX, binY = max_idx[0] + window_center[0], max_idx[1] + window_center[1]
self.bins_.append((binX, binY))
self.centers_.append((numpy.mean(bin_edges[0][binX : binX + 2]),
numpy.mean(bin_edges[1][binY : binY + 2])))
self.last_convolution_ = convolution
self.dense_mask_[max_idx[0] : binX + window_center[0] + 1,
max_idx[1] : binY + window_center[1] + 1] |= \
self.window_mask_
self.counts_.append(numpy.sum(bin_counts[self.dense_mask_]))
def dump(self, filename):
"""
Writes relevant attributes to file referenced by filename via cPickle.
Does not write to stdout.
"""
        fh = open(filename, 'wb')  # binary mode for cPickle's binary protocol (-1)
        if fh is None: raise IOError('Unable to open ' + filename)
data = {}
for attribute in ('params', 'window_', 'window_mask_',
'histogram2d_', 'weights_', 'counts_', 'bins_', 'centers_',
'first_convolution_', 'last_convolution_'):
if hasattr(self, attribute): data[attribute] = getattr(self, attribute)
cPickle.dump(data, fh, -1)
fh.close()
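# Minimal usage sketch (hypothetical data; not part of the original module):
#     x1, x2 = numpy.random.randn(10000), numpy.random.randn(10000)
#     kw = KWindows(K=20, bins=100, window=('gaussian', 0.3), shape=(0.1, 0.1))
#     kw.fit(x1, x2)
#     # kw.centers_ holds the density peaks, kw.counts_ roughly the number of
#     # points captured by each window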
| gpl-3.0 |
jason-neal/equanimous-octo-tribble | octotribble/IOmodule.py | 1 | 28786 | ################################################################################
#
# Functions to read column-separated files
#
################################################################################
import numpy as np
import pandas as pd
def pdread_2col(filename, noheader=False):
"""Read in a 2 column file with pandas.
Faster then read_2col
Parameters
----------
filename: str
Name of file to read.
noheader: bool
Flag indicating if there is no column names given in file.
Default = False.
Returns
-------
col1: ndarray
First column as float64.
col2: ndarray
Second column as float64.
"""
if noheader:
data = pd.read_table(filename, comment='#', names=["col1", "col2"],
header=None, dtype=np.float64, delim_whitespace=True)
else:
data = pd.read_table(filename, comment='#', names=["col1", "col2"],
dtype=np.float64, delim_whitespace=True)
return data["col1"].values, data["col2"].values
def pdread_3col(filename, noheader=False):
"""Read in a 3 column file with pandas.
Faster then read_3col
Parameters
----------
filename: str
Name of file to read.
noheader: bool
Flag indicating if there is no column names given in file
Returns
-------
col1: ndarray
First column as float64.
col2: ndarray
Second column as float64.
col3: ndarray
Third column as float64.
"""
if noheader:
data = pd.read_table(filename, comment='#', names=["col1", "col2", "col3"],
header=None, dtype=np.float64, delim_whitespace=True)
else:
data = pd.read_table(filename, comment='#', names=["col1", "col2", "col3"],
dtype=np.float64, delim_whitespace=True)
return data["col1"].values, data["col2"].values, data["col3"].values
def read_fullcol(filename):
"""This program reads column formatted data from a file and returns a list in which each sublist correspond to the line's elements.
THE RESULT IS A LIST OF STRINGS!
"""
f = open(filename, "r")
list_data = []
while 1:
line = f.readline()
if line == "":
break
if line[0] == '#':
continue
list_data.append(line)
f.close()
return list_data
def read_col(filename):
"""This program reads column formatted data from a file and returns a list in which each sublist correspond to the line's elements.
THE RESULT IS A LIST OF STRINGS!
"""
f = open(filename, "r")
list_data = []
while 1:
line = f.readline()
if line == "":
break
if line[0] == '#':
continue
list_data.append(line.strip().split())
f.close()
return list_data
def read_col_charsplit(filename, sepchar):
"""This program reads column formatted data from a file and returns a list in which each sublist correspond to the line's elements separated by sepchar.
THE RESULT IS A LIST OF STRINGS!
"""
f = open(filename, "r")
list_data = []
while 1:
line = f.readline()
if line == "":
break
if line[0] == '#':
continue
list_data.append(line.strip().split(sepchar))
f.close()
return list_data
def read_2col(filename):
"""The same as the previous, but returns 2 vectors, corresponding each one to a column.
THE RESULTS ARE FLOAT PYTHON VECTORS.
# Note that in python all "float" are in fact "double-precision".
"""
list_data = read_col(filename)
col1 = []
col2 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
return [col1, col2]
def read_2col1str(filename):
"""The same as the previous, but returns 2 columns and the first is a string."""
list_data = read_col(filename)
col1 = []
col2 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
return [col1, col2]
def read_3col(filename):
"""The same as the previous, but returns 3 columns."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
return [col1, col2, col3]
def read_3col1str(filename):
"""The same as the previous, but returns 3 columns and the first is a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
return [col1, col2, col3]
def read_3col2str(filename):
"""The same as the previous, but returns 3 columns and the first two are strings."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(list_data[i][1])
col3.append(float(list_data[i][2]))
return [col1, col2, col3]
def read_4col(filename):
"""The same as the previous, but returns 4 columns."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
return [col1, col2, col3, col4]
def read_4col1str(filename):
"""The same as the previous, but returns 4 columns and the first is a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
return [col1, col2, col3, col4]
def read_5col(filename):
"""The same as the previous, but returns 5 columns."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
return [col1, col2, col3, col4, col5]
def read_5col1str(filename):
"""The same as the previous, but returns 5 columns of which one is a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
return [col1, col2, col3, col4, col5]
def read_6col1str(filename):
"""The same as the previous, but returns 6 columns and the first is a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
return [col1, col2, col3, col4, col5, col6]
def read_6col(filename):
"""The same as the previous, but returns 6 columns."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
return [col1, col2, col3, col4, col5, col6]
def read_7col(filename):
"""The same as the previous, but returns 5 columns."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
return [col1, col2, col3, col4, col5, col6, col7]
def read_7col1str(filename):
"""The same as the previous, but returns 7 columns being the first a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
return [col1, col2, col3, col4, col5, col6, col7]
def read_8col(filename):
"""The same as the previous, but returns 8 columns."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
return [col1, col2, col3, col4, col5, col6, col7, col8]
def read_8col1str(filename):
"""The same as the previous, but returns 8 columns the first being a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
return [col1, col2, col3, col4, col5, col6, col7, col8]
def read_9col(filename):
"""The same as the previous, but returns 9 columns the first being a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9]
def read_9col1str(filename):
"""The same as the previous, but returns 9 columns the first being a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9]
def read_10col(filename):
"""The same as the previous, but returns 11 columns."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
col10.append(float(list_data[i][9]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10]
def read_10col1strl(filename):
"""The same as the previous, but returns 10 columns with the last column being a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
col10.append(list_data[i][9])
return [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10]
def read_11col(filename):
"""The same as the previous, but returns 11 columns."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
col11 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
col10.append(float(list_data[i][9]))
col11.append(float(list_data[i][10]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11]
def read_11col1str(filename):
"""The same as the previous, but returns 11 columns the first being a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
col11 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
col10.append(float(list_data[i][9]))
col11.append(float(list_data[i][10]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11]
def read_12col(filename):
"""The same as the previous, but returns 12 columns the first being a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
col11 = []
col12 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
col10.append(float(list_data[i][9]))
col11.append(float(list_data[i][10]))
col12.append(float(list_data[i][11]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12]
def read_12col1str(filename):
"""The same as the previous, but returns 12 columns the first being a string."""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
col11 = []
col12 = []
for i, val in enumerate(list_data):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(list_data[i][0])
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
col10.append(float(list_data[i][9]))
col11.append(float(list_data[i][10]))
col12.append(float(list_data[i][11]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10, col11, col12]
def read_2col_rdb(filename):
"""The same as the previous, but returns 2 columns.
    This is particularly useful to read the "*_coralie.rdb" files
"""
list_data = read_col(filename)
col1 = []
col2 = []
for i in range(2, len(list_data)):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
return [col1, col2]
def read_3col_rdb(filename):
"""The same as the previous, but returns 3 columns.
    This is particularly useful to read the "*_coralie.rdb" files
"""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
for i in range(2, len(list_data)):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
return [col1, col2, col3]
def read_4col_rdb(filename):
"""The same as the previous, but returns 6 columns.
# This is particularly usefull to read the "*_coralie.rdb" files
"""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
for i in range(2, len(list_data)):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
return [col1, col2, col3, col4]
def read_5col_rdb(filename):
"""The same as the previous, but returns 6 columns.
# This is particularly usefull to read the "*_coralie.rdb" files
"""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
for i in range(2, len(list_data)):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
return [col1, col2, col3, col4, col5]
def read_6col_rdb(filename):
"""The same as the previous, but returns 6 columns.
    # This is particularly useful to read the "*_coralie.rdb" files
"""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
for i in range(2, len(list_data)):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
return [col1, col2, col3, col4, col5, col6]
def read_7col_rdb(filename):
"""The same as the previous, but returns 7 columns.
    # This is particularly useful to read the "*_coralie.rdb" files
"""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
for i in range(2, len(list_data)):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
return [col1, col2, col3, col4, col5, col6, col7]
def read_10col_rdb(filename):
"""The same as the previous, but returns 10 columns.
    # This is particularly useful to read the "*_coralie.rdb" files
"""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
for i in range(2, len(list_data)):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
col10.append(float(list_data[i][9]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9, col10]
def read_15col_rdb(filename):
"""The same as the previous, but returns 10 columns.
# This is particularly usefull to read the "*_coralie.rdb" files
"""
list_data = read_col(filename)
col1 = []
col2 = []
col3 = []
col4 = []
col5 = []
col6 = []
col7 = []
col8 = []
col9 = []
col10 = []
col11 = []
col12 = []
col13 = []
col14 = []
col15 = []
for i in range(2, len(list_data)):
# checking if the line is valid
if(list_data[i][0][0] != '#'):
col1.append(float(list_data[i][0]))
col2.append(float(list_data[i][1]))
col3.append(float(list_data[i][2]))
col4.append(float(list_data[i][3]))
col5.append(float(list_data[i][4]))
col6.append(float(list_data[i][5]))
col7.append(float(list_data[i][6]))
col8.append(float(list_data[i][7]))
col9.append(float(list_data[i][8]))
col10.append(float(list_data[i][9]))
col11.append(float(list_data[i][10]))
col12.append(float(list_data[i][11]))
col13.append(float(list_data[i][12]))
col14.append(float(list_data[i][13]))
col15.append(float(list_data[i][14]))
return [col1, col2, col3, col4, col5, col6, col7, col8, col9,
col10, col11, col12, col13, col14, col15]
def read_Gauss():
"""The same as the previous, but returns 10 columns."""
list_data = read_col(("/scisoft/i386/Packages/Python-2.4.3/myownpackages/GaussDist4py.txt"))
gauss_data = []
for i, val in enumerate(list_data):
for j, val in enumerate(list_data[0]):
gauss_data.append(float(list_data[i][j]))
return gauss_data
################################################################################
#
# Functions to write files in column-separated formats
#
################################################################################
def write_2col(filename, data1, data2):
"""Write data in 2 columns separated by tabs in a "filename" file."""
f = open(filename, "w")
for i, val in enumerate(data1):
f.write("\t" + str(data1[i]) + "\t\t" + str(data2[i]) + "\n")
f.close()
def write_3col(filename, data1, data2, data3):
"""Write data in 2 columns separated by tabs in a "filename" file."""
f = open(filename, "w")
for i, val in enumerate(data1):
f.write("\t" + str(data1[i]) + "\t\t" + str(data2[i]) + "\t\t" + str(data3[i]) + "\n")
f.close()
def write_4col(filename, data1, data2, data3, data4):
"""Write data in 2 columns separated by tabs in a "filename" file."""
f = open(filename, "w")
for i, val in enumerate(data1):
f.write("\t" + str(data1[i]) + "\t\t" + str(data2[i]) + "\t\t" + str(data3[i]) +
"\t\t" + str(data4[i]) + "\n")
f.close()
def write_5col(filename, data1, data2, data3, data4, data5):
"""Write data in 5 columns separated by tabs in a "filename" file."""
f = open(filename, "w")
for i, val in enumerate(data1):
f.write("\t" + str(data1[i]) + "\t\t" + str(data2[i]) + "\t\t" + str(data3[i]) +
"\t\t" + str(data4[i]) + "\t\t" + str(data5[i]) + "\n")
f.close()
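# Example usage (hypothetical data): the writers simply tab-separate the given
# equal-length sequences, e.g.
#     write_3col("out.dat", [1, 2], [3.0, 4.0], ["a", "b"])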
| mit |
proto-n/Alpenglow | python/examples/sum_factor_and_popularity.py | 2 | 2570 | import alpenglow as ag
import alpenglow.Getter as rs
import alpenglow.experiments
import alpenglow.evaluation
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
data = pd.read_csv(
"http://info.ilab.sztaki.hu/~fbobee/alpenglow/recoded_online_id_artist_first_filtered",
sep=' ',
header=None,
names=['time', 'user', 'item', 'id', 'score', 'eval'],
nrows=100000
)
class MyExperiment(ag.OnlineExperiment):
'''
    This sample experiment contains a combined model. The combined model consists of a popularity
    model and a factor model. The prediction of the combined model is the equally weighted sum of the
    predictions of the popularity model and the factor model: pred_comb = 0.5*pred_pop + 0.5*pred_factor.
'''
def _config(self, top_k, seed):
model = rs.CombinedModel(**self.parameter_defaults(
los_file_name="my_log_file",
log_frequency=100000,
use_user_weights=False,
))
pop_model = rs.PopularityModel()
model.add_model(pop_model)
pop_updater = rs.PopularityModelUpdater()
pop_updater.set_model(pop_model)
factor_model = rs.FactorModel(**self.parameter_defaults(
begin_min=-0.01,
begin_max=0.01,
dimension=10,
initialize_all=False,
))
model.add_model(factor_model)
factor_updater = rs.FactorModelGradientUpdater(**self.parameter_defaults(
learning_rate=0.05,
regularization_rate=0.0
))
factor_updater.set_model(factor_model)
objective = rs.ObjectiveMSE()
gradient_computer = rs.GradientComputerPointWise()
gradient_computer.set_objective(objective)
gradient_computer.set_model(factor_model)
gradient_computer.add_gradient_updater(factor_updater)
negative_sample_generator = rs.UniformNegativeSampleGenerator(**self.parameter_defaults(
negative_rate=10,
initialize_all=False,
seed=67439852,
filter_repeats=False,
))
negative_sample_generator.add_updater(gradient_computer)
return (model, [pop_updater, negative_sample_generator], [], [])
experiment = MyExperiment(top_k=100, seed=254938879)
rankings = experiment.run(data, verbose=True)
rankings['dcg'] = ag.evaluation.DcgScore(rankings)
day_groups = (rankings['time']-rankings['time'].min())//86400
daily_avg = rankings['dcg'].groupby(day_groups).mean()
plt.figure()
daily_avg.plot()
plt.savefig("sumexperiment.png")
| apache-2.0 |
Bioh4z4rd/scapy | scapy/plist.py | 7 | 20977 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
PacketList: holds several packets and allows to do operations on them.
"""
import os,subprocess
from .config import conf
from .base_classes import BasePacket,BasePacketList
from collections import defaultdict
from .utils import do_graph,hexdump,make_table,make_lined_table,make_tex_table,get_temp_file
from scapy.arch import NETWORKX
if NETWORKX:
    import networkx as nx
# matplotlib is needed by the plot()/diffplot()/multiplot() methods below
try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None
#############
## Results ##
#############
class PacketList(BasePacketList):
res = []
def __init__(self, res=None, name="PacketList", stats=None, vector_index = None):
"""create a packet list from a list of packets
res: the list of packets
stats: a list of classes that will appear in the stats (defaults to [TCP,UDP,ICMP])"""
if stats is None:
stats = conf.stats_classic_protocols
self.stats = stats
if res is None:
res = []
if isinstance(res, PacketList):
res = res.res
self.res = res
self.listname = name
self.vector_index = vector_index
def __len__(self):
return len(self.res)
def _elt2pkt(self, elt):
if self.vector_index == None:
return elt
else:
return elt[self.vector_index]
def _elt2sum(self, elt):
if self.vector_index == None:
return elt.summary()
else:
return "%s ==> %s" % (elt[0].summary(),elt[1].summary())
def _elt2show(self, elt):
return self._elt2sum(elt)
def __repr__(self):
stats=dict.fromkeys(self.stats,0)
other = 0
for r in self.res:
f = 0
for p in stats:
if self._elt2pkt(r).haslayer(p):
stats[p] += 1
f = 1
break
if not f:
other += 1
s = ""
ct = conf.color_theme
for p in self.stats:
s += " %s%s%s" % (ct.packetlist_proto(p.name),
ct.punct(":"),
ct.packetlist_value(stats[p]))
s += " %s%s%s" % (ct.packetlist_proto("Other"),
ct.punct(":"),
ct.packetlist_value(other))
return "%s%s%s%s%s" % (ct.punct("<"),
ct.packetlist_name(self.listname),
ct.punct(":"),
s,
ct.punct(">"))
def __getattr__(self, attr):
return getattr(self.res, attr)
def __getitem__(self, item):
if isinstance(item,type) and issubclass(item,BasePacket):
#return self.__class__(filter(lambda x: item in self._elt2pkt(x),self.res),
return self.__class__([ x for x in self.res if item in self._elt2pkt(x) ],
name="%s from %s"%(item.__name__,self.listname))
if type(item) is slice:
return self.__class__(self.res.__getitem__(item),
name = "mod %s" % self.listname)
return self.res.__getitem__(item)
def __getslice__(self, *args, **kargs):
return self.__class__(self.res.__getslice__(*args, **kargs),
name="mod %s"%self.listname)
def __add__(self, other):
return self.__class__(self.res+other.res,
name="%s+%s"%(self.listname,other.listname))
def summary(self, prn=None, lfilter=None):
"""prints a summary of each packet
prn: function to apply to each packet instead of lambda x:x.summary()
lfilter: truth function to apply to each packet to decide whether it will be displayed"""
for r in self.res:
if lfilter is not None:
if not lfilter(r):
continue
if prn is None:
print(self._elt2sum(r))
else:
print(prn(r))
def nsummary(self,prn=None, lfilter=None):
"""prints a summary of each packet with the packet's number
prn: function to apply to each packet instead of lambda x:x.summary()
lfilter: truth function to apply to each packet to decide whether it will be displayed"""
for i, p in enumerate(self.res):
if lfilter is not None:
if not lfilter(p):
continue
print(conf.color_theme.id(i,fmt="%04i"), end = " ")
if prn is None:
print(self._elt2sum(p))
else:
print(prn(p))
def display(self): # Deprecated. Use show()
"""deprecated. is show()"""
self.show()
def show(self, *args, **kargs):
"""Best way to display the packet list. Defaults to nsummary() method"""
return self.nsummary(*args, **kargs)
def filter(self, func):
"""Returns a packet list filtered by a truth function"""
return self.__class__(list(filter(func,self.res)),
name="filtered %s"%self.listname)
def plot(self, f, lfilter=None,**kargs):
"""Applies a function to each packet to get a value that will be plotted with matplotlib. A matplotlib object is returned
        lfilter: a truth function that decides whether a packet must be plotted"""
return plt.plot([ f(i) for i in self.res if not lfilter or lfilter(i) ], **kargs)
def diffplot(self, f, delay=1, lfilter=None, **kargs):
"""diffplot(f, delay=1, lfilter=None)
Applies a function to couples (l[i],l[i+delay])"""
return plt.plot([ f(i, j) for i in self.res[:-delay] for j in self.res[delay:] if not lfilter or (lfilter(i) and lfilter(j))],
**kargs)
def multiplot(self, f, lfilter=None, **kargs):
"""Uses a function that returns a label and a value for this label, then plots all the values label by label"""
d = defaultdict(list)
for i in self.res:
if lfilter and not lfilter(i):
continue
k, v = f(i)
d[k].append(v)
figure = plt.figure()
ax = figure.add_axes(plt.axes())
for i in d:
ax.plot(d[i], **kargs)
return figure
def rawhexdump(self):
"""Prints an hexadecimal dump of each packet in the list"""
for p in self:
hexdump(self._elt2pkt(p))
def hexraw(self, lfilter=None):
"""Same as nsummary(), except that if a packet has a Raw layer, it will be hexdumped
lfilter: a truth function that decides whether a packet must be displayed"""
for i,p in enumerate(self.res):
p1 = self._elt2pkt(p)
if lfilter is not None and not lfilter(p1):
continue
print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
p1.sprintf("%.time%"),
self._elt2sum(p)))
if p1.haslayer(conf.raw_layer):
hexdump(p1.getlayer(conf.raw_layer).load)
def hexdump(self, lfilter=None):
"""Same as nsummary(), except that packets are also hexdumped
lfilter: a truth function that decides whether a packet must be displayed"""
for i,p in enumerate(self.res):
p1 = self._elt2pkt(p)
if lfilter is not None and not lfilter(p1):
continue
print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
p1.sprintf("%.time%"),
self._elt2sum(p)))
hexdump(p1)
def padding(self, lfilter=None):
"""Same as hexraw(), for Padding layer"""
for i,p in enumerate(self.res):
p1 = self._elt2pkt(p)
if p1.haslayer(conf.padding_layer):
if lfilter is None or lfilter(p1):
print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
p1.sprintf("%.time%"),
self._elt2sum(p)))
hexdump(p1.getlayer(conf.padding_layer).load)
def nzpadding(self, lfilter=None):
"""Same as padding() but only non null padding"""
for i,p in enumerate(self.res):
p1 = self._elt2pkt(p)
if p1.haslayer(conf.padding_layer):
pad = p1.getlayer(conf.padding_layer).load
if pad == pad[0]*len(pad):
continue
if lfilter is None or lfilter(p1):
print("%s %s %s" % (conf.color_theme.id(i,fmt="%04i"),
p1.sprintf("%.time%"),
self._elt2sum(p)))
hexdump(p1.getlayer(conf.padding_layer).load)
def conversations(self, getsrcdst=None, draw = True, **kargs):
"""Graphes a conversations between sources and destinations and display it
(using graphviz)
getsrcdst: a function that takes an element of the list and return the source and dest
by defaults, return source and destination IP
if networkx library is available returns a DiGraph, or draws it if draw = True otherwise graphviz is used
format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option
target: output filename. If None, matplotlib is used to display
prog: which graphviz program to use"""
if getsrcdst is None:
getsrcdst = lambda x:(x['IP'].src, x['IP'].dst)
conv = {}
for p in self.res:
p = self._elt2pkt(p)
try:
c = getsrcdst(p)
except:
#XXX warning()
continue
conv[c] = conv.get(c,0)+1
if NETWORKX: # networkx is available
gr = nx.DiGraph()
for s,d in conv:
if s not in gr:
gr.add_node(s)
if d not in gr:
gr.add_node(d)
gr.add_edge(s, d)
if draw:
return do_graph(gr, **kargs)
else:
return gr
else:
gr = 'digraph "conv" {\n'
for s,d in conv:
gr += '\t "%s" -> "%s"\n' % (s,d)
gr += "}\n"
return do_graph(gr, **kargs)
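    # Usage sketch for conversations() (hypothetical capture `pkts`):
    #     pkts.conversations()                                    # IP src -> IP dst
    #     pkts.conversations(lambda p: (p['IP'].src, p['TCP'].dport))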
def afterglow(self, src=None, event=None, dst=None, **kargs):
"""Experimental clone attempt of http://sourceforge.net/projects/afterglow
each datum is reduced as src -> event -> dst and the data are graphed.
by default we have IP.src -> IP.dport -> IP.dst"""
if src is None:
src = lambda x: x['IP'].src
if event is None:
event = lambda x: x['IP'].dport
if dst is None:
dst = lambda x: x['IP'].dst
sl = {}
el = {}
dl = {}
for i in self.res:
try:
s,e,d = src(i),event(i),dst(i)
if s in sl:
n,l = sl[s]
n += 1
if e not in l:
l.append(e)
sl[s] = (n,l)
else:
sl[s] = (1,[e])
if e in el:
n,l = el[e]
n+=1
if d not in l:
l.append(d)
el[e] = (n,l)
else:
el[e] = (1,[d])
dl[d] = dl.get(d,0)+1
except:
continue
import math
def normalize(n):
return 2+math.log(n)/4.0
def minmax(x):
m,M = min(x),max(x)
if m == M:
m = 0
if M == 0:
M = 1
return m,M
#mins,maxs = minmax(map(lambda (x,y): x, sl.values()))
mins,maxs = minmax([ a[0] for a in sl.values()])
#mine,maxe = minmax(map(lambda (x,y): x, el.values()))
mine,maxe = minmax([ a[0] for a in el.values()])
mind,maxd = minmax(dl.values())
gr = 'digraph "afterglow" {\n\tedge [len=2.5];\n'
gr += "# src nodes\n"
for s in sl:
n,l = sl[s]; n = 1+(n-mins)/(maxs-mins)
gr += '"src.%s" [label = "%s", shape=box, fillcolor="#FF0000", style=filled, fixedsize=1, height=%.2f,width=%.2f];\n' % (repr(s),repr(s),n,n)
gr += "# event nodes\n"
for e in el:
            n,l = el[e]; n = 1+(n-mine)/(maxe-mine)
gr += '"evt.%s" [label = "%s", shape=circle, fillcolor="#00FFFF", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(e),repr(e),n,n)
for d in dl:
            n = dl[d]; n = 1+(n-mind)/(maxd-mind)
gr += '"dst.%s" [label = "%s", shape=triangle, fillcolor="#0000ff", style=filled, fixedsize=1, height=%.2f, width=%.2f];\n' % (repr(d),repr(d),n,n)
gr += "###\n"
for s in sl:
n,l = sl[s]
for e in l:
gr += ' "src.%s" -> "evt.%s";\n' % (repr(s),repr(e))
for e in el:
n,l = el[e]
for d in l:
gr += ' "evt.%s" -> "dst.%s";\n' % (repr(e),repr(d))
gr += "}"
return do_graph(gr, **kargs)
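    # Usage sketch (hypothetical capture `pkts`), using the default
    # IP.src -> IP.dport -> IP.dst reduction:
    #     pkts.afterglow()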
def _dump_document(self, **kargs):
import pyx
d = pyx.document.document()
l = len(self.res)
for i in range(len(self.res)):
elt = self.res[i]
c = self._elt2pkt(elt).canvas_dump(**kargs)
cbb = c.bbox()
c.text(cbb.left(),cbb.top()+1,r"\font\cmssfont=cmss12\cmssfont{Frame %i/%i}" % (i,l),[pyx.text.size.LARGE])
if conf.verb >= 2:
os.write(1,b".")
d.append(pyx.document.page(c, paperformat=pyx.document.paperformat.A4,
margin=1*pyx.unit.t_cm,
fittosize=1))
return d
def psdump(self, filename = None, **kargs):
"""Creates a multipage poscript file with a psdump of every packet
filename: name of the file to write to. If empty, a temporary file is used and
conf.prog.psreader is called"""
d = self._dump_document(**kargs)
if filename is None:
filename = get_temp_file(autoext=".ps")
d.writePSfile(filename)
subprocess.Popen([conf.prog.psreader, filename+".ps"])
else:
d.writePSfile(filename)
        print()
def pdfdump(self, filename = None, **kargs):
"""Creates a PDF file with a psdump of every packet
filename: name of the file to write to. If empty, a temporary file is used and
conf.prog.pdfreader is called"""
d = self._dump_document(**kargs)
if filename is None:
filename = get_temp_file(autoext=".pdf")
d.writePDFfile(filename)
subprocess.Popen([conf.prog.pdfreader, filename+".pdf"])
else:
d.writePDFfile(filename)
        print()
def sr(self,multi=0):
"""sr([multi=1]) -> (SndRcvList, PacketList)
Matches packets in the list and return ( (matched couples), (unmatched packets) )"""
remain = self.res[:]
sr = []
i = 0
while i < len(remain):
s = remain[i]
j = i
while j < len(remain)-1:
j += 1
r = remain[j]
if r.answers(s):
sr.append((s,r))
if multi:
remain[i]._answered=1
remain[j]._answered=2
continue
del(remain[j])
del(remain[i])
i -= 1
break
i += 1
if multi:
            remain = list(filter(lambda x: not hasattr(x, "_answered"), remain))
return SndRcvList(sr),PacketList(remain)
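    # Usage sketch (hypothetical list `pkts`): pair up requests with their
    # answers and keep the rest separately:
    #     answered, unanswered = pkts.sr()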
def sessions(self, session_extractor=None):
if session_extractor is None:
def session_extractor(p):
sess = "Other"
if 'Ether' in p:
if 'IP' in p:
if 'TCP' in p:
sess = p.sprintf("TCP %IP.src%:%r,TCP.sport% > %IP.dst%:%r,TCP.dport%")
elif 'UDP' in p:
sess = p.sprintf("UDP %IP.src%:%r,UDP.sport% > %IP.dst%:%r,UDP.dport%")
elif 'ICMP' in p:
sess = p.sprintf("ICMP %IP.src% > %IP.dst% type=%r,ICMP.type% code=%r,ICMP.code% id=%ICMP.id%")
else:
sess = p.sprintf("IP %IP.src% > %IP.dst% proto=%IP.proto%")
elif 'ARP' in p:
sess = p.sprintf("ARP %ARP.psrc% > %ARP.pdst%")
else:
sess = p.sprintf("Ethernet type=%04xr,Ether.type%")
return sess
sessions = defaultdict(self.__class__)
for p in self.res:
sess = session_extractor(self._elt2pkt(p))
sessions[sess].append(p)
return dict(sessions)
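    # Usage sketch (hypothetical list `pkts`): group packets into flows keyed by
    # the default TCP/UDP/ICMP/ARP summaries:
    #     for name, flow in pkts.sessions().items():
    #         print(name, len(flow))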
def replace(self, *args, **kargs):
"""
lst.replace(<field>,[<oldvalue>,]<newvalue>)
lst.replace( (fld,[ov],nv),(fld,[ov,]nv),...)
if ov is None, all values are replaced
ex:
lst.replace( IP.src, "192.168.1.1", "10.0.0.1" )
lst.replace( IP.ttl, 64 )
lst.replace( (IP.ttl, 64), (TCP.sport, 666, 777), )
"""
delete_checksums = kargs.get("delete_checksums",False)
x=PacketList(name="Replaced %s" % self.listname)
if type(args[0]) is not tuple:
args = (args,)
for p in self.res:
p = self._elt2pkt(p)
copied = False
for scheme in args:
fld = scheme[0]
old = scheme[1] # not used if len(scheme) == 2
new = scheme[-1]
for o in fld.owners:
if o in p:
if len(scheme) == 2 or p[o].getfieldval(fld.name) == old:
if not copied:
p = p.copy()
if delete_checksums:
p.delete_checksums()
copied = True
setattr(p[o], fld.name, new)
x.append(p)
return x
class SndRcvList(PacketList):
def __init__(self, res=None, name="Results", stats=None):
PacketList.__init__(self, res, name, stats, vector_index = 1)
def summary(self, prn=None, lfilter=None):
"""prints a summary of each SndRcv packet pair
prn: function to apply to each packet pair instead of lambda s, r: "%s ==> %s" % (s.summary(),r.summary())
lfilter: truth function to apply to each packet pair to decide whether it will be displayed"""
for s, r in self.res:
if lfilter is not None:
if not lfilter(s, r):
continue
if prn is None:
print(self._elt2sum((s, r)))
else:
print(prn(s, r))
def nsummary(self,prn=None, lfilter=None):
"""prints a summary of each SndRcv packet pair with the pair's number
prn: function to apply to each packet pair instead of lambda s, r: "%s ==> %s" % (s.summary(),r.summary())
lfilter: truth function to apply to each packet pair to decide whether it will be displayed"""
for i, (s, r) in enumerate(self.res):
if lfilter is not None:
if not lfilter(s, r):
continue
print(conf.color_theme.id(i,fmt="%04i"), end = " ")
if prn is None:
print(self._elt2sum((s, r)))
else:
print(prn(s, r))
def filter(self, func):
"""Returns a SndRcv list filtered by a truth function"""
return self.__class__( [ i for i in self.res if func(*i) ], name='filtered %s'%self.listname)
def make_table(self, *args, **kargs):
"""Prints a table using a function that returs for each packet its head column value, head row value and displayed value
ex: p.make_table(lambda s, r:(s[IP].dst, r[TCP].sport, s[TCP].sprintf("%flags%")) """
return make_table(self.res, *args, **kargs)
def make_lined_table(self, *args, **kargs):
"""Same as make_table, but print a table with lines"""
return make_lined_table(self.res, *args, **kargs)
def make_tex_table(self, *args, **kargs):
"""Same as make_table, but print a table with LaTeX syntax"""
return make_tex_table(self.res, *args, **kargs)
| gpl-2.0 |
AlexisEidelman/Til | til/pgm/depart_retirement.py | 2 | 1083 | # -*- coding: utf-8 -*-
import sys
from numpy import maximum, array, ones
from pandas import Series
from utils import output_til_to_liam
from til.pgm.run_pension import run_pension
def depart_retirement(context, yearleg, time_step='year', to_check=False, behavior='taux_plein', cProfile=False):
''' This function returns a vector of booleans indicating which individuals retire.
TODO: when the retirement behaviours become more complex, create the associated .py modules'''
if behavior == 'taux_plein':
dates_tauxplein = run_pension(context, yearleg,
time_step=time_step, to_check=to_check,
output='dates_taux_plein', cProfile=cProfile)
# np.maximum is binary (a third positional argument is `out`), so nest the calls
# to take the element-wise max across the three schemes
date_tauxplein = maximum(maximum(dates_tauxplein['RSI'], dates_tauxplein['RG']), dates_tauxplein['FP'])
dates = output_til_to_liam(output_til=date_tauxplein,
index_til=dates_tauxplein['index'],
context_id=context['id'])
return dates.astype(int)
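# Minimal usage sketch (illustrative only; the exact context structure is an
# assumption): inside a Til/liam2 simulation step the call is expected to look
# roughly like
#     leaving = depart_retirement(context, yearleg=2009)
# where `context` is the simulated-entity context exposing an 'id' column.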
| gpl-3.0 |
david-ragazzi/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/text.py | 69 | 55366 | """
Classes for including text in a figure.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import artist
from artist import Artist
from cbook import is_string_like, maxdict
from font_manager import FontProperties
from patches import bbox_artist, YAArrow, FancyBboxPatch, \
FancyArrowPatch, Rectangle
import transforms as mtransforms
from transforms import Affine2D, Bbox
from lines import Line2D
import matplotlib.nxutils as nxutils
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
"""
Return the text angle as float.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
if rotation in ('horizontal', None):
angle = 0.
elif rotation == 'vertical':
angle = 90.
else:
angle = float(rotation)
return angle%360
# these are not available for the object inspector until after the
# class is built, so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Text'] = """
========================== =========================================================================
Property Value
========================== =========================================================================
alpha float
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a pad in points
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family [ 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties instance
horizontalalignment or ha [ 'center' | 'right' | 'left' ]
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string eg, ['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
rotation [ angle in degrees 'vertical' | 'horizontal'
size or fontsize [ size in points | relative size eg 'smaller', 'x-large' ]
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
variant [ 'normal' | 'small-caps' ]
verticalalignment or va [ 'center' | 'top' | 'bottom' | 'baseline' ]
visible [True | False]
weight or fontweight [ 'normal' | 'bold' | 'heavy' | 'light' | 'ultrabold' | 'ultralight']
x float
y float
zorder any number
========================== =========================================================================
"""
# TODO : This function may move into the Text class as a method. As a
# matter of fact, the information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within the _get_textbox. So, it would better to move this
# function as a method with some refactoring of _get_layout method.
def _get_textbox(text, renderer):
"""
Calculate the bounding box of the text. Unlike
the :meth:`matplotlib.text.Text.get_extents` method, the bbox size of
the text before rotation is calculated.
"""
projected_xs = []
projected_ys = []
theta = text.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(-theta)
for t, wh, x, y in text._get_layout(renderer)[1]:
w, h = wh
xt1, yt1 = tr.transform_point((x, y))
xt2, yt2 = xt1+w, yt1+h
projected_xs.extend([xt1, xt2])
projected_ys.extend([yt1, yt2])
xt_box, yt_box = min(projected_xs), min(projected_ys)
w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
tr = mtransforms.Affine2D().rotate(theta)
x_box, y_box = tr.transform_point((xt_box, yt_box))
return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
def __str__(self):
return "Text(%g,%g,%s)"%(self._y,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='bottom',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self.cached = maxdict(5)
self._x, self._y = x, y
if color is None: color = rcParams['text.color']
if fontproperties is None: fontproperties=FontProperties()
elif is_string_like(fontproperties): fontproperties=FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox = None
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.update(kwargs)
#self.set_bbox(dict(pad=0))
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible() or self._renderer is None:
return False,{}
l,b,w,h = self.get_window_extent().bounds
r = l+w
t = b+h
xyverts = (l,b), (l, t), (r, t), (r, b)
x, y = mouseevent.x, mouseevent.y
inside = nxutils.pnpoly(x, y, xyverts)
return inside,{}
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_position()
return self.get_transform().transform_point((x,y))
def _get_multialignment(self):
if self._multialignment is not None: return self._multialignment
else: return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
def _get_layout(self, renderer):
key = self.get_prop_tup()
if key in self.cached: return self.cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self._text.split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, heightt, bl = renderer.get_text_width_height_descent(
'lp', self._fontproperties, ismath=False)
offsety = heightt * self._linespacing
baseline = None
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line)
w, h, d = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
if baseline is None:
baseline = h - d
whs[i] = w, h
horizLayout[i] = thisx, thisy, w, h
thisy -= offsety
width = max(width, w)
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax-ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
if len(lines)>1: # do the multiline alignment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width/2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
# Now move the box to the target position, offsetting the display bbox by alignment
halign = self._horizontalalignment
valign = self._verticalalignment
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign=='center': offsetx = (xmin + width/2.0)
elif halign=='right': offsetx = (xmin + width)
else: offsetx = xmin
if valign=='center': offsety = (ymin + height/2.0)
elif valign=='top': offsety = (ymin + height)
elif valign=='baseline': offsety = (ymin + height) - baseline
else: offsety = ymin
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, zip(lines, whs, xs, ys)
self.cached[key] = ret
return ret
def set_bbox(self, rectprops):
"""
Draw a bounding box around self. rectprops are any settable
properties for a rectangle, eg facecolor='red', alpha=0.5.
t.set_bbox(dict(facecolor='red', alpha=0.5))
If rectprops has a "boxstyle" key, a FancyBboxPatch
is initialized with rectprops and will be drawn. The mutation
scale of the FancyBboxPatch is set to the fontsize.
ACCEPTS: rectangle prop dict
"""
# The self._bbox_patch object is created only if rectprops has
# boxstyle key. Otherwise, self._bbox will be set to the
# rectprops and the bbox will be drawn using bbox_artist
# function. This is to keep the backward compatibility.
if rectprops is not None and "boxstyle" in rectprops:
props = rectprops.copy()
boxstyle = props.pop("boxstyle")
bbox_transmuter = props.pop("bbox_transmuter", None)
self._bbox_patch = FancyBboxPatch((0., 0.),
1., 1.,
boxstyle=boxstyle,
bbox_transmuter=bbox_transmuter,
transform=mtransforms.IdentityTransform(),
**props)
self._bbox = None
else:
self._bbox_patch = None
self._bbox = rectprops
def get_bbox_patch(self):
"""
Return the bbox Patch object. Returns None if the
FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
# For arrow_patch, use textbox as patchA by default.
if not isinstance(self.arrow_patch, FancyArrowPatch):
return
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
#self._bbox_patch.draw(renderer)
else:
props = self._bbox
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = self.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
self.arrow_patch.set_patchA(r)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBoxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
if self._text=='': return
bbox, info = self._get_layout(renderer)
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if self._bbox_patch:
self._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(self._color)
gc.set_alpha(self._alpha)
gc.set_url(self._url)
if self.get_clip_on():
gc.set_clip_rectangle(self.clipbox)
if self._bbox:
bbox_artist(self, renderer, self._bbox)
angle = self.get_rotation()
if rcParams['text.usetex']:
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_tex(gc, x, y, clean_line,
self._fontproperties, angle)
return
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_text(gc, x, y, clean_line,
self._fontproperties, angle,
ismath=ismath)
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
x, y = self.get_position()
return (x, y, self._text, self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties), self._rotation,
self.figure.dpi, id(self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`getverticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
#return _unit_box
if not self.get_visible(): return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self._text == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx,ty,0,0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info = self._get_layout(self._renderer)
x, y = self.get_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
ACCEPTS: any matplotlib color
"""
if self._bbox is None:
self._bbox = dict(facecolor=color, edgecolor=color)
else:
self._bbox.update(dict(facecolor=color))
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' % str(legal))
self._horizontalalignment = align
def set_ma(self, align):
'alias for set_verticalalignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
Set the alignment for multiple lines layout. The layout of the
bounding box of all the lines is determined by the horizontalalignment
and verticalalignment properties, but the multiline text within that
box can be
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' % str(legal))
self._multialignment = align
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
"""
self._fontproperties.set_family(fontname)
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' % str(legal))
self._verticalalignment = align
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '%s' % (s,)
def is_math_text(self, s):
"""
Returns True if the given string *s* contains any mathtext.
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat it as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
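# Illustrative behaviour (assuming rcParams['text.usetex'] is False):
#     t.is_math_text(r'cost: \$5')  ->  ('cost: $5', False)
#     t.is_math_text(r'$\alpha$')   ->  (r'$\alpha$', True)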
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
artist.kwdocd['Text'] = artist.kwdoc(Text)
Text.__init__.im_func.__doc__ = cbook.dedent(Text.__init__.__doc__) % artist.kwdocd
class TextWithDash(Text):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (I.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
"""
__name__ = 'textwithdash'
def __str__(self):
return "TextWithDash(%g,%g,%s)"%(self._x,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='center',
horizontalalignment='center',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
dashlength=0.0,
dashdirection=0,
dashrotation=None,
dashpad=3,
dashpush=0,
):
Text.__init__(self, x=x, y=y, text=text, color=color,
verticalalignment=verticalalignment,
horizontalalignment=horizontalalignment,
multialignment=multialignment,
fontproperties=fontproperties,
rotation=rotation,
linespacing=linespacing)
# The position (x,y) values for text and dashline
# are bogus as given in the instantiation; they will
# be set correctly by update_coords() in draw()
self.dashline = Line2D(xdata=(x, x),
ydata=(y, y),
color='k',
linestyle='-')
self._dashx = float(x)
self._dashy = float(y)
self._dashlength = dashlength
self._dashdirection = dashdirection
self._dashrotation = dashrotation
self._dashpad = dashpad
self._dashpush = dashpush
#self.set_bbox(dict(pad=0))
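# Minimal usage sketch (illustrative only): a TextWithDash is an ordinary
# Artist, so it can be added to an Axes directly, e.g.
#     t = TextWithDash(0.5, 0.5, 'label', dashlength=10.0, dashpad=5)
#     ax.add_artist(t)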
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._dashx))
y = float(self.convert_yunits(self._dashy))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
props = [p for p in Text.get_prop_tup(self)]
props.extend([self._x, self._y, self._dashlength, self._dashdirection, self._dashrotation, self._dashpad, self._dashpush])
return tuple(props)
def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
def update_coords(self, renderer):
"""
Computes the actual *x*, *y* coordinates for text based on the
input *x*, *y* and the *dashlength*. Since the rotation is
with respect to the actual canvas's coordinates we need to map
back and forth.
"""
dashx, dashy = self.get_position()
dashlength = self.get_dashlength()
# Shortcircuit this process if we don't have a dash
if dashlength == 0.0:
self._x, self._y = dashx, dashy
return
dashrotation = self.get_dashrotation()
dashdirection = self.get_dashdirection()
dashpad = self.get_dashpad()
dashpush = self.get_dashpush()
angle = get_rotation(dashrotation)
theta = np.pi*(angle/180.0+dashdirection-1)
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
transform = self.get_transform()
# Compute the dash end points
# The 'c' prefix is for canvas coordinates
cxy = transform.transform_point((dashx, dashy))
cd = np.array([cos_theta, sin_theta])
c1 = cxy+dashpush*cd
c2 = cxy+(dashpush+dashlength)*cd
inverse = transform.inverted()
(x1, y1) = inverse.transform_point(tuple(c1))
(x2, y2) = inverse.transform_point(tuple(c2))
self.dashline.set_data((x1, x2), (y1, y2))
# We now need to extend this vector out to
# the center of the text area.
# The basic problem here is that we're "rotating"
# two separate objects but want it to appear as
# if they're rotated together.
# This is made non-trivial because of the
# interaction between text rotation and alignment -
# text alignment is based on the bbox after rotation.
# We reset/force both alignments to 'center'
# so we can do something relatively reasonable.
# There's probably a better way to do this by
# embedding all this in the object's transformations,
# but I don't grok the transformation stuff
# well enough yet.
we = Text.get_window_extent(self, renderer=renderer)
w, h = we.width, we.height
# Watch for zeros
if sin_theta == 0.0:
dx = w
dy = 0.0
elif cos_theta == 0.0:
dx = 0.0
dy = h
else:
tan_theta = sin_theta/cos_theta
dx = w
dy = w*tan_theta
if dy > h or dy < -h:
dy = h
dx = h/tan_theta
cwd = np.array([dx, dy])/2
cwd *= 1+dashpad/np.sqrt(np.dot(cwd,cwd))
cw = c2+(dashdirection*2-1)*cwd
newx, newy = inverse.transform_point(tuple(cw))
self._x, self._y = newx, newy
# Now set the window extent
# I'm not at all sure this is the right way to do this.
we = Text.get_window_extent(self, renderer=renderer)
self._twd_window_extent = we.frozen()
self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
# Finally, make text align center
Text.set_horizontalalignment(self, 'center')
Text.set_verticalalignment(self, 'center')
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
'''
self.update_coords(renderer)
if self.get_dashlength() == 0.0:
return Text.get_window_extent(self, renderer=renderer)
else:
return self._twd_window_extent
def get_dashlength(self):
"""
Get the length of the dash.
"""
return self._dashlength
def set_dashlength(self, dl):
"""
Set the length of the dash.
ACCEPTS: float (canvas units)
"""
self._dashlength = dl
def get_dashdirection(self):
"""
Get the dash direction. 1 is before the text and 0 is after.
"""
return self._dashdirection
def set_dashdirection(self, dd):
"""
Set the direction of the dash following the text.
1 is before the text and 0 is after. The default
is 0, which is what you'd want for the typical
case of ticks below and on the left of the figure.
ACCEPTS: int (1 is before, 0 is after)
"""
self._dashdirection = dd
def get_dashrotation(self):
"""
Get the rotation of the dash in degrees.
"""
if self._dashrotation is None:
return self.get_rotation()
else:
return self._dashrotation
def set_dashrotation(self, dr):
"""
Set the rotation of the dash, in degrees
ACCEPTS: float (degrees)
"""
self._dashrotation = dr
def get_dashpad(self):
"""
Get the extra spacing between the dash and the text, in canvas units.
"""
return self._dashpad
def set_dashpad(self, dp):
"""
Set the "pad" of the TextWithDash, which is the extra spacing
between the dash and the text, in canvas units.
ACCEPTS: float (canvas units)
"""
self._dashpad = dp
def get_dashpush(self):
"""
Get the extra spacing between the dash and the specified text
position, in canvas units.
"""
return self._dashpush
def set_dashpush(self, dp):
"""
Set the "push" of the TextWithDash, which
is the extra spacing between the beginning
of the dash and the specified position.
ACCEPTS: float (canvas units)
"""
self._dashpush = dp
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the :class:`TextWithDash`.
ACCEPTS: (x, y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashx = float(x)
def set_y(self, y):
"""
Set the *y* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashy = float(y)
def set_transform(self, t):
"""
Set the :class:`matplotlib.transforms.Transform` instance used
by this artist.
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Text.set_transform(self, t)
self.dashline.set_transform(t)
def get_figure(self):
'return the figure instance the artist belongs to'
return self.figure
def set_figure(self, fig):
"""
Set the figure instance the artist belong to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
Text.set_figure(self, fig)
self.dashline.set_figure(fig)
artist.kwdocd['TextWithDash'] = artist.kwdoc(TextWithDash)
class Annotation(Text):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
"""
def __str__(self):
return "Annotation(%g,%g,%s)"%(self.xy[0],self.xy[1],repr(self._text))
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
**kwargs):
"""
Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
location *xytext*. (If *xytext* = *None*, defaults to *xy*,
and if *textcoords* = *None*, defaults to *xycoords*).
*arrowprops*, if not *None*, is a dictionary of line properties
(see :class:`matplotlib.lines.Line2D`) for the arrow that connects
annotation to the point.
If the dictionary has a key *arrowstyle*, a FancyArrowPatch
instance is created with the given dictionary and is
drawn. Otherwise, a YAArrow patch instance is created and
drawn. Valid keys for YAArrow are
========= =============================================================
Key Description
========= =============================================================
width the width of the arrow in points
frac the fraction of the arrow length occupied by the head
headwidth the width of the base of the arrow head in points
shrink oftentimes it is convenient to have the arrowtip
and base a bit away from the text and point being
annotated. If *d* is the distance between the text and
annotated point, shrink will shorten the arrow so the tip
and base are shrink percent of the distance *d* away from the
endpoints. ie, ``shrink=0.05 is 5%%``
? any key for :class:`matplotlib.patches.polygon`
========= =============================================================
Valid keys for FancyArrowPatch are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*xycoords* and *textcoords* are strings that indicate the
coordinates of *xy* and *xytext*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
If a 'points' or 'pixels' option is specified, values will be
added to the bottom-left and if negative, values will be
subtracted from the top-right. Eg::
# 10 points to the right of the left border of the axes and
# 5 points below the top border
xy=(10,-5), xycoords='axes points'
Additional kwargs are Text properties:
%(Text)s
"""
if xytext is None:
xytext = xy
if textcoords is None:
textcoords = xycoords
# we'll draw ourself after the artist we annotate by default
x,y = self.xytext = xytext
Text.__init__(self, x, y, s, **kwargs)
self.xy = xy
self.xycoords = xycoords
self.textcoords = textcoords
self.arrowprops = arrowprops
self.arrow = None
if arrowprops and arrowprops.has_key("arrowstyle"):
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1,1),
**arrowprops)
else:
self.arrow_patch = None
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
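# Minimal usage sketch (illustrative only): the usual entry point is
# Axes.annotate, which forwards its arguments here; using the YAArrow keys
# documented above:
#     ax.annotate('local max', xy=(2.0, 1.0), xycoords='data',
#                 xytext=(3.0, 1.5), textcoords='data',
#                 arrowprops=dict(width=2, headwidth=8, shrink=0.05))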
def contains(self,event):
t,tinfo = Text.contains(self,event)
if self.arrow is not None:
a,ainfo=self.arrow.contains(event)
t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t,tinfo
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def _get_xy(self, x, y, s):
if s=='data':
trans = self.axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s=='offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi/72.
y *= dpi/72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s=='polar':
theta, r = x, y
x = r*np.cos(theta)
y = r*np.sin(theta)
trans = self.axes.transData
return trans.transform_point((x,y))
elif s=='figure points':
#points from the lower left corner of the figure
dpi = self.figure.dpi
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
x *= dpi/72.
y *= dpi/72.
if x<0:
x = r + x
if y<0:
y = t + y
return x,y
elif s=='figure pixels':
#pixels from the lower left corner of the figure
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
if y<0:
y = t + y
return x, y
elif s=='figure fraction':
#(0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x,y))
elif s=='axes points':
#points from the lower left corner of the axes
dpi = self.figure.dpi
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x*dpi/72.
else:
x = l + x*dpi/72.
if y<0:
y = t + y*dpi/72.
else:
y = b + y*dpi/72.
return x, y
elif s=='axes pixels':
#pixels from the lower left corner of the axes
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
else:
x = l + x
if y<0:
y = t + y
else:
y = b + y
return x, y
elif s=='axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = self.axes.transAxes
return trans.transform_point((x, y))
def update_positions(self, renderer):
x, y = self.xytext
self._x, self._y = self._get_xy(x, y, self.textcoords)
x, y = self.xy
x, y = self._get_xy(x, y, self.xycoords)
ox0, oy0 = self._x, self._y
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
l,b,w,h = self.get_window_extent(renderer).bounds
r = l+w
t = b+h
xc = 0.5*(l+r)
yc = 0.5*(b+t)
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# Otherwise, fallback to YAArrow.
#if d.has_key("arrowstyle"):
if self.arrow_patch:
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted for.
relpos = self._arrow_relpos
bbox = self.get_window_extent(renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
# Then it will be shrunk by shrinkA and shrinkB
# (in points). If patch A is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1,oy1))
mutation_scale = d.pop("mutation_scale", self.get_size())
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
if self._bbox_patch:
patchA = d.pop("patchA", self._bbox_patch)
self.arrow_patch.set_patchA(patchA)
else:
patchA = d.pop("patchA", self._bbox)
self.arrow_patch.set_patchA(patchA)
else:
# pick the x,y corner of the text bbox closest to point
# annotated
dsu = [(abs(val-x0), val) for val in l, r, xc]
dsu.sort()
_, x = dsu[0]
dsu = [(abs(val-y0), val) for val in b, t, yc]
dsu.sort()
_, y = dsu[0]
shrink = d.pop('shrink', 0.0)
theta = math.atan2(y-y0, x-x0)
r = math.sqrt((y-y0)**2. + (x-x0)**2.)
dx = shrink*r*math.cos(theta)
dy = shrink*r*math.sin(theta)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
frac = d.pop('frac', 0.1)
self.arrow = YAArrow(self.figure, (x0+dx,y0+dy), (x-dx, y-dy),
width=width, headwidth=headwidth, frac=frac,
**d)
self.arrow.set_clip_box(self.get_clip_box())
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
self.update_positions(renderer)
self.update_bbox_position_size(renderer)
if self.arrow is not None:
if self.arrow.figure is None and self.figure is not None:
self.arrow.figure = self.figure
self.arrow.draw(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
Text.draw(self, renderer)
artist.kwdocd['Annotation'] = Annotation.__init__.__doc__
| gpl-3.0 |
omarocegueda/dipy | doc/examples/reconst_dsi.py | 3 | 3360 | """
===========================================
Reconstruct with Diffusion Spectrum Imaging
===========================================
We show how to apply Diffusion Spectrum Imaging [Wedeen08]_ to
diffusion MRI datasets of Cartesian keyhole diffusion gradients.
First import the necessary modules:
"""
from dipy.data import fetch_taiwan_ntu_dsi, read_taiwan_ntu_dsi, get_sphere
from dipy.reconst.dsi import DiffusionSpectrumModel
"""
Download and read the data for this tutorial.
"""
fetch_taiwan_ntu_dsi()
img, gtab = read_taiwan_ntu_dsi()
"""
img contains a nibabel Nifti1Image object (data) and gtab contains a GradientTable
object (gradient information, e.g. b-values). For example, to read the b-values
you can write ``print(gtab.bvals)``.
Load the raw diffusion data and the affine.
"""
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)
"""
data.shape ``(96, 96, 60, 203)``
This dataset has anisotropic voxel sizes, therefore reslicing is necessary.
"""
affine = img.affine
"""
Read the voxel size from the image header.
"""
voxel_size = img.header.get_zooms()[:3]
"""
Instantiate the Model and apply it to the data.
"""
dsmodel = DiffusionSpectrumModel(gtab)
"""
Lets just use one slice only from the data.
"""
dataslice = data[:, :, data.shape[2] // 2]
dsfit = dsmodel.fit(dataslice)
"""
Load an odf reconstruction sphere
"""
sphere = get_sphere('symmetric724')
"""
Calculate the ODFs with this specific sphere
"""
ODF = dsfit.odf(sphere)
print('ODF.shape (%d, %d, %d)' % ODF.shape)
"""
ODF.shape ``(96, 96, 724)``
In a similar fashion it is possible to calculate the PDFs of all voxels
in one call with the following way
"""
PDF = dsfit.pdf()
print('PDF.shape (%d, %d, %d, %d, %d)' % PDF.shape)
"""
PDF.shape ``(96, 96, 17, 17, 17)``
We see that even for a single slice this PDF array is close to 345 MBytes so we
really have to be careful with memory usage when using this function with a full
dataset.
The simple solution is to generate/analyze the ODFs/PDFs by iterating through
each voxel and not store them in memory if that is not necessary.
"""
from dipy.core.ndindex import ndindex
for index in ndindex(dataslice.shape[:2]):
pdf = dsmodel.fit(dataslice[index]).pdf()
"""
If you really want to save the PDFs of a full dataset to disk, we recommend
using memory maps (``numpy.memmap``). Keep in mind that even then, for a
dataset of volume size ``(96, 96, 60)`` you will need about 2.5 GBytes, which
can take less space when reasonable spheres (with < 1000 vertices) are used.
A rough sketch of such a memory-mapped loop is shown below.
"""
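# Rough sketch only: the file name 'pdf_slice.mmap' and the float32 dtype are
# assumptions, and the loop mirrors the voxel-wise iteration above so that only
# one PDF is held in memory at a time.
import numpy as np

pdf_shape = dataslice.shape[:2] + (17, 17, 17)
pdf_mmap = np.memmap('pdf_slice.mmap', dtype=np.float32, mode='w+', shape=pdf_shape)
for index in ndindex(dataslice.shape[:2]):
    pdf_mmap[index] = dsmodel.fit(dataslice[index]).pdf()
pdf_mmap.flush()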
"""
Let's now calculate a map of Generalized Fractional Anisotropy (GFA) [Tuch04]_
using the DSI ODFs.
"""
from dipy.reconst.odf import gfa
GFA = gfa(ODF)
import matplotlib.pyplot as plt
fig_hist, ax = plt.subplots(1)
ax.set_axis_off()
plt.imshow(GFA.T)
plt.savefig('dsi_gfa.png', bbox_inches='tight', origin='lower', cmap='gray')
"""
.. figure:: dsi_gfa.png
:align: center
See also :ref:`example_reconst_dsi_metrics` for calculating different types
of DSI maps.
.. [Wedeen08] Wedeen et al., Diffusion spectrum magnetic resonance imaging (DSI)
tractography of crossing fibers, Neuroimage, vol 41, no 4,
1267-1277, 2008.
.. [Tuch04] Tuch, D.S, Q-ball imaging, MRM, vol 52, no 6, 1358-1372, 2004.
.. include:: ../links_names.inc
"""
| bsd-3-clause |
bobmyhill/burnman | examples/example_fit_data.py | 2 | 6885 | # This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
"""
example_fit_data
----------------
This example demonstrates BurnMan's functionality to fit various mineral physics data to
an EoS of the user's choice.
Please note also the separate file example_fit_eos.py, which can be viewed as a more
advanced example in the same general field.
teaches:
- least squares fitting
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import burnman_path # adds the local burnman directory to the path
import burnman
import warnings
assert burnman_path # silence pyflakes warning
if __name__ == "__main__":
# 1) Fitting shear modulus and its derivative to shear wave velocity data
print('1) Fitting shear modulus and its derivative to shear wave velocity data\n')
# First, read in the data from file and convert to SI units.
PTp_data = np.loadtxt('../burnman/data/input_minphys/Murakami_perovskite.txt')
PTp_data[:,0] = PTp_data[:,0]*1.e9
PTp_data[:,2] = PTp_data[:,2]*1.e3
# Make the test mineral
mg_perovskite_test = burnman.Mineral()
mg_perovskite_test.params = {'V_0': 24.45e-6,
'K_0': 281.e9,
'Kprime_0': 4.1,
'molar_mass': .10,
'G_0': 200.e9,
'Gprime_0': 2.}
def best_fit():
return burnman.eos_fitting.fit_PTp_data(mineral = mg_perovskite_test,
flags = 'shear_wave_velocity',
fit_params = ['G_0', 'Gprime_0'],
data = PTp_data,
verbose = False)
pressures = np.linspace(1.e5, 150.e9, 101)
temperatures = pressures*0. + 300.
# Fit to the second order Birch-Murnaghan EoS
mg_perovskite_test.set_method("bm2")
fitted_eos = best_fit()
print('2nd order fit:')
burnman.tools.pretty_print_values(fitted_eos.popt, fitted_eos.pcov, fitted_eos.fit_params)
model_vs_2nd_order_correct = mg_perovskite_test.evaluate(['shear_wave_velocity'],
pressures, temperatures)[0]
with warnings.catch_warnings(record=True) as w:
mg_perovskite_test.set_method("bm3")
print(w[-1].message)
model_vs_2nd_order_incorrect = mg_perovskite_test.evaluate(['shear_wave_velocity'],
pressures, temperatures)[0]
print('')
# Fit to the third order Birch-Murnaghan EoS
mg_perovskite_test.set_method("bm3")
fitted_eos = best_fit()
print('3rd order fit:')
burnman.tools.pretty_print_values(fitted_eos.popt, fitted_eos.pcov, fitted_eos.fit_params)
model_vs_3rd_order_correct = mg_perovskite_test.evaluate(['shear_wave_velocity'],
pressures, temperatures)[0]
with warnings.catch_warnings(record=True) as w:
mg_perovskite_test.set_method("bm2")
print(w[-1].message)
model_vs_3rd_order_incorrect = mg_perovskite_test.evaluate(['shear_wave_velocity'],
pressures, temperatures)[0]
print('')
plt.plot(pressures / 1.e9, model_vs_2nd_order_correct / 1000., color='r',
linestyle='-', linewidth=2, label="Correct 2nd order fit")
plt.plot(pressures / 1.e9, model_vs_2nd_order_incorrect / 1000., color='r',
linestyle='-.', linewidth=2, label="Incorrect 2nd order fit")
plt.plot(pressures / 1.e9, model_vs_3rd_order_correct / 1000., color='b',
linestyle='-', linewidth=2, label="Correct 3rd order fit")
plt.plot(pressures / 1.e9, model_vs_3rd_order_incorrect / 1000., color='b',
linestyle='-.', linewidth=2, label="Incorrect 3rd order fit")
plt.scatter(PTp_data[:,0] / 1.e9, PTp_data[:,2] / 1.e3)
plt.ylim([6.55, 8])
plt.xlim([25., 135.])
plt.ylabel("Shear velocity (km/s)")
plt.xlabel("Pressure (GPa)")
plt.legend(loc="lower right", prop={'size': 12}, frameon=False)
plt.savefig("output_figures/example_fit_data1.png")
plt.show()
# 2) Fitting standard enthalpy and heat capacity to enthalpy data
print('2) Fitting standard enthalpy and heat capacity to enthalpy data\n')
per_SLB = burnman.minerals.SLB_2011.periclase()
per_HP = burnman.minerals.HP_2011_ds62.per()
per_opt = burnman.minerals.HP_2011_ds62.per() # this is the mineral we'll optimise
# Load some example enthalpy data
TH_data = np.loadtxt('../burnman/data/input_fitting/Victor_Douglas_1963_deltaH_MgO.dat')
per_HP.set_state(1.e5, 298.15)
PTH_data = np.array([TH_data[:,0]*0. + 1.e5, TH_data[:,0], TH_data[:,2]*4.184 + per_HP.H]).T
nul = TH_data[:,0]*0.
PTH_covariances = np.array([[nul, nul, nul], [nul, TH_data[:,1], nul], [nul, nul, np.power(TH_data[:,2]*4.184*0.0004, 2.)]]).T
per_opt.params['S_0'] = 6.439*4.184
model = burnman.eos_fitting.fit_PTp_data(mineral = per_opt,
flags = 'H',
fit_params = ['H_0', 'Cp'],
data = PTH_data,
data_covariances = PTH_covariances,
max_lm_iterations = 10,
verbose = False)
print('Optimised values:')
params = ['H_0', 'Cp_a', 'Cp_b', 'Cp_c', 'Cp_d']
burnman.tools.pretty_print_values(model.popt, model.pcov, params)
print('')
# Corner plot
fig=burnman.nonlinear_fitting.corner_plot(model.popt, model.pcov, params)
plt.savefig("output_figures/example_fit_data2.png")
plt.show()
# Plot models
temperatures = np.linspace(200., 2000., 101)
pressures = np.array([298.15] * len(temperatures))
plt.plot(temperatures, per_HP.evaluate(['molar_heat_capacity_p'], pressures, temperatures)[0], linestyle='--', label='HP')
plt.plot(temperatures, per_SLB.evaluate(['molar_heat_capacity_p'], pressures, temperatures)[0], linestyle='--', label='SLB')
plt.plot(temperatures, per_opt.evaluate(['molar_heat_capacity_p'], pressures, temperatures)[0], label='Optimised fit')
plt.legend(loc='lower right')
plt.xlim(0., temperatures[-1])
plt.xlabel('Temperature (K)')
plt.ylabel('Heat capacity (J/K/mol)')
plt.legend(loc="lower right", prop={'size': 12}, frameon=False)
plt.savefig("output_figures/example_fit_data3.png")
plt.show()
| gpl-2.0 |
Calvinxc1/Data_Analytics | old versions/analysis_classes.py | 1 | 8972 | #%% Libraries
import pandas as pd
import numpy as np
from scipy.stats import multivariate_normal as mv_norm
#%% Data Cluster Class
class data_cluster(object):
__input_nodes = None
__output_nodes = None
def __init__(self, cluster_name = None, input_data = None, data_type = None, **kwargs):
self.__name = cluster_name
if input_data is None:
self.__data_table = None
self.__data_info = None
elif type(input_data) is str:
data_table = pd.read_csv(input_data)
self.input_data(data_table, data_type, **kwargs)
elif type(input_data) is pd.core.frame.DataFrame:
self.input_data(input_data, data_type, **kwargs)
else:
raise TypeError('Tried to input ' + str(type(input_data)) + ', can only add Pandas dataframe or string of .csv file location')
## Data input processes
def input_data(self, data_frame, data_input, **kwargs):
if type(data_frame) is not pd.core.frame.DataFrame:
raise TypeError('Tried to input ' + str(type(data_frame)) + ', can only accept Pandas dataframe')
elif (type(data_input) is not list) and (type(data_input) is not tuple):
raise TypeError('Data types must be list or tuple')
else:
final_data = pd.DataFrame()
data_columns = []
data_types = []
data_index = []
data_norms = pd.DataFrame(columns = ['mean', 'stDev'])
for column_data in data_input:
column = column_data[0]
data_type = column_data[1]
parsed_column, norm_data = self.__process_column(data_frame, column, data_type, **kwargs)
data_norms = data_norms.append(norm_data)
data_columns.append(column)
data_types.append(data_type)
data_index.append(list(parsed_column.columns))
final_data = pd.concat([final_data, parsed_column], axis = 1)
self.__data_table = final_data
self.__data_obs = len(final_data.index)
self.__data_feature_count = len(final_data.columns)
self.__data_features = data_columns
self.__data_types = data_types
self.__data_index = data_index
self.__data_norms = data_norms
def __process_column(self, data_frame, column, data_type, drop_first = True, load_na = 'auto', norm = False):
data_column = data_frame.loc[:, [column]]
parsed_column, norm_mean, norm_stdev = self.__data_converter(data_column, data_type, load_na = load_na, norm = norm, drop_first = drop_first)
norm_data = pd.DataFrame([[norm_mean, norm_stdev]], columns = ['mean', 'stDev'], index = [column])
return (parsed_column, norm_data)
def __data_converter(self, data_column, data_type, load_na = 'auto', norm = False, drop_first = True):
if data_type == 'cat':
norm_mean = None
norm_stdev = None
if load_na == 'auto':
if data_column.isnull().values.any():
load_na = True
else:
load_na = False
elif type(load_na) is not bool:
raise TypeError('load_na only accepts booleans, or string "auto"')
parsed_data = pd.get_dummies(data_column.astype(str), dummy_na = load_na, drop_first = drop_first)
elif data_type == 'cont':
if norm:
norm_mean = np.mean(data_column.values)
norm_stdev = np.std(data_column.values)
parsed_data = (data_column - norm_mean) / norm_stdev
else:
norm_mean = None
norm_stdev = None
parsed_data = data_column
else:
            raise ValueError('data_type can only be "cat" or "cont"')
return (parsed_data, norm_mean, norm_stdev)
def get_table(self):
return self.__data_table
def get_info(self):
return_info = {
'observations': self.__data_obs,
'feature count': self.__data_feature_count,
'features': self.__data_features,
'feature types': self.__data_types,
'feature index': self.__data_index,
'normalization': self.__data_norms
}
return return_info
## Data Clustering processes
def cluster_data(self, cluster_count, max_iters = 10000):
self.__init_clusters(cluster_count)
self.__update_cluster_assignments()
self.__update_cluster_params()
for i in range(max_iters):
prior_weights = self.__cluster_weights
prior_means = self.__cluster_means
prior_covs = self.__cluster_covs
self.__update_cluster_assignments()
self.__update_cluster_params()
weights_same = prior_weights == self.__cluster_weights
means_same = prior_means == self.__cluster_means
covs_same = prior_covs == self.__cluster_covs
stable_params = np.array([np.all(weights_same), np.all(means_same), np.all(covs_same)])
all_stable = np.all(stable_params)
if all_stable:
break
def __init_clusters(self, cluster_count):
self.__cluster_count = cluster_count
self.__cluster_weights = np.array([1. / cluster_count] * cluster_count)
self.__cluster_means = np.random.normal(size = (cluster_count, self.__data_feature_count))
self.__cluster_covs = self.__init_cov()
def __init_cov(self):
cov = np.empty(shape = (0, self.__data_feature_count, self.__data_feature_count))
for cluster in range(self.__cluster_count):
cluster_ident = np.expand_dims(np.identity(self.__data_feature_count), axis = 0)
cov = np.append(cov, cluster_ident, axis = 0)
return cov
def __update_cluster_assignments(self):
cluster_assigns = np.empty(shape = (self.__data_obs, 0))
for cluster_index in xrange(self.__cluster_count):
cluster_assign = self.__cluster_assign_calc(cluster_index)
cluster_assigns = np.append(cluster_assigns, np.expand_dims(cluster_assign, axis = 1), axis = 1)
cluster_assigns = cluster_assigns / np.sum(cluster_assigns, axis = 1, keepdims = True)
self.__cluster_assigns = cluster_assigns
self.__cluster_obs = np.sum(cluster_assigns, axis = 0)
def __cluster_assign_calc(self, cluster_index):
cluster_weight = self.__cluster_weights[cluster_index]
cluster_mean = self.__cluster_means[cluster_index]
cluster_cov = self.__cluster_covs[cluster_index]
cluster_assign = cluster_weight * mv_norm.pdf(self.__data_table, mean = cluster_mean, cov = cluster_cov)
return cluster_assign
def __update_cluster_params(self):
self.__cluster_weights = self.__update_cluster_weights()
self.__cluster_means = self.__update_cluster_means()
self.__cluster_covs = self.__update_cluster_covs()
def __update_cluster_weights(self):
return self.__cluster_obs / self.__data_obs
def __update_cluster_means(self):
cluster_proportions = np.dot(self.__cluster_assigns.transpose(), self.__data_table.values)
cluster_scales = np.expand_dims(self.__cluster_obs, axis = 1)
return cluster_proportions / cluster_scales
def __update_cluster_covs(self):
cov = np.empty(shape = (0, self.__data_feature_count, self.__data_feature_count))
for cluster_index in range(self.__cluster_count):
cluster_cov = self.__update_cov(cluster_index)
cov = np.append(cov, cluster_cov, axis = 0)
return cov
def __update_cov(self, cluster_index):
cluster_spread = np.expand_dims(self.__data_table.values - self.__cluster_means[cluster_index], axis = 2)
cluster_cov = cluster_spread * np.swapaxes(cluster_spread, 1, 2)
cov_weight = np.expand_dims(self.__cluster_assigns[:, [cluster_index]], axis = 2)
weighted_cov = cluster_cov * cov_weight
final_cov = np.sum(weighted_cov, axis = 0, keepdims = True) / self.__cluster_obs[cluster_index]
return final_cov
def get_clusters(self):
cluster_data = {
'weights': self.__cluster_weights,
'means': self.__cluster_means,
'covariance': self.__cluster_covs,
'assignments': self.__cluster_assigns
}
return cluster_data
#%%
data_file = 'kc_house_data.csv'
data_features = (
('sqft_living', 'cont'),
('bedrooms', 'cont'),
('bathrooms', 'cont'),
('price', 'cont')
)
data_set = data_cluster(cluster_name = 'data', input_data = data_file, data_type = data_features, norm = True)
#%%
data_set.cluster_data(4)
cluster_data = data_set.get_clusters()
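#%% Illustrative post-processing (a sketch, not part of the original script):
# cluster_data['assignments'] holds the soft EM responsibilities, so the argmax
# over the cluster axis gives a hard cluster label for each observation.
hard_labels = np.argmax(cluster_data['assignments'], axis = 1)
print(pd.Series(hard_labels).value_counts())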
| gpl-3.0 |
Srisai85/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the p param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibilty. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
musteryu/Data-Mining | assignment-黄煜-3120100937/question_2.py | 1 | 1072 | from mylib import *
import os,sys
import numpy as np
import matplotlib.pyplot as plt
import math
import random
if __name__ == '__main__':
DIR_PATH = sys.path[0] + '\\'
# normal distribution vector file
nvctr_file1 = DIR_PATH + 'normal_500_1.txt'
nvctr_file2 = DIR_PATH + 'normal_500_2.txt'
# uniform distribution vector file
uvctr_file1 = DIR_PATH + 'uniform_500_1.txt'
uvctr_file2 = DIR_PATH + 'uniform_500_2.txt'
posi = random.randint(0,1000)
if posi < 500:
nvctr = fget_vctr(nvctr_file1, posi)
uvctr = fget_vctr(uvctr_file1, posi)
else:
nvctr = fget_vctr(nvctr_file2, posi - 500)
        uvctr = fget_vctr(uvctr_file2, posi - 500)
print("Normal distribution vector:")
print(nvctr)
print("Uniform distribution vector:")
print(uvctr)
ex,var = get_normal_param(nvctr)
a, b = get_uniform_param(uvctr)
print('Parameter of normal distribution:')
print('expectation = ', end='')
print(ex)
print('variance = ', end='')
print(var)
print()
print('Parameter of uniform distribution:')
print('a = ', end='')
print(a)
print('b = ', end='')
print(b)
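    # Sanity check (a sketch; it assumes mylib's estimators are the standard
    # closed-form choices -- sample mean/variance for the normal fit and sample
    # min/max for the uniform fit -- which may not match mylib's implementation):
    print('numpy check (normal): mean = %f, variance = %f' % (np.mean(nvctr), np.var(nvctr)))
    print('numpy check (uniform): a = %f, b = %f' % (np.min(uvctr), np.max(uvctr)))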
| gpl-2.0 |
astroML/periodogram | periodogram/timeseries.py | 2 | 1861 | from __future__ import division,print_function
"""
Base class for time series
"""
import numpy as np
import matplotlib.pyplot as plt
class TimeSeries(object):
    def __init__(self, t, f, df=None, mask=None,
                 band=None):
"""
Base class for time series data
Parameters
----------
t : array_like
times
f : array_like
fluxes, must be same length as times
df : float array_like, optional
uncertainties; if float, then all assumed to be the same;
if array, then must be same length as times and fluxes
mask : array_like or None
band : string or None
Passband that data was taken in.
"""
assert(t.shape == f.shape)
self._t = t
self._f = f
if df is not None:
if np.size(df)==1:
df = np.ones_like(f) * df
else:
assert(df.shape == f.shape)
self._df = df
if mask is None:
mask = np.isnan(f)
self._mask = mask
self.band = band
self.models = []
@property
def t(self):
return self._t[~self._mask]
@property
def f(self):
return self._f[~self._mask]
@property
def df(self):
return self._df[~self._mask]
    def add_periodic_model(self, model, *args, **kwargs):
"""Connects and fits PeriodicModeler object
Parameters
----------
model: PeriodicModeler, or string
PeriodicModeler object or string indicating known PeriodicModel
args, kwargs passed on to PeriodicModeler
"""
m = model(*args,**kwargs)
m.fit(self.t, self.f, self.df)
self.models.append(m)
def plot(self, **kwargs):
plt.plot(self.t, self.f, **kwargs)
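if __name__ == "__main__":
    # Minimal usage sketch (an assumption about intended use, not part of the
    # original module): build a TimeSeries from synthetic data and plot it.
    # add_periodic_model would additionally require a concrete PeriodicModeler
    # implementation (e.g. a Lomb-Scargle class), which is not provided here.
    t = np.linspace(0., 10., 200)
    f = np.sin(2 * np.pi * t / 2.5) + 0.1 * np.random.randn(t.size)
    ts = TimeSeries(t, f, df=0.1, band="V")
    ts.plot(marker=".", linestyle="none")
    plt.show()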
| mit |
DCSaunders/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/tensorflow_dataframe_test.py | 24 | 13091 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for learn.dataframe.tensorflow_dataframe."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import math
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.contrib.learn.python.learn.dataframe.transforms import densify
from tensorflow.core.example import example_pb2
from tensorflow.python.framework import dtypes
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def _assert_df_equals_dict(expected_df, actual_dict):
for col in expected_df:
if expected_df[col].dtype in [np.float32, np.float64]:
assertion = np.testing.assert_allclose
else:
assertion = np.testing.assert_array_equal
if expected_df[col].dtype.kind in ["O", "S", "U"]:
# Python 2/3 compatibility
# TensorFlow always returns bytes, so we just convert the unicode
# expectations to bytes also before comparing.
expected_values = [x.encode("utf-8") for x in expected_df[col].values]
else:
expected_values = expected_df[col].values
assertion(expected_values,
actual_dict[col],
err_msg="Expected {} in column '{}'; got {}.".format(
expected_values, col, actual_dict[col]))
def _make_test_csv():
f = tempfile.NamedTemporaryFile(
dir=tf.test.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
intvalue = np.random.randint(-10, 10)
floatvalue = np.random.rand()
boolvalue = int(np.random.rand() > 0.3)
stringvalue = "S: %.4f" % np.random.rand()
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_csv_sparse():
f = tempfile.NamedTemporaryFile(
dir=tf.test.get_temp_dir(), delete=False, mode="w")
w = csv.writer(f)
w.writerow(["int", "float", "bool", "string"])
for _ in range(100):
# leave columns empty; these will be read as default value (e.g. 0 or NaN)
intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
stringvalue = (("S: %.4f" % np.random.rand())
if np.random.rand() > 0.5 else "")
row = [intvalue, floatvalue, boolvalue, stringvalue]
w.writerow(row)
f.close()
return f.name
def _make_test_tfrecord():
f = tempfile.NamedTemporaryFile(dir=tf.test.get_temp_dir(), delete=False)
w = tf.python_io.TFRecordWriter(f.name)
for i in range(100):
ex = example_pb2.Example()
ex.features.feature["var_len_int"].int64_list.value.extend(range((i % 3)))
ex.features.feature["fixed_len_float"].float_list.value.extend(
[float(i), 2 * float(i)])
w.write(ex.SerializeToString())
return f.name
class TensorFlowDataFrameTestCase(tf.test.TestCase):
"""Tests for `TensorFlowDataFrame`."""
def _assert_pandas_equals_tensorflow(self, pandas_df, tensorflow_df,
num_batches, batch_size):
self.assertItemsEqual(
list(pandas_df.columns) + ["index"], tensorflow_df.columns())
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
row_numbers = [
total_row_num % pandas_df.shape[0]
for total_row_num in range(batch_size * batch_num, batch_size * (
batch_num + 1))
]
expected_df = pandas_df.iloc[row_numbers]
_assert_df_equals_dict(expected_df, batch)
def testInitFromPandas(self):
"""Test construction from Pandas DataFrame."""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"sparrow": range(10), "ostrich": 1})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df,
batch_size=10,
shuffle=False)
batch = tensorflow_df.run_one_batch()
np.testing.assert_array_equal(pandas_df.index.values, batch["index"],
"Expected index {}; got {}".format(
pandas_df.index.values, batch["index"]))
_assert_df_equals_dict(pandas_df, batch)
def testBatch(self):
"""Tests `batch` method.
`DataFrame.batch()` should iterate through the rows of the
`pandas.DataFrame`, and should "wrap around" when it reaches the last row.
"""
if not HAS_PANDAS:
return
pandas_df = pd.DataFrame({"albatross": range(10),
"bluejay": 1,
"cockatoo": range(0, 20, 2),
"penguin": list("abcdefghij")})
tensorflow_df = df.TensorFlowDataFrame.from_pandas(pandas_df, shuffle=False)
# Rebatch `df` into the following sizes successively.
batch_sizes = [4, 7]
num_batches = 3
final_batch_size = batch_sizes[-1]
for batch_size in batch_sizes:
tensorflow_df = tensorflow_df.batch(batch_size, shuffle=False)
self._assert_pandas_equals_tensorflow(pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=final_batch_size)
def testFromNumpy(self):
x = np.eye(20)
tensorflow_df = df.TensorFlowDataFrame.from_numpy(x, batch_size=10)
for batch in tensorflow_df.run(30):
for ind, val in zip(batch["index"], batch["value"]):
expected_val = np.zeros_like(val)
expected_val[ind] = 1
np.testing.assert_array_equal(expected_val, val)
def testFromCSV(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
enqueue_size = 7
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
pandas_df = pd.read_csv(data_path)
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
enqueue_size=enqueue_size,
batch_size=batch_size,
shuffle=False,
default_values=default_values)
self._assert_pandas_equals_tensorflow(pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromCSVLimitEpoch(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
actual_num_batches = len(result_batches)
self.assertEqual(expected_num_batches, actual_num_batches)
# TODO(soergel): figure out how to dequeue the final small batch
    expected_rows = 1696  # num_epochs * 100, less the final short batch (see TODO above)
actual_rows = sum([len(x["int"]) for x in result_batches])
self.assertEqual(expected_rows, actual_rows)
def testFromCSVWithFeatureSpec(self):
if not HAS_PANDAS:
return
num_batches = 100
batch_size = 8
data_path = _make_test_csv_sparse()
feature_spec = {
"int": tf.FixedLenFeature(None, dtypes.int16, np.nan),
"float": tf.VarLenFeature(dtypes.float16),
"bool": tf.VarLenFeature(dtypes.bool),
"string": tf.FixedLenFeature(None, dtypes.string, "")
}
pandas_df = pd.read_csv(data_path, dtype={"string": object})
# Pandas insanely uses NaN for empty cells in a string column.
# And, we can't use Pandas replace() to fix them because nan != nan
s = pandas_df["string"]
for i in range(0, len(s)):
if isinstance(s[i], float) and math.isnan(s[i]):
pandas_df.set_value(i, "string", "")
tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
[data_path],
batch_size=batch_size,
shuffle=False,
feature_spec=feature_spec)
# These columns were sparse; re-densify them for comparison
tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])
self._assert_pandas_equals_tensorflow(pandas_df,
tensorflow_df,
num_batches=num_batches,
batch_size=batch_size)
def testFromExamples(self):
num_batches = 77
enqueue_size = 11
batch_size = 13
data_path = _make_test_tfrecord()
features = {
"fixed_len_float": tf.FixedLenFeature(shape=[2],
dtype=tf.float32,
default_value=[0.0, 0.0]),
"var_len_int": tf.VarLenFeature(dtype=tf.int64)
}
tensorflow_df = df.TensorFlowDataFrame.from_examples(
data_path,
enqueue_size=enqueue_size,
batch_size=batch_size,
features=features,
shuffle=False)
# `test.tfrecord` contains 100 records with two features: var_len_int and
# fixed_len_float. Entry n contains `range(n % 3)` and
# `float(n)` for var_len_int and fixed_len_float,
# respectively.
num_records = 100
def _expected_fixed_len_float(n):
return np.array([float(n), 2 * float(n)])
def _expected_var_len_int(n):
return np.arange(n % 3)
for batch_num, batch in enumerate(tensorflow_df.run(num_batches)):
record_numbers = [
n % num_records
for n in range(batch_num * batch_size, (batch_num + 1) * batch_size)
]
for i, j in enumerate(record_numbers):
np.testing.assert_allclose(
_expected_fixed_len_float(j), batch["fixed_len_float"][i])
var_len_int = batch["var_len_int"]
for i, ind in enumerate(var_len_int.indices):
val = var_len_int.values[i]
expected_row = _expected_var_len_int(record_numbers[ind[0]])
expected_value = expected_row[ind[1]]
np.testing.assert_array_equal(expected_value, val)
def testSplitString(self):
batch_size = 8
num_epochs = 17
expected_num_batches = (num_epochs * 100) // batch_size
data_path = _make_test_csv()
default_values = [0, 0.0, 0, ""]
tensorflow_df = df.TensorFlowDataFrame.from_csv(
[data_path],
batch_size=batch_size,
shuffle=False,
default_values=default_values)
a, b = tensorflow_df.split("string", 0.7) # no rebatching
total_result_batches = list(tensorflow_df.run(num_epochs=num_epochs))
a_result_batches = list(a.run(num_epochs=num_epochs))
b_result_batches = list(b.run(num_epochs=num_epochs))
self.assertEqual(expected_num_batches, len(total_result_batches))
self.assertEqual(expected_num_batches, len(a_result_batches))
self.assertEqual(expected_num_batches, len(b_result_batches))
total_rows = sum([len(x["int"]) for x in total_result_batches])
a_total_rows = sum([len(x["int"]) for x in a_result_batches])
b_total_rows = sum([len(x["int"]) for x in b_result_batches])
print("Split rows: %s => %s, %s" % (total_rows, a_total_rows, b_total_rows))
# TODO(soergel): figure out how to dequeue the final small batch
    expected_total_rows = 1696  # num_epochs * 100, less the final short batch (see TODO above)
self.assertEqual(expected_total_rows, total_rows)
self.assertEqual(1087, a_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.7), a_total_rows)
self.assertEqual(609, b_total_rows) # stochastic but deterministic
# self.assertEqual(int(total_rows * 0.3), b_total_rows)
# The strings used for hashing were all unique in the original data, but
# we ran 17 epochs, so each one should appear 17 times. Each copy should
# be hashed into the same partition, so there should be no overlap of the
# keys.
a_strings = set([s for x in a_result_batches for s in x["string"]])
b_strings = set([s for x in b_result_batches for s in x["string"]])
self.assertEqual(frozenset(), a_strings & b_strings)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
OpenSourcePolicyCenter/multi-country | Python/Archive/Stage4/AuxiliaryClass.py | 2 | 116307 | from __future__ import division
import csv
import time
import numpy as np
import scipy as sp
import scipy.optimize as opt
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import AuxiliaryDemographics as demog
#from pure_cython import cy_fillca
class OLG(object):
"""
This object takes all of the parts of calculating the OG multi-country model and stores it into a centralized object. This
has a huge advantage over previous versions as we are now able to quickly access stored parts when we are trying
to expand the code. Before, we had to carefully pass tuples of parameters everywhere and it was easy to get lost in the details.
The variables are listed in alphabetical order of their data type, then alphabetical order of
    their name, so Arrays are listed first, Booleans second, etc.
For each function there are the following categories:
Description: Brief description of what the function does
Inputs: Lists the inputs that the function uses
Variables Called From Object: Lists the variables that the function calls from storage
Variables Stored in Object: Lists the variables that are put into storage
Other Functions Called: Lists the other non-library functions needed to complete the process of the current function
Objects in Function: Lists the variables that are exclusive to that function and are not used again.
Outputs: Lists the outputs that the function puts out.
"""
def __init__(self, countries, HH_Params, Firm_Params, Lever_Params):
"""
Description:
-This creates the object and stores all of the parameters into the object.
-The initialization is the starting point for model, think of this as the
"foundation" for the object.
Inputs:
-self: "self" stores all of the components of the model. To access any part,
simply type "self.variable_name" while in the object and "objectname.variable_name"
outside the object. Every other object function will just take this as given, so
future mentions of self won't be rewritten.
-countries = tuple: contains a dictionary and tuple for countries and their associated number, (i.e USA is country 0, EU
is country 1, etc.)
-Firm_Params = tuple: contains alpha, annualized delta, chi, rho and g_A
-HH_Params = tuple: contains S, I, annualized Beta and sigma.
-Lever_Params = tuple: contains the following boolean levers indicated by the users:
PrintAges,self.CheckerMode,self.Iterate,self.UseDiffDemog,self.UseDiffProductivities,self.Matrix_Time
Variables Stored in Object:
- self.A = Array: [I], Technology level for each country
- self.agestopull = Array: [S], Contains which ages to be used from the data when S<80
- self.e = Array: [I,S,T+S], Labor Productivities
- self.e_ss = Array: [I,S], Labor produtivities for the Steady State
- self.lbar = Array: [T+S], Time endowment in each year
- self.CheckerMode = Boolean: Used in conjunction with Checker.py, an MPI code that checks the
robustness of the code. With this activated, the code only prints
the statements that are necessary. This speeds up the robust check
process.
- self.Iterate = Boolean: Activates printing the iteration number and euler errors at each
step of the TPI process.
- PrintAges = Boolean: Prints the ages calculated in the demographics
- self.UseDiffDemog = Boolean: Allows each country to have different demographics
- self.UseDiffProductivities = Boolean: Allows cohorts of different ages to produce different labor productivities
- self.Matrix_Time = Boolean: Prints how long it takes to calculate the 2 parts of the household problem
- self.ShaveTime = Boolean: Activates the use of the Cython module that allows the code to work faster
- self.I_dict = Dictionary: [I], Associates a country with a number
- self.I_touse = List: [I], Roster of countries that are being used
- self.alpha = Scalar: Capital share of production
- self.beta = Scalar: Calculated overall future discount rate
- self.chi = Scalar: Leisure preference Parameter
- self.delta = Scalar: Calulated overall depreciation rate
- self.g_A = Scalar: Growth rate of technology
- self.rho = Scalar: The intratemporal elasticity of substitution between consumption and leisure
- self.sigma = Scalar: Rate of Time Preference
            - self.FirstDyingAge = Int: First age where mortality rates affect agents
- self.FirstFertilityAge = Int: First age where agents give birth
- self.I = Int: Number of Countries
- self.LastFertilityAge = Int: Last age where agents give birth
- self.LeaveHouseAge = Int: First age where agents don't count as children in utility function
- self.MaxImmigrantAge = Int: No immigration takes place for cohorts older than this age
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.T_1 = Int: Transition year for the demographics
- self.Timepath_counter = Int: Counter that keeps track of the number of iterations in solving for the time paths
- self.IterationsToShow = Set: A set of user inputs of iterations of TPI graphs to show
Other Functions Called:
- getkeyages = Gets the important ages for calculating demographic dynamics like FirstFertilityAge, etc.
- Importdata = Imports the demographic data from CSV files
Objects in Function:
- beta_annual = Scalar: Annualized value for beta. Adjusted by S and stored as self.beta
- delta_annual = Scalar: Annualized value for delta. Adjusted by S and stored as self.delta
"""
#PARAMETER SET UP
#HH Parameters
(self.S, self.I, beta_annual, self.sigma) = HH_Params
self.beta=beta_annual**(70/self.S)
self.T = int(round(6*self.S))
self.T_1 = self.S
if self.S > 50:
self.T_1 = 50
#Demographics Parameters
self.I_dict, self.I_touse = countries
#Firm Parameters
(self.alpha,delta_annual,self.chi,self.rho, self.g_A)= Firm_Params
self.delta=1-(1-delta_annual)**(70/self.S)
#Lever Parameters
(PrintAges,self.CheckerMode,self.Iterate,self.UseDiffDemog,self.UseDiffProductivities,self.Matrix_Time,self.ShaveTime) = Lever_Params
#Getting key ages for calculating demographic dynamics
self.LeaveHouseAge, self.FirstFertilityAge, self.LastFertilityAge,\
self.MaxImmigrantAge, self.FirstDyingAge, self.agestopull = demog.getkeyages(self.S,PrintAges)
if self.UseDiffDemog:
            self.A = np.ones(self.I)+np.cumsum(np.ones(self.I)*.05)-.05 #Technological change, used when countries are different
else:
            self.A = np.ones(self.I) #Technological change, used for identical countries
#Initialize Labor Productivities
if self.UseDiffProductivities:
self.e = np.ones((self.I, self.S, self.T+self.S))
self.e[:,self.FirstDyingAge:,:] = 0.3
self.e[:,:self.LeaveHouseAge,:] = 0.3
else:
self.e = np.ones((self.I, self.S, self.T+self.S)) #Labor productivities
self.e_ss=self.e[:,:,-1]
        #Initialize Time Endowment
self.lbar = np.cumsum(np.ones(self.T+self.S)*self.g_A)
self.lbar[self.T:] = np.ones(self.S)
self.lbar[:self.T] = np.ones(self.T)
self.lbar_ss=self.lbar[-1]
#Imports all of the data from .CSV files needed for the model
self.Import_Data()
#Initialize counter that will keep track of the number of iterations the time path solver takes
self.Timepath_counter = 1
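    # Illustrative construction of the object (a sketch; the country names and
    # parameter values below are assumptions, not calibrated choices, and the
    # country keys must match the Data_Files/*.csv naming used by Import_Data):
    #   countries    = ({"usa": 0, "eu": 1}, ("usa", "eu"))
    #   HH_Params    = (S, I, beta_annual, sigma) = (20, 2, 0.95, 3.0)
    #   Firm_Params  = (alpha, delta_annual, chi, rho, g_A) = (0.35, 0.05, 1.5, 1.3, 0.015)
    #   Lever_Params = (PrintAges, CheckerMode, Iterate, UseDiffDemog,
    #                   UseDiffProductivities, Matrix_Time, ShaveTime)
    #                = (False, False, True, True, False, False, False)
    #   model = OLG(countries, HH_Params, Firm_Params, Lever_Params)
    #   model.Demographics(demog_ss_tol=1e-8)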
#DEMOGRAPHICS SET-UP
def Import_Data(self):
"""
Description:
- This function activates importing the .CSV files that contain our demographics data
Variables Called from Object:
- self.agestopull = Array: [S], Contains which ages to be used from the data when S<80
- self.UseDiffDemog = Boolean: True activates using unique country demographic data
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of Time Periods
- self.FirstFertilityAge = Int: First age where agents give birth
- self.LastFertilityAge = Int: Last age where agents give birth
Variables Stored in Object:
- self.all_FertilityAges = Array: [I,S,f_range+T], Fertility rates from a f_range years ago to year T
- self.FertilityRates = Array: [I,S,T], Fertility rates from the present time to year T
- self.Migrants = Array: [I,S,T], Number of immigrants
- self.MortalityRates = Array: [I,S,T], Mortality rates of each country for each age cohort and year
- self.N = Array: [I,S,T], Population of each country for each age cohort and year
- self.Nhat = Array: [I,S,T], World population share of each country for each age cohort and year
Other Functions Called:
- None
Objects in Function:
            - f_range = Int: Number of fertile years, will be used to correctly store the fertility data
- index = Int: Unique index for a given country that corresponds to the I_dict
- f_bar = Array: [I,S], Average fertility rate across all countries and cohorts in year T_1,
used to get the SS demographics
- rho_bar = Array: [I,S], Average mortality rate across all countries and cohorts in year T_1,
used to get the SS demographics
Outputs:
- None
"""
self.frange=self.LastFertilityAge+1-self.FirstFertilityAge
self.N=np.zeros((self.I,self.S,self.T))
self.Nhat=np.zeros((self.I,self.S,self.T))
self.all_FertilityRates = np.zeros((self.I, self.S, self.frange+self.T))
self.FertilityRates = np.zeros((self.I, self.S, self.T))
self.MortalityRates = np.zeros((self.I, self.S, self.T))
self.Migrants = np.zeros((self.I, self.S, self.T))
I_all = list(sorted(self.I_dict, key=self.I_dict.get))
#We loop over each country to import its demographic data
for i in xrange(self.I):
#If the bool UseDiffDemog == True, we get the unique country index number for importing from the .CSVs
if self.UseDiffDemog:
index = self.I_dict[self.I_touse[i]]
#Otherwise we just only use the data for one specific country
else:
index = 0
#Importing the data and correctly storing it in our demographics matrices
self.N[i,:,0] = np.loadtxt(("Data_Files/population.csv"),delimiter=',',\
skiprows=1, usecols=[index+1])[self.agestopull]*1000
self.all_FertilityRates[i,self.FirstFertilityAge:self.LastFertilityAge+1,\
:self.frange+self.T_1] = np.transpose(np.loadtxt(str("Data_Files/" + I_all[index] + "_fertility.csv"),delimiter=',',skiprows=1\
,usecols=(self.agestopull[self.FirstFertilityAge:self.LastFertilityAge+1]-22))[48-self.frange:48+self.T_1,:])
self.MortalityRates[i,self.FirstDyingAge:,:self.T_1] = np.transpose(np.loadtxt(str("Data_Files/" + I_all[index] + "_mortality.csv")\
,delimiter=',',skiprows=1, usecols=(self.agestopull[self.FirstDyingAge:]-67))[:self.T_1,:])
self.Migrants[i,:self.MaxImmigrantAge,:self.T_1] = np.einsum("s,t->st",np.loadtxt(("Data_Files/net_migration.csv"),delimiter=','\
,skiprows=1, usecols=[index+1])[self.agestopull[:self.MaxImmigrantAge]]*100, np.ones(self.T_1))
#Gets initial population share
self.Nhat[:,:,0] = self.N[:,:,0]/np.sum(self.N[:,:,0])
#Increases fertility rates to account for different number of periods lived
self.all_FertilityRates = self.all_FertilityRates*80/self.S
self.MortalityRates = self.MortalityRates*80/self.S
#The last generation dies with probability 1
self.MortalityRates[:,-1,:] = np.ones((self.I, self.T))
#Gets steady-state values for all countries by taking the mean at year T_1-1 across countries
f_bar = np.mean(self.all_FertilityRates[:,:,self.frange+self.T_1-1], axis=0)
rho_bar = np.mean(self.MortalityRates[:,:,self.T_1-1], axis=0)
#Set to the steady state for every year beyond year T_1
self.all_FertilityRates[:,:,self.frange+self.T_1:] = np.tile(np.expand_dims(f_bar, axis=2), (self.I,1,self.T-self.T_1))
self.MortalityRates[:,:,self.T_1:] = np.tile(np.expand_dims(rho_bar, axis=2), (self.I,1,self.T-self.T_1))
#FertilityRates is exactly like all_FertilityRates except it begins at time t=0 rather than time t=-self.frange
self.FertilityRates[:,self.FirstFertilityAge:self.LastFertilityAge+1,:] = self.all_FertilityRates[:,self.FirstFertilityAge:self.LastFertilityAge+1,self.frange:]
def Demographics(self, demog_ss_tol, UseSSDemog=False):
"""
Description:
- This function calculates the population dynamics and steady state from the imported data by doing the following:
1. For each year from now until year T, uses equations 3.11 and 3.12 to find the net population in a new year.
(Note however that after year T_1 the fertility, mortality, and immigration rates are set to be the same across countries)
2. Divides the new population by the world population to get the population share for each country and cohort
3. While doing steps 1-2, finds the immigration rate since the data only gives us net migration
4. After getting the population dynamics until year T, we continue to get population shares of future years beyond time T
as explained in steps 1-2 until it converges to a steady state
5. Stores the new steady state and non-steady state variables of population shares and mortality in the OLG object
Inputs:
- UseSSDemog = Boolean: True uses the steady state demographics in calculating the transition path. Mostly used for debugging purposes
- demog_ss_tol = Scalar: The tolerance for the greatest absolute difference between 2 years' population shares
before it is considered to be the steady state
Variables Called from Object:
- self.N = Array: [I,S,T], Population of each country for each age cohort and year
            - self.Nhat = Array: [I,S,T], World population share of each country for each age cohort and year
- self.FertilityRates = Array: [I,S,T], Fertility rates from the present time to year T
- self.Migrants = Array: [I,S,T], Number of immigrants
- self.MortalityRates = Array: [I,S,T], Mortality rates of each country for each age cohort and year
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of Time Periods
- self.T_1 = Int: Transition year for the demographics
Variables Stored in Object:
- self.ImmigrationRates = Array: [I,S,T], Immigration rates of each country for each age cohort and year
- self.Kids = Array: [I,S,T], Matrix that stores the per-household number of kids in each country and each time period
- self.Kids_ss = Array: [I,S], Steady state per-household number of kids for each country at each age
- self.N = Array: [I,S,T], UPDATED population of each country for each age cohort and year
- self.Nhat = Array: [I,S,T+S], UPDATED world population share of each country for each age cohort and year
- self.Nhat_ss = Array: [I,S], Population of each country for each age cohort in the steady state
- self.Mortality_ss = Array: [I,S], Mortality rates of each country for each age cohort in the steady state
- self.MortalityRates = Array: [I,S,T+S], UPDATED mortality rates of each country for each age cohort and year
Other Functions Called:
- None
Objects in Function:
- pop_old = Array: [I,S,T], Population shares in a given year beyond T
that is compared with pop_new to determine the steady state
- pop_new = Array: [I,S,T], Population shares in a given year beyond T
that is compared with pop_old to determine the steady state
- kidsvec = Array: [I,f_range], extracts each cohorts number of kids in each period
- future_year_iter = Int: Counter that keeps track of how many years beyond T it takes
for the population shares to converge to the steady state
Outputs:
- None
"""
#Initializes immigration rates
self.ImmigrationRates = np.zeros((self.I,self.S,self.T))
self.Kids=np.zeros((self.I,self.S,self.T))
#Getting the population and population shares from the present to year T
for t in xrange(1,self.T):
#Gets new babies born this year (Equation 3.11)
self.N[:,0,t] = np.sum((self.N[:,:,t-1]*self.FertilityRates[:,:,t-1]), axis=1)
#Get the immigration RATES for the past year
#If before the transition year T_1, just divide total migrants by population
if t <= self.T_1:
self.ImmigrationRates[:,:,t-1] = self.Migrants[:,:,t-1]/self.N[:,:,t-1]*80/self.S
#If beyond the transition year T_1, average the immigration rates in year T_1 itself
else:
self.ImmigrationRates[:,:,t-1] = np.mean(self.ImmigrationRates[:,:,self.T_1-1],\
axis=0)*80/self.S
#Gets the non-newborn population for the next year (Equation 3.12)
self.N[:,1:,t] = self.N[:,:-1,t-1]*(1+self.ImmigrationRates[:,:-1,t-1]-self.MortalityRates[:,:-1,t-1])
#Gets the population share by taking a fraction of the total world population this year
self.Nhat[:,:,t] = self.N[:,:,t]/np.sum(self.N[:,:,t])
#Gets the number of kids each agent has in this period
for s in xrange(self.FirstFertilityAge,self.LastFertilityAge+self.LeaveHouseAge):
kidsvec = np.diagonal(self.all_FertilityRates[:,s-self.LeaveHouseAge+1:s+1,t:t+self.LeaveHouseAge],axis1=1, axis2=2)
self.Kids[:,s,t-1] = np.sum(kidsvec,axis=1)
#Gets Immigration rates for the final year
self.ImmigrationRates[:,:,-1] = np.mean(self.ImmigrationRates[:,:,self.T_1-1],axis=0)*80/self.S
#Gets Kids for the final year (just the steady state)
self.Kids[:,:,-1] = self.Kids[:,:,-2]
#Initialize iterating variables to find the steady state population shares
pop_old = self.N[:,:,-1]
pop_new = self.N[:,:,-1]
future_year_iter = 0
#Calculates new years of population shares until the greatest absolute difference between 2 consecutive years is less than demog_ss_tol
while np.max(np.abs(self.Nhat[:,:,-1] - self.Nhat[:,:,-2])) > demog_ss_tol:
pop_new[:,0] = np.sum((pop_old[:,:]*self.FertilityRates[:,:,-1]),axis=1)
pop_new[:,1:] = pop_old[:,:-1]*(1+self.ImmigrationRates[:,:-1,-1]-self.MortalityRates[:,:-1,-1])
self.Nhat = np.dstack((self.Nhat,pop_new/np.sum(pop_new)))
future_year_iter += 1
        #Stores the steady state year in a separate matrix
self.Nhat_ss = self.Nhat[:,:,-1]
self.Mortality_ss=self.MortalityRates[:,:,-1]
self.Kids_ss = self.Kids[:,:,-1]
#Deletes all the years between t=T and the steady state calculated in the while loop
self.Nhat = self.Nhat[:,:,:self.T]
#Imposing the ss for years after self.T
self.Nhat = np.dstack(( self.Nhat[:,:,:self.T], np.einsum("is,t->ist",self.Nhat_ss,np.ones(self.S)) ))
#Imposing the ss for years after self.T
self.MortalityRates = np.dstack(( self.MortalityRates[:,:,:self.T], np.einsum("is,t->ist",self.Mortality_ss, np.ones(self.S)) ))
#Imposing the ss for years after self.T
self.Kids = np.dstack(( self.Kids[:,:,:self.T], np.einsum("is,t->ist",self.Kids_ss, np.ones(self.S)) ))
#Overwrites all the years in the transition path with the steady state if UseSSDemog == True
if UseSSDemog == True:
self.Nhat = np.einsum("is,t->ist",self.Nhat_ss,np.ones(self.T+self.S))
self.MortalityRates = np.einsum("is,t->ist",self.Mortality_ss,np.ones(self.T+self.S))
self.Kids = np.einsum("is,t->ist",self.Kids_ss,np.ones(self.T+self.S))
def plotDemographics(self, T_touse="default", compare_across="T", data_year=0):
"""
Description: This calls the plotDemographics function from the AuxiliaryDemographics.py file. See it for details
"""
ages = self.LeaveHouseAge, self.FirstFertilityAge, self.LastFertilityAge, self.FirstDyingAge, self.MaxImmigrantAge
datasets = self.FertilityRates, self.MortalityRates, self.ImmigrationRates, self.Nhat, self.Kids
#Calls the Auxiliary Demographics file for this function
demog.plotDemographics(ages, datasets, self.I, self.S, self.T, self.I_touse, T_touse, compare_across, data_year)
def immigrationplot(self):
subplotdim_dict = {2:221, 3:221, 4:221, 5:231, 6:231, 7:241}
colors = ["blue","green","red","cyan","purple","yellow","brown"]
fig = plt.figure()
fig.suptitle("Immigration Rates")
for i in range(self.I):
ax = fig.add_subplot(subplotdim_dict[self.I]+i, projection='3d')
S, T = np.meshgrid(range(self.S), range(self.T))
ax.plot_surface(S, T, np.transpose(self.ImmigrationRates[i,:,:self.T]), color=colors[i])
ax.set_zlim3d(np.min(self.ImmigrationRates[i,:,:self.T]), np.max(self.ImmigrationRates[:,:,:self.T])*1.05)
ax.set_title(self.I_touse[i])
ax.set_xlabel('S')
ax.set_ylabel('T')
plt.show()
#STEADY STATE
def get_Gamma(self, w, e):
"""
Description:
- Gets the calculation of gamma
Inputs:
- w = Array: [I,T+S] or [I], Wage rate for either the transition path or the steady steady-state
- e = Array: [I,S,T+S] or [I,S], Labor productivities for either the transition path or the steady steady-state
Variables Called From Object:
- self.chi = Scalar: Leisure preference parameter
- self.rho = Scalar: The intratemporal elasticity of substitution between consumption and leisure
- self.sigma = Scalar: Rate of Time Preference
Variables Stored in Object:
- None
Other Functions Called:
- None
Outputs:
- Gamma = Array: [I,S,T+S] or [I,S], Gamma values for each country
"""
#If getting the SS
if e.ndim == 2:
we = np.einsum("i,is->is", w, e)
#If getting transition path
elif e.ndim == 3:
we = np.einsum("it, ist -> ist", w, e)
Gamma = ( ( 1+self.chi*(self.chi/we)**(self.rho-1) )**((1-self.rho*self.sigma)/(self.rho-1)) ) ** (-1/self.sigma)
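        # In math form, the assignment above (Equation 4.22 in the text) reads
        #   Gamma_{i,s} = [ (1 + chi*(chi/(w_i*e_{i,s}))**(rho-1))**((1-rho*sigma)/(rho-1)) ]**(-1/sigma),
        # where w_i*e_{i,s} is the effective wage an age-s agent earns in country i.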
return Gamma
def get_lhat(self,c,w,e):
"""
Description:
- Gets household leisure based on equation 3.20
Inputs:
- c = Array: [I,S,T+S] or [I,S], Consumption for either the transition path or the steady steady-state
- w = Array: [I,T+S] or [I], Wage rate for either the transition path or the steady steady-state
- e = Array: [I,S,T+S] or [I,S], Labor productivities for either the transition path or the steady steady-state
Variables Called from Object:
- self.chi = Scalar: Leisure preference parameter
- self.rho = Scalar: The intratemporal elasticity of substitution between consumption and leisure
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- lhat = Array: [I,S,T+S] or [I,S], Leisure for either the transition path or the steady steady-state
"""
if e.ndim == 2:
we = np.einsum("i,is->is",w,e)
elif e.ndim == 3:
we = np.einsum("it,ist->ist",w,e)
lhat=c*(self.chi/we)**self.rho
return lhat
def get_n(self, lhat):
"""
Description:
-Calculates the aggregate labor productivity based on equation (3.14)
Inputs:
- lhat = Array: [I,S,T+S] or [I,S], Leisure for either the transition path or the steady steady-state
Variables Called from Object:
- self.e = Array: [I,S,T+S], Labor productivities for the transition path
            - self.e_ss = Array: [I,S], Labor productivities for the Steady State
- self.lbar = Array: [T+S], Time endowment in each year
- self.Nhat = Array: [I,S,T+S], World population share of each country for each age cohort and year
- self.Nhat_ss = Array: [I,S], Population of each country for each age cohort in the steady state
- self.lbar_ss = Int: Steady state time endowment. Normalized to 1.0
- self.T = Int: Number of Time Periods
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- n = Array: [I,S,T] or [I,S], Aggregate labor productivity for either the transition path or the steady steady-state
"""
if lhat.ndim == 2:
n = np.sum(self.e_ss*(self.lbar_ss-lhat)*self.Nhat_ss,axis=1)
elif lhat.ndim == 3:
n = np.sum(self.e[:,:,:self.T]*(self.lbar[:self.T]-lhat)*self.Nhat[:,:,:self.T],axis=1)
return n
def get_Y(self, kd, n):
"""
Description:
-Calculates the aggregate output based on equation (3.15)
Inputs:
- kd = Array: [I,S,T+S] or [I,S], Domestic owned capital path for either the transition path or steady-state.
- n = Array: [I,S,T+S] or [I,S], Aggregate labor productivity for either the transition path or the steady steady-state
Variables Called from Object:
- self.A = Array: [I], Technology level for each country
- self.alpha = Scalar: Capital share of production
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- Y = Array: [I,S,T+S] or [I,S], Total output from firms for either the transition path or the steady steady-state
"""
if kd.ndim ==1:
Y = (kd**self.alpha) * ((self.A*n)**(1-self.alpha))
elif kd.ndim== 2:
Y = (kd**self.alpha) * (np.einsum("i,is->is",self.A,n)**(1-self.alpha))
return Y
def get_lifetime_decisionsSS(self, cK_1, w_ss, r_ss, Gamma_ss, bq_ss):
"""
Description:
- 1. Solves for future consumption decisions as a function of initial consumption (Equation 3.22)
- 2. Solves for savings decisions as a function of consumption decisions and previous savings decisions (Equation 3.19)
Inputs:
- cK_1 = Array: [I], Kids Consumption of first cohort for each country
- Gamma_ss = Array: [I,S], Gamma variable, used in Equation 4.22
- w_ss = Array: [I], Steady state wage rate
            - r_ss = Scalar: Steady-state interest rate
Variables Called from Object:
            - self.e_ss = Array: [I,S], Labor productivities for the Steady State
- self.Mortality_ss = Array: [I,S], Mortality rates of each country for each age cohort in the steady state
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.beta = Scalar: Calculated overall future discount rate
- self.chi = Scalar: Leisure preference parameter
- self.delta = Scalar: Calulated overall depreciation rate
- self.g_A = Scalar: Growth rate of technology
- self.sigma = Scalar: Rate of Time Preference
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- avec_ss = Array: [I,S+1], Vector of steady state assets
- cKvec_ss = Array: [I,S], Vector of steady state kids consumption
- cvec_ss = Array: [I,S], Vector of steady state consumption
"""
cKvec_ss = np.zeros((self.I,self.S))
cvec_ss = np.zeros((self.I,self.S))
avec_ss = np.zeros((self.I,self.S+1))
cKvec_ss[:,0] = cK_1
cvec_ss[:,0] = cK_1/Gamma_ss[:,0]
for s in xrange(self.S-1):
#Equation 4.26
cKvec_ss[:,s+1] = ( ( (self.beta*(1-self.Mortality_ss[:,s])*(1+r_ss-self.delta) )**(1/self.sigma) )*cKvec_ss[:,s] )/np.exp(self.g_A)
#Equation 4.25
cvec_ss[:,s+1] = cKvec_ss[:,s+1]/Gamma_ss[:,s+1]
#Equation 4.23
avec_ss[:,s+1] = (w_ss*self.e_ss[:,s]*self.lbar_ss + (1 + r_ss - self.delta)*avec_ss[:,s] + bq_ss[:,s] \
- cvec_ss[:,s]*(1+self.Kids_ss[:,s]*Gamma_ss[:,s]+w_ss*self.e_ss[:,s]*(self.chi/(w_ss*self.e_ss[:,s]))**self.rho))*np.exp(-self.g_A)
#Equation 4.23 for final assets
avec_ss[:,s+2] = (w_ss*self.e_ss[:,s+1] + (1 + r_ss - self.delta)*avec_ss[:,s+1] - cvec_ss[:,s+1]*\
(1+self.Kids_ss[:,s+1]*Gamma_ss[:,s+1]+w_ss*self.e_ss[:,s+1]*(self.chi/(w_ss*self.e_ss[:,s+1]))\
**self.rho))*np.exp(-self.g_A)
return cvec_ss, cKvec_ss, avec_ss
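    # For reference, the recursion implemented in get_lifetime_decisionsSS above,
    # transcribed directly from the code (mort_s denotes the steady-state mortality rate):
    #   (4.26)  cK_{s+1} = [beta*(1-mort_s)*(1+r-delta)]**(1/sigma) * cK_s * exp(-g_A)
    #   (4.25)  c_{s+1}  = cK_{s+1} / Gamma_{s+1}
    #   (4.23)  a_{s+1}  = [w*e_s*lbar + (1+r-delta)*a_s + bq_s
    #                       - c_s*(1 + Kids_s*Gamma_s + w*e_s*(chi/(w*e_s))**rho)] * exp(-g_A)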
def GetSSComponents(self, bq_ss, r_ss, PrintSSEulErrors=False):
"""
Description:
- Solves for all the other variables in the model using bq_ss and r_ss
Inputs:
            - bq_ss = Array: [I,S], Steady state bequests received by each cohort in each country
            - r_ss = Scalar: Steady-state interest rate
Variables Called from Object:
- self.A = Array: [I], Technology level for each country
            - self.e_ss = Array: [I,S], Labor productivities for the Steady State
            - self.Nhat_ss = Array: [I,S], World population share of each country for each age cohort in the steady state
- self.I = Int: Number of Countries
- self.alpha = Scalar: Capital share of production
Variables Stored in Object:
- None
Other Functions Called:
- get_lhat = Solves for leisure as in Equation 4.24
- get_n = Solves for labor supply as in Equation 4.17
- get_Gamma = Solves for the Gamma variable as in Equation 4.22
- get_Y = Solves for output as in Equation 4.18
- householdEuler_SS = System of Euler equations to solve the household problem. Used by opt.fsolve
Objects in Function:
- avec_ss = Array: [I,S], Steady state assets holdings for each country and cohort
- cKvec_ss = Array: [I,S], Steady state kids consumption for each country and cohort
- cvec_ss = Array: [I,S], Steady state consumption for each country and cohort
- c1_guess = Array: [I,S], Initial guess for consumption of the youngest cohort
- kd_ss = Array: [I], Steady state total capital holdings for each country
- kf_ss = Array: [I], Steady state foreign capital in each country
- lhat_ss = Array: [I,S], Steady state leisure decision for each country and cohort
- n_ss = Array: [I], Steady state labor supply
- opt_c1 = Array: [I,S], Optimal consumption of the youngest cohort
- Gamma_ss = Array: [I,S], Steady state Gamma variable (see equation 4.22)
- w_ss = Array: [I], Steady state wage rate
- y_ss = Array: [I], Steady state output of each country
Outputs:
- w_ss, cvec_ss, cKvec_ss, avec_ss, kd_ss, kf_ss, n_ss, y_ss, and lhat_ss
"""
def householdEuler_SS(cK_1, w_ss, r_ss, Gamma_ss, bq_ss):
"""
Description:
- This is the function called by opt.fsolve.
Will stop iterating until a correct value of initial
consumption for each country makes the final assets holdings of each country equal to 0
Inputs:
- cK_1 = Array: [I], Kids Consumption of first cohort for each country
- psi_ss = Array: [I,S], Psi variable, used in Equation 3.21
- w_ss = Array: [I], Steady state wage rate
- r_ss = Scalar: Steady-state intrest rate
Variables Called from Object:
- None
Variables Stored in Object:
- None
Other Functions Called:
- get_lifetimedecisionsSS = calls the above function for the purpose of solving for its roots
in an fsolve.
Objects in Function:
- cpath = Array: [I,S], Vector of steady state consumption
- cK_path = Array: [I,S], Vector of steady state kids consumption
- aseets_path = Array: [I,S+1], Vector of steady state assets
Outputs:
- Euler = Array: [I], Final assets for each country. Must = 0 for system to solve
"""
cpath, cK_path, assets_path = self.get_lifetime_decisionsSS(cK_1, w_ss, r_ss, Gamma_ss, bq_ss)
Euler = assets_path[:,-1]
if np.any(cpath<0):
print "WARNING! The fsolve for initial optimal consumption guessed a negative number"
Euler = np.ones(Euler.shape[0])*9999.
return Euler
def checkSSEulers(cvec_ss, cKvec_ss, avec_ss, w_ss, r_ss, bq_ss, Gamma_ss):
"""
Description:
                -Verifies that the Euler conditions are satisfied when solving for the steady state
Inputs:
- cvec_ss = Array: [I,S], Steady state consumption for each country and cohort
- cKvec_ss = Array: [I,S], Steady state kids consumption for each country and cohort
- avec_ss = Array: [I,S], Steady state assets holdings for each country and cohort
- w_ss = Array: [I], Steady state wage rate
- r_ss = Scalar: Steady state interest rate
- bq_ss = Array: [I,S], Steady state bequests level
- Gamma_ss = Array: [I,S], Steady state shorthand variable, See 4.22
Variables Called from Object:
- self.avec_ss = Array: [I,S], Steady state assets
- self.bqvec_ss = Array: [I,S], Distribution of bequests in the steady state
- self.cKvec_ss = Array: [I,S], Steady state kids' consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
                - self.e_ss = Array: [I,S], Labor productivities for the Steady State
- self.Gamma_ss = Array: [I,S], Steady state value of shorthand calculation variable
- self.Mortality_ss = Array: [I,S], Mortality rates of each country for each age cohort in the steady state
- self.w_ss = Array: [I], Steady state wage rate
- self.beta = Scalar: Calculated overall future discount rate
- self.delta = Scalar: Calulated overall depreciation rate
- self.g_A = Scalar: Growth rate of technology
                - self.r_ss = Scalar: Steady state interest rate
- self.sigma = Scalar: Rate of Time Preference
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- we = Array: [I,S], Matrix product of w and e
Outputs:
- None
"""
we = np.einsum("i,is->is",w_ss,self.e_ss)
Household_Euler = avec_ss[:,-1]
Chained_C_Condition = cKvec_ss[:,:-1]**(-self.sigma) - \
self.beta*(1-self.Mortality_ss[:,:-1])*(cKvec_ss[:,1:]*np.exp(self.g_A))**-self.sigma * (1+r_ss-self.delta)
Modified_Budget_Constraint = cvec_ss -( we*self.lbar_ss + (1+r_ss-self.delta)*avec_ss[:,:-1] + bq_ss - avec_ss[:,1:]*np.exp(self.g_A) )\
/(1+self.Kids_ss*Gamma_ss+we*(self.chi/we)**self.rho)
Consumption_Ratio = cKvec_ss - cvec_ss*Gamma_ss
return Household_Euler, Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio
#Equation 4.19
w_ss = (self.alpha/r_ss)**(self.alpha/(1-self.alpha))*(1-self.alpha)*self.A
#Equation 4.22
Gamma_ss = self.get_Gamma(w_ss, self.e_ss)
#Initial guess for the first cohort's kids consumption
cK1_guess = np.ones(self.I)*5
#Finds the optimal kids consumption for the first cohort
opt_cK1 = opt.fsolve(householdEuler_SS, cK1_guess, args = (w_ss, r_ss, Gamma_ss, bq_ss))
#Gets the optimal paths for consumption, kids consumption and assets as a function of the first cohort's consumption
cvec_ss, cKvec_ss, avec_ss = self.get_lifetime_decisionsSS(opt_cK1, w_ss, r_ss, Gamma_ss, bq_ss)
if PrintSSEulErrors:
Household_Euler, Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio = checkSSEulers(cvec_ss, cKvec_ss, avec_ss, w_ss, r_ss, bq_ss, Gamma_ss)
print "\nZero final period assets satisfied:", np.isclose(np.max(np.absolute(Household_Euler)), 0)
print "Equation 4.26 satisfied:", np.isclose(np.max(np.absolute(Chained_C_Condition)), 0)
print "Equation 4.23 satisfied:", np.isclose(np.max(np.absolute(Modified_Budget_Constraint)), 0)
print "Equation 4.25 satisfied", np.isclose(np.max(np.absolute(Consumption_Ratio)), 0)
#print Chained_C_Condition[0,:]
#print Modified_Budget_Constraint[0,:]
#Snips off the final entry of assets since it is just 0 if the equations solved correctly
avec_ss = avec_ss[:,:-1]
#Equation 4.24
lhat_ss = self.get_lhat(cvec_ss, w_ss, self.e_ss)
#Equation 4.17
n_ss = self.get_n(lhat_ss)
#Equation 4.16
kd_ss = np.sum(avec_ss*self.Nhat_ss,axis=1)
#Equation 4.18
y_ss = self.get_Y(kd_ss,n_ss)
#Equation 4.27
kf_ss = (self.alpha*self.A/r_ss)**(1/(1-self.alpha)) * n_ss-kd_ss
return w_ss, cvec_ss, cKvec_ss, avec_ss, kd_ss, kf_ss, n_ss, y_ss, lhat_ss
def EulerSystemSS(self, guess, PrintSSEulErrors=False):
"""
Description:
- System of Euler equations that must be satisfied (or = 0) for the ss to solve.
Inputs:
- guess = Array: [I+1], Contains guesses for individual bequests in each country
and the guess for the world interest rate
- PrintSSEulErrors = Boolean: True prints the Euler Errors in each iteration of calculating the steady state
Variables Called from Object:
- self.Mortality_ss = Array: [I,S], Mortality rates of each country for each age cohort in the steady state
- self.Nhat_ss = Array: [I,S,T+S], World population share of each country for each age cohort and year
- self.FirstDyingAge = Int: First age where mortality rates affect agents
- self.FirstFertilityAge = Int: First age where agents give birth
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
Variables Stored in Object:
- None
Other Functions Called:
- GetSSComponents = System of equations that solves for wages, consumption, assets,
capital stocks, labor input, domestic output, and leisure in terms
of the world interest rate and bequests
Objects in Function:
- alldeadagent_assets = Array: [I], Sum of assets of all the individuals who die in the steady state.
Evenly distributed to eligible-aged cohorts.
- avec_ss = Array: [I,S], Current guess for the ss assets holdings for each country and cohort
- bqindiv_ss = Array: [I], Current guess for the amount of bequests each eligible-aged
individual will receive in each country
- bq_ss = Array: [I,S], Vector of bequests received for each cohort and country.
Basically bqindiv_ss copied for each eligible-aged individual.
- cKvec_ss = Array: [I,S], Current guess for ss kids' consumption for each country and cohort.
- cvec_ss = Array: [I,S], Current guess for ss consumption for each country and cohort
- kd_ss = Array: [I], Current guess for ss total domestically-held capital for each country
- kf_ss = Array: [I], Current guess for ss foreign capital in each country
- lhat_ss = Array: [I,S], Current guess for ss leisure decision for each country and cohort.
- n_ss = Array: [I], Current guess for ss labor supply
- w_ss = Array: [I], Current guess for each countries ss wage rate as a function of r_ss and bqvec_ss
- y_ss = Array: [I], Current guess for ss output of each country
- r_ss = Scalar: Current guess for the steady-state interest rate
- Euler_bq = Array: [I], Distance between bqindiv_ss and the actual bqindiv_ss calculated in the system.
Must = 0 for the ss to correctly solve.
- Euler_kf = Scalar: Sum of the foreign capital stocks. Must = 0 for the ss to correctly solve
Outputs:
- Euler_all = Array: [I+1], Euler_bq and Euler_kf stacked together. Must = 0 for the ss to correctly solve
"""
#Breaking up the input into its 2 components
bqindiv_ss = guess[:-1]
r_ss = guess[-1]
#Initializes a vector of bequests received for each individual. Will be = 0 for a block of young and a block of old cohorts
bq_ss = np.zeros((self.I,self.S))
bq_ss[:,self.FirstFertilityAge:self.FirstDyingAge] = \
np.einsum("i,s->is", bqindiv_ss, np.ones(self.FirstDyingAge-self.FirstFertilityAge))
#Calls self.GetSSComponents, which solves for all the other ss variables in terms of bequests and intrest rate
w_ss, cvec_ss, cKvec_ss, avec_ss, kd_ss, kf_ss, n_ss, y_ss, lhat_ss = self.GetSSComponents(bq_ss, r_ss, PrintSSEulErrors)
#Sum of all assets holdings of dead agents to be distributed evenly among all eligible agents
alldeadagent_assets = np.sum(avec_ss[:,self.FirstDyingAge:]*\
self.Mortality_ss[:,self.FirstDyingAge:]*self.Nhat_ss[:,self.FirstDyingAge:], axis=1)
#Equation 3.29
Euler_bq = bqindiv_ss - alldeadagent_assets/np.sum(self.Nhat_ss[:,self.FirstFertilityAge:self.FirstDyingAge],\
axis=1)
#Equation 3.24
Euler_kf = np.sum(kf_ss)
Euler_all = np.append(Euler_bq, Euler_kf)
if PrintSSEulErrors: print "Euler Errors:", Euler_all
return Euler_all
def SteadyState(self, rss_guess, bqss_guess, PrintSSEulErrors=False):
"""
Description:
- Finds the steady state of the OLG Model by doing the following:
1. Searches over values of r and bq that satisfy Equations 3.19 and 3.24
2. Uses the correct ss values of r and bq to find all the other ss variables
3. Checks to see of the system has correctly solved
Inputs:
- bqindiv_ss_guess = Array: [I], Initial guess for ss bequests that each eligible-aged individual will receive
- PrintSSEulErrors = Boolean: True prints the Euler Errors in each iteration of calculating the steady state
- rss_guess = Scalar: Initial guess for the ss interest rate
Variables Called from Object:
- self.I = Int: Number of Countries
- self.FirstFertilityAge = Int: First age where agents give birth
- self.FirstDyingAge = Int: First age where agents begin to die
- self.S = Int: Number of Cohorts
Variables Stored in Object:
- self.avec_ss = Array: [I,S], Steady state assets
- self.bqindiv_ss = Array: [I], Bequests that each eligible-aged individual will receive in the steady state
- self.bqvec_ss = Array: [I,S], Distribution of bequests in the steady state
- self.cKvec_ss = Array: [I,S], Steady State kid's consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.kd_ss = Array: [I], Steady state total domestically-owned capital holdings for each country
- self.kf_ss = Array: [I], Steady state foreign capital in each country
- self.lhat_ss = Array: [I,S], Steady state leisure decision for each country and cohort
- self.n_ss = Array: [I], Steady state aggregate labor productivity in each country
- self.Gamma_ss = Array: [I,S], Steady state value of shorthand calculation variable
- self.w_ss = Array: [I], Steady state wage rate
- self.y_ss = Array: [I], Steady state output in each country
- self.r_ss = Scalar: Steady state interest rate
Other Functions Called:
- self.EulerSystemSS = Initiates the whole process of solving for the steady state, starting with this function
- self.GetSSComponents = Once the bequests and interest rates are solved for, this function gives us what
the implied individual pieces would be. Then, we have those pieces stored in the object.
- self.get_Gamma = given wage and productivity paths, this function calculates the shorthand variable path.
Objects in Function:
- alldeadagent_assets = Array: [I], Sum of assets of all the individuals who die in the steady state.
Evenly distributed to eligible-aged cohorts.
- Euler_bq = Array: [I], Distance between bqindiv_ss and the actual bqindiv_ss calculated in the system.
Must = 0 for the ss to correctly solve.
- Euler_kf = Scalar: Sum of the foreign capital stocks. Must = 0 for the ss to correctly solve
Outputs:
- None
"""
#Prepares the initial guess for the fsolve
guess = np.append(bqss_guess, rss_guess)
#Searches over bq and r to find values that satisfy the Euler Equations (3.19 and 3.24)
ss = opt.fsolve(self.EulerSystemSS, guess, args=PrintSSEulErrors)
#Breaking up the output into its 2 components
self.bqindiv_ss = ss[:-1]
self.r_ss = ss[-1]
#Initializes a vector for bequests distribution. Will be = 0 for a block of young and a block of old cohorts who don't get bequests
self.bqvec_ss = np.zeros((self.I,self.S))
self.bqvec_ss[:,self.FirstFertilityAge:self.FirstDyingAge] = np.einsum("i,s->is",self.bqindiv_ss,\
np.ones(self.FirstDyingAge-self.FirstFertilityAge))
#Calls self.GetSSComponents, which solves for all the other ss variables in terms of bequests and intrest rate
self.w_ss, self.cvec_ss, self.cKvec_ss, self.avec_ss, self.kd_ss, self.kf_ss, self.n_ss, self.y_ss, self.lhat_ss \
= self.GetSSComponents(self.bqvec_ss,self.r_ss)
#Calculates and stores the steady state gamma value
self.Gamma_ss = self.get_Gamma(self.w_ss,self.e_ss)
#Sum of all assets holdings of dead agents to be distributed evenly among all eligible agents
alldeadagent_assets = np.sum(self.avec_ss[:,self.FirstDyingAge:]*self.Mortality_ss[:,self.FirstDyingAge:]*\
self.Nhat_ss[:,self.FirstDyingAge:], axis=1)
print "\n\nSTEADY STATE FOUND!"
#Checks to see if the Euler_bq and Euler_kf equations are sufficiently close to 0
if self.CheckerMode==False:
#Equation 3.29
Euler_bq = self.bqindiv_ss - alldeadagent_assets/np.sum(self.Nhat_ss[:,self.FirstFertilityAge:self.FirstDyingAge],\
axis=1)
#Equation 3.24
Euler_kf = np.sum(self.kf_ss)
print "-Euler for bq satisfied:", np.isclose(np.max(np.absolute(Euler_bq)), 0)
print "-Euler for r satisfied:", np.isclose(Euler_kf, 0), "\n\n"
def PrintSSResults(self):
"""
Description:
-Prints the final result of steady state calculations
Inputs:
- None
Variables Called from Object:
- self.avec_ss = Array: [I,S], Steady state assets
- self.cKvec_ss = Array: [I,S], Steady state kids consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.kf_ss = Array: [I], Steady state foreign capital in each country
- self.kd_ss = Array: [I], Steady state total capital holdings for each country
- self.n_ss = Array: [I], Steady state aggregate productivity in each country
- self.w_ss = Array: [I], Steady state wage rate
- self.y_ss = Array: [I], Steady state output in each country
- self.r_ss = Scalar: Steady state interest rate
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- None
"""
print "assets steady state:", self.avec_ss
print "kf steady state", self.kf_ss
print "kd steady state", self.kd_ss
print "bq steady state", self.bqindiv_ss
print "n steady state", self.n_ss
print "y steady state", self.y_ss
print "r steady state", self.r_ss
print "w steady state", self.w_ss
print "c_vec steady state", self.cvec_ss
print "cK_vec steady state", self.cKvec_ss
def plotSSResults(self):
"""
Description:
- Plots the final calculations of the Steady State
Inputs:
- None
Variables Called from Object:
- self.avec_ss = Array: [I,S], Steady state assets
- self.bqvec_ss = Array: [I,S], Distribution of bequests in the steady state
- self.cKvec_ss = Array: [I,S], Steady state kids consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- None
"""
plt.title("Steady state")
plt.subplot(231)
for i in range(self.I):
plt.plot(range(self.S),self.cvec_ss[i,:])
plt.title("Consumption")
#plt.legend(self.I_touse[:self.I])
plt.subplot(232)
for i in range(self.I):
plt.plot(range(self.S),self.cKvec_ss[i,:])
plt.title("Kids' Consumption")
#plt.legend(self.I_touse[:self.I])
#plt.show()
plt.subplot(233)
for i in range(self.I):
plt.plot(range(self.S),self.avec_ss[i,:])
plt.title("Assets")
#plt.legend(self.I_touse[:self.I])
#plt.show()
plt.subplot(234)
for i in range(self.I):
plt.plot(range(self.S),self.lhat_ss[i,:])
plt.title("Leisure")
#plt.legend(self.I_touse[:self.I])
#plt.show()
plt.subplot(235)
for i in range(self.I):
plt.plot(range(self.S),self.bqvec_ss[i,:])
plt.title("Bequests")
#plt.legend(self.I_touse[:self.I])
plt.show()
def plotSSUtility(self, cK_1):
"""
Description:
- Plots the lifetime utility implied by a grid of initial kids' consumption values in the steady state
Inputs:
- cK_1 = Array: Grid of candidate initial kids' consumption levels for the first cohort
Variables Called From Object:
- self.S
- self.Gamma_ss
- self.beta
- self.Mortality_ss
- self.r_ss
- self.delta
- self.sigma
- self.g_A
- self.chi
- self.w_ss
- self.e_ss
- self.rho
- self.Kids_ss
- self.lbar_ss
- self.bqvec_ss
- self.cvec_ss
Variables Stored in Object:
-
Other Functions Called:
-
Objects in Function:
-
Outputs:
-
"""
cKvec_ss = np.zeros((len(cK_1),self.S))
cvec_ss = np.zeros((len(cK_1),self.S))
avec_ss = np.zeros((len(cK_1),self.S+1))
cKvec_ss[:,0] = cK_1
cvec_ss[:,0] = cK_1/self.Gamma_ss[0,0]
#I QUESTION WHY WE'RE DOING THIS
for s in xrange(self.S-1):
#Equation 4.26
cKvec_ss[:,s+1] = ( ((self.beta**-1*(1-self.Mortality_ss[0,s])*(1+self.r_ss-self.delta))**(1/self.sigma) )*cKvec_ss[:,s] )/np.exp(self.g_A)
#Equation 4.25
cvec_ss[:,s+1] = cKvec_ss[:,s+1]/self.Gamma_ss[0,s+1]
#Equation 4.23
avec_ss[:,s+1] = (self.w_ss[0]*self.e_ss[0,s]*self.lbar_ss + (1 + self.r_ss - self.delta)*avec_ss[:,s] + self.bqvec_ss[0,s] \
- cvec_ss[:,s]*(1+self.Kids_ss[0,s]*self.Gamma_ss[0,s]+self.w_ss[0]*self.e_ss[0,s]*(self.chi/(self.w_ss[0]*self.e_ss[0,s]))**self.rho))*np.exp(-self.g_A)
#Equation 4.23 for final assets
avec_ss[:,s+2] = (self.w_ss[0]*self.e_ss[0,s+1] + (1 + self.r_ss - self.delta)*avec_ss[:,s+1] - cvec_ss[:,s+1]*\
(1+self.Kids_ss[0,s+1]*self.Gamma_ss[0,s+1]+self.w_ss[0]*self.e_ss[0,s+1]*(self.chi/(self.w_ss[0]*self.e_ss[0,s+1]))\
**self.rho))*np.exp(-self.g_A)
lhat_ss = cvec_ss*(self.chi/(self.w_ss[0]*self.e_ss[0,:]))**self.rho
betaj = self.beta**np.arange(self.S)
U = betaj*(1-self.sigma)**-1*(1-self.Mortality_ss[0])*\
( (cvec_ss**(1-1/self.rho) + self.chi*lhat_ss**(1-1/self.rho))**((1/self.sigma)/(1-1/self.rho))\
+ self.Kids_ss[0]*cKvec_ss**(1-self.sigma) )
V = betaj*(1-self.sigma)**-1*(1-self.Mortality_ss[0])*\
(cvec_ss**(1-1/self.rho) + self.chi*lhat_ss**(1-1/self.rho))**((1/self.sigma)/(1-1/self.rho))
H = betaj**-1*(1-self.sigma)**-1*self.Kids_ss[0]*cKvec_ss**(1-self.sigma)
U2 = np.sum(V+H, axis=1)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
c1 = cK_1/self.Gamma_ss[0,0]
X, Y = np.meshgrid(c1, cK_1)
Z = U2
ax.plot_surface(X, Y, Z)
ax.set_xlabel('Consumption')
ax.set_ylabel('Kids Consumption')
ax.set_zlabel('Utility')
#plt.show()
#TIMEPATH-ITERATION
def set_initial_values(self, r_init, bq_init, a_init):
"""
Description:
- Saves the initial guesses of r, bq and a given by the user into the object
Inputs:
- a_init = Array: [I,S], Initial asset distribution given by User
- bq_init = Array: [I], Initial bequests given by User
- r_init = Scalar: Initial interest rate given by User
Variables Called from Object:
- None
Variables Stored in Object:
- self.a_init = Array: [I,S], Initial asset distribution given by Users
- self.bq_init = Array: [I], Initial bequests given by User
- self.r_init = Scalar: Initial interest rate given by User
Other Functions Called:
- None
Objects in Function:
- None
Outputs:
- None
"""
self.r_init = r_init
self.bq_init = bq_init
self.a_init = a_init
def get_initialguesses(self):
"""
Description:
- Generates an initial guess path used for beginning TPI calculation. The guess for the transition path for r follows the form
of a quadratic function given by y = aa x^2 + bb x + cc, while the guess for the bequests transition path is linear
Inputs:
- None
Variables Called from Object:
- self.bq_init = Array: [I], Initial bequests given by User
- self.I = Int: Number of Countries
- self.T = Int: Number of Time Periods
- self.r_init = Scalar: Initial interest rate given by User
- self.r_ss = Scalar: Steady state interest rate
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- aa = Scalar: coefficient for x^2 term
- bb = Scalar: coefficient for x term
- cc = Scalar: coefficient for constant term
Outputs:
- bqpath_guess = Array: [I,T], Initial path of bequests in linear form
- rpath_guess = Array: [T], Initial path of interest rates in quadratic form
"""
rpath_guess = np.zeros(self.T)
bqpath_guess = np.zeros((self.I,self.T))
func = lambda t, a, b: a/t + b
t = np.linspace(1,self.T, self.T-1)
x = np.array([0.0001,self.T])
y = np.array([self.r_init, self.r_ss])
popt, pcov = opt.curve_fit(func,x,y)
rtest = np.hstack(( self.r_init, func(t,popt[0],popt[1]) ))
plt.plot(range(self.T), rtest)
#plt.show()
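#Quadratic guess for the r path: the coefficients below make the path start at r_init,
#reach r_ss in period T-1, and have zero slope there so it lands smoothly on the steady state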
cc = self.r_init
bb = -2 * (self.r_init-self.r_ss)/(self.T-1)
aa = -bb / (2*(self.T-1))
rpath_guess[:self.T] = aa * np.arange(0,self.T)**2 + bb*np.arange(0,self.T) + cc
#rpath_guess = rtest
for i in range(self.I):
bqpath_guess[i,:self.T] = np.linspace(self.bq_init[i], self.bqindiv_ss[i], self.T)
return rpath_guess, bqpath_guess
def GetTPIComponents(self, bqvec_path, r_path, Print_HH_Eulers, Print_caTimepaths):
"""
Description:
- Gets the transition paths for all the other variables in the model as a function of bqvec_path and r_path
Inputs:
- bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
- r_path = Array: [T], Transition path for the interest rate
- Print_caTimepaths = Boolean: True prints out the timepaths of consumption and assets. For debugging purposes
- Print_HH_Eulers = Boolean: True prints out if all of the household equations were satisfied or not
Variables Called from Object:
- None
Variables Stored in Object:
- None
Other Functions Called:
- get_c_cK_a_matrices = Gets consumption, kids consumption and assets decisions as a function of r, w, and bq
- get_lhat = Gets leisure as a function of c, w, and e
- get_n = Gets aggregate labor supply
- get_Gamma = Application of Equation 4.22
- get_Y = Gets output
- NOTE: This function also contains the functions get_lifetime_decisions_Future, get_lifetime_decisions_Alive,
HHEulerSystem, and check_household_conditions, all of which are called in get_c_a_matrices
Objects in Function:
- Gamma = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
Outputs:
- a_matrix = Array: [I,S,T+S], Transition path for assets holdings in each country
- c_matrix = Array: [I,S,T+S], Transition path for consumption in each country
- cK_matrix = Array: [I,S,T+S], Transition path for kids consumption in each country
- kd_path = Array: [I,T], Transition path for total domestically-owned capital in each country
- kf_path = Array: [I,T], Transition path for foreign capital in each country
- lhat_path = Array: [I,S,T+S], Transition path for leisure for each cohort and country
- n_path = Array: [I,T], Transition path for total labor supply in each country
- w_path = Array: [I,T], Transition path for the wage rate in each country
- y_path = Array: [I,T], Transition path for output in each country
"""
#Functions that solve lower-diagonal household decisions in vectors
def get_lifetime_decisions_Future(cK0, c_uppermat, cK_uppermat, a_uppermat, w_path, r_path, Gamma, bqvec_path):
"""
Description:
- Gets household decisions for consumption and assets for each agent to be born in the future
Inputs:
- a_uppermat = Array: [I,S+1,T+S], Like c_uppermat, but for assets. Contains S+1 dimensions so we can consider
any leftover assets each agent has at the end of its lifetime.
- bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
- cK0 = Array: [I*T], Initial consumption in each agent's lifetime
- cK_uppermat = Array: [I,S,T+S], Kids consumption matrix that already contains the kids consumptions decisions
for agents currently alive and is 0 for all agents born in the future
- c_uppermat = Array: [I,S,T+S], Consumption matrix that already contains the consumption decisions for agents
currently alive and is all 0s for agents born in the future.
This function fills in the rest of this matrix.
- Gamma = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
- r_path = Array: [T], Transition path for the interest rate
- w_path = Array: [I,T], Transition path for the wage rate in each country
Variables Called from Object:
- self.e = Array: [I,S,T+S], Labor Productivities
- self.MortalityRates = Array: [I,S,T+S], Mortality rates of each country for each age cohort and year
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.beta = Scalar: Calculated overall future discount rate
- self.chi = Scalar: Leisure preference parameter
- self.delta = Scalar: Calculated overall depreciation rate
- self.g_A = Scalar: Growth rate of technology
- self.rho = Scalar: The intratemporal elasticity of substitution between consumption and leisure
- self.sigma = Scalar: Rate of Time Preference
Variables Stored in Object:
- None
Other Functions Called:
- cy_fillca = External cython module that's equivalent to the for loop called in this function. It's marginally faster
compared to the loop that's in this code. This part will likely be replaced in the future. See pure_cython.pyx
for more details
Objects in Function:
- we = Array: [I,S,T+S] Matrix product of w and e
Outputs:
- a_matrix = Array: [I,S+1,T+S], Filled in a_uppermat now with assets for cohorts to be born in the future
- cK_matrix = Array: [I,S,T+S], Filled in cK_uppermat now with kids consumption for cohorts to be born in the future
- c_matrix = Array: [I,S,T+S], Filled in c_uppermat now with consumption for cohorts to be born in the future
"""
#Initializes consumption and assets with all of the upper triangle already filled in
c_matrix = c_uppermat
cK_matrix = cK_uppermat
a_matrix = a_uppermat
cK_matrix[:,0,:self.T] = cK0.reshape(self.I,self.T)
c_matrix[:,0,:self.T] = cK_matrix[:,0,:self.T]/Gamma[:,0,:self.T]
#Gets we ahead of time for easier calculation
we = np.einsum("it,ist->ist",w_path,self.e)
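#Lower-triangle fill: entry (s,t) holds the decision of the agent who is age s in period t,
#so each pass below advances every future-born cohort one age (and one period) along its lifetime diagonal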
if self.ShaveTime:
cy_fillca(c_matrix,cK_matrix,a_matrix,r_path,self.MortalityRates,bqvec_path,we,Gamma,self.lbar,self.Kids,self.beta,self.chi,self.delta,self.g_A,self.rho,self.sigma)
#Loops through each year (across S) and gets decisions for every agent in the next year
else:
for s in xrange(self.S-1):
#Gets consumption for every agents' next year using Equation 3.22
cK_matrix[:,s+1,s+1:self.T+s+1] = ((self.beta * (1-self.MortalityRates[:,s,s:self.T+s]) * (1 + r_path[s+1:self.T+s+1] - self.delta))**(1/self.sigma)\
* cK_matrix[:,s,s:self.T+s])*np.exp(-self.g_A)
c_matrix[:,s+1,s+1:self.T+s+1] = cK_matrix[:,s+1,s+1:self.T+s+1]/Gamma[:,s+1,s+1:self.T+s+1]
#Gets assets for every agents' next year using Equation 3.19
a_matrix[:,s+1,s+1:self.T+s+1] = ( (we[:,s,s:self.T+s]*self.lbar[s:self.T+s] + (1 + r_path[s:self.T+s] - self.delta)*a_matrix[:,s,s:self.T+s] + bqvec_path[:,s,s:self.T+s])\
-c_matrix[:,s,s:self.T+s]*(1+self.Kids[:,s,s:self.T+s]*Gamma[:,s,s:self.T+s]+we[:,s,s:self.T+s]*(self.chi/we[:,s,s:self.T+s])**(self.rho)\
) )*np.exp(-self.g_A)
#Gets assets in the final period of every agents' lifetime
s=self.S-2
a_matrix[:,-1,s+2:self.T+s+2] = ( (we[:,-1,s+1:self.T+s+1]*self.lbar[s+1:self.T+s+1] + (1 + r_path[s+1:self.T+s+1] - self.delta)*a_matrix[:,-2,s+1:self.T+s+1])\
-c_matrix[:,-1,s+1:self.T+s+1]*(1+self.Kids[:,-1,s+1:self.T+s+1]*Gamma[:,-1,s+1:self.T+s+1]+we[:,-1,s+1:self.T+s+1]*(self.chi/we[:,-1,s+1:self.T+s+1])**(self.rho) ) )*np.exp(-self.g_A)
return c_matrix, cK_matrix, a_matrix
#Functions that solve upper-diagonal household decisions in vectors
def get_lifetime_decisions_Alive(cK0, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path):
"""
Description:
- Gets household decisions for consumption and assets for each cohort currently alive (except for the oldest cohort, whose household problem is a closed form solved in line 1435)
Inputs:
- a_matrix = Array: [I,S+1,T+S], Empty matrix that gets filled in with savings decisions each cohort currently alive
- bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
- cK0 = Array: [I*(S-1)], Today's kids consumption for each cohort currently alive
- cK_matrix = Array: [I,S,T+S], Empty matrix that gets filled with kids consumption decisions for each cohort currently living
- c_matrix = Array: [I,S,T+S], Empty matrix that gets filled in with consumption decisions for each cohort currently alive
- Gamma = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
- r_path = Array: [T], Transition path for the interest rate
- w_path = Array: [I,T], Transition path for the wage rate in each country
Variables Called from Object:
- self.MortalityRates = Array: [I,S,T], Mortality rates of each country for each age cohort and year
- self.beta = Scalar: Calculated overall future discount rate
- self.chi = Scalar: Leisure preference parameter
- self.delta = Scalar: Calculated overall depreciation rate
- self.g_A = Scalar: Growth rate of technology
- self.rho = Scalar: The intratemporal elasticity of substitution between consumption and leisure
- self.sigma = Scalar: Rate of Time Preference
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- we = Array: [I,S,T+S], Matrix product of w and e
Outputs:
- a_matrix = Array: [I,S+1,T+S], Savings decisions, now including those who are alive in time 0
- cK_matrix = Array: [I,S,T+S], Kids Consumption decisions, now including those who are alive in time 0
- c_matrix = Array: [I,S,T+S], Consumption decisions, now including those who are alive in time 0
"""
cK_matrix[:,:-1,0] = cK0.reshape(self.I,self.S-1)
c_matrix[:,:-1,0] = cK_matrix[:,:-1,0]/Gamma[:,:-1,0]
we = np.einsum("it,ist->ist",w_path,self.e)
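#Upper-triangle fill: for cohorts alive at t=0, age and calendar time advance together,
#so the loop index serves as both and each pass moves every living cohort forward one period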
for s in xrange(self.S):
t = s
cK_matrix[:,s+1:,t+1] = (self.beta * (1-self.MortalityRates[:,s:-1,t]) * (1 + r_path[t+1] - self.delta))**(1/self.sigma)\
* cK_matrix[:,s:-1,t]*np.exp(-self.g_A)
c_matrix[:,s+1:,t+1] = cK_matrix[:,s+1:,t+1]/Gamma[:,s+1:,t+1]
a_matrix[:,s+1:,t+1] = ( (we[:,s:,t]*self.lbar[t] + (1 + r_path[t] - self.delta)*a_matrix[:,s:-1,t] + bqvec_path[:,s:,t])\
-c_matrix[:,s:,t]*(1+self.Kids[:,s:,t]*Gamma[:,s:,t]+we[:,s:,t]*(self.chi/we[:,s:,t])**(self.rho) ) )*np.exp(-self.g_A)
#Gets assets in the final period of every agents' lifetime
a_matrix[:,-1,t+2] = ( (we[:,-1,t+1] + (1 + r_path[t+1] - self.delta)*a_matrix[:,-2,t+1])\
-c_matrix[:,-1,t+1]*(1+self.Kids[:,-1,t+1]*Gamma[:,-1,t+1]+we[:,-1,t+1]*(self.chi/we[:,-1,t+1])**(self.rho) ) )*np.exp(-self.g_A)
return c_matrix, cK_matrix, a_matrix
def Alive_EulerSystem(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path):
"""
Description: This is essentially the objective function for households decisions.
This function is called by opt.fsolve and searches over levels of
initial consumption that lead to the agents not having any assets when they die.
Inputs:
- a_matrix = Array: [I,S+1,T+S], Savings decisions each cohort
- bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
- cK0_guess = Array: [I*(T+S)] or [I*(S-1)], Guess for initial consumption, either for future agents or agents currently alive
- cK_matrix = Array: [I,S,T+S], Kids Consumption decisions for each cohort
- c_matrix = Array: [I,S,T+S], Consumption decisions for each cohort
- Gamma = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
- r_path = Array: [T+S], Transition path for the interest rate
- w_path = Array: [I,T+S], Transition path for the wage rate in each country
- Alive = Boolean: True means this function was called to solve for agents' decisions who are currently alive
False means this function was called to solve for agents' decisions will be born in future time periods
Variables Called from Object:
- None
Variables Stored in Object:
- None
Other Functions Called:
- get_lifetime_decisions_Alive = Gets consumption and assets decisions for agents currently alive as a
function of consumption in the initial period (t=0).
- get_lifetime_decisions_Future = Gets consumption and assets decisions each agent to be born in the future as a
function of each agent's initial consumption (s=0).
Objects in Function:
- a_matrix = Array: [I,S+1,T], Savings decisions each cohort
- c_matrix = Array: [I,S,T], Consumption decisions each cohort
Outputs:
- Euler = Array: [T] or [S], Remaining assets when each cohort dies off.
Must = 0 for the Euler system to correctly solve.
"""
#Gets the decisions paths for each agent
c_matrix, cK_matrix, a_matrix = get_lifetime_decisions_Alive(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path)
#Household Eulers are solved when the agents have no assets at the end of their life
Euler = np.ravel(a_matrix[:,-1,1:self.S])
#print "Max Euler", max(Euler)
return Euler
def Future_EulerSystem(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path):
"""
Description: This is essentially the objective function for households decisions.
This function is called by opt.fsolve and searches over levels of
initial consumption that lead to the agents not having any assets when they die.
Inputs:
- a_matrix = Array: [I,S+1,T+S], Savings decisions each cohort
- bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
- cK0_guess = Array: [I*(T+S)] or [I*(S-1)], Guess for initial kids consumption, either for future agents or agents currently alive
- c_matrix = Array: [I,S,T+S], Consumption decisions each cohort
- Gamma = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
- r_path = Array: [T], Transition path for the interest rate
- w_path = Array: [I,T+S], Transition path for the wage rate in each country
- Alive = Boolean: True means this function was called to solve for agents' decisions who are currently alive
False means this function was called to solve for agents' decisions will be born in future time periods
Variables Called from Object:
- None
Variables Stored in Object:
- None
Other Functions Called:
- get_lifetime_decisions_Alive = Gets consumption and assets decisions for agents currently alive as a
function of consumption in the initial period (t=0).
- get_lifetime_decisions_Future = Gets consumption and assets decisions each agent to be born in the future as a
function of each agent's initial consumption (s=0).
Objects in Function:
- a_matrix = Array: [I,S+1,T], Savings decisions each cohort
- c_matrix = Array: [I,S,T], Consumption decisions each cohort
Outputs:
- Euler = Array: [T] or [S], Remaining assets when each cohort dies off.
Must = 0 for the Euler system to correctly solve.
"""
#Gets the decisions paths for each agent
c_matrix, cK_matrix, a_matrix = get_lifetime_decisions_Future(cK0_guess, c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path)
#Household Eulers are solved when the agents have no assets at the end of their life
Euler = np.ravel(a_matrix[:,-1,self.S:])
#print "Max Euler", max(Euler)
return Euler
#Checks various household conditions
def check_household_conditions(w_path, r_path, c_matrix, cK_matrix, a_matrix, Gamma, bqvec_path):
"""
Description:
- Essentially returns a matrix of residuals of the left and right sides of the Household Euler equations
to make sure the system solved correctly. Mostly used for debugging.
Inputs:
- a_matrix = Array: [I,S+1,T+S], Savings decisions each cohort
- bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
- cK_matrix = Array: [I,S,T+S], Kids Consumption decisions for each cohort
- c_matrix = Array: [I,S,T+S], Consumption decisions for each cohort
- Gamma = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
- r_path = Array: [T], Transition path for the interest rate
- w_path = Array: [I,T+S], Transition path for the wage rate in each country
Variables Called from Object:
- self.e = Array: [I,S,T+S], Labor Productivities
- self.T = Int: Number of time periods
- self.beta = Scalar: Calculated overall future discount rate
- self.chi = Scalar: Leisure preference parameter
- self.delta = Scalar: Calculated overall depreciation rate
- self.g_A = Scalar: Growth rate of technology
- self.rho = Scalar: The intratemporal elasticity of substitution between consumption and leisure
- self.sigma = Scalar: Rate of Time Preference
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- we = Array: [I,S,T+S], Matrix product of w and e
Outputs:
- Chained_C_Condition = Array: [I,S-1,T+S-1], Matrix of residuals in Equation 3.22
- Household_Euler = Array: [I,T+S], Matrix of residuals in of the 0 remaining assets equation
- Modified_Budget_Constraint= Array: [I,S-1,T+S-1], Matrix of residuals in Equation 3.19
"""
#Multiplies wages and productivities ahead of time for easy calculations of the first two equations below
we = np.einsum("it,ist->ist",w_path[:,:self.T-1],self.e[:,:-1,:self.T-1])
#Disparity between left and right sides of Equation 4.26
Chained_C_Condition = cK_matrix[:,:-1,:self.T-1]**(-self.sigma)\
- self.beta*(1-self.MortalityRates[:,:-1,:self.T-1])\
*(cK_matrix[:,1:,1:self.T]*np.exp(self.g_A))**(-self.sigma)*(1+r_path[1:self.T]-self.delta)
#Disparity between left and right sides of Equation 4.23
Modified_Budget_Constraint = c_matrix[:,:-1,:self.T-1]\
- (we*self.lbar[:self.T-1] + (1+r_path[:self.T-1]-self.delta)*a_matrix[:,:-2,:self.T-1] + bqvec_path[:,:-1,:self.T-1]\
- a_matrix[:,1:-1,1:self.T]*np.exp(self.g_A))\
/(1 + self.Kids[:,:-1,:self.T-1]*Gamma[:,:-1,:self.T-1] + we*(self.chi/we)**(self.rho) )
#Disparity between left and right sides of Equation 4.25
Consumption_Ratio = cK_matrix - c_matrix*Gamma
#Any remaining assets each agent has at the end of its lifetime. Should be 0 if other Eulers are solving correctly
Household_Euler = a_matrix[:,-1,:]
return Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio, Household_Euler
#Gets consumption and assets matrices using fsolve
def get_c_cK_a_matrices(w_path, r_path, Gamma, bqvec_path, Print_HH_Eulers, Print_caTimepaths):
"""
Description:
- Solves for the optimal consumption and assets paths by searching over initial consumptions for agents alive and unborn
Inputs:
- bqvec_path = Array: [I,S,T+S], Transition path for distribution of bequests for each country
- Gamma = Array: [I,S,T+S], Transition path of shorthand calculation variable Gamma (Equation 4.22)
- r_path = Array: [T], Transition path for the interest rate
- w_path = Array: [I,T], Transition path for the wage rate in each country
- Print_caTimepaths = Boolean: True prints out the timepaths of consumption and assets. For de-bugging purposes.
- Print_HH_Eulers = Boolean: True prints out if all of the household equations were satisfied or not
Variables Called from Object:
- self.a_init = Array: [I,S], Initial asset distribution given by User
- self.cKvec_ss = Array: [I,S], Steady state Kids consumption
- self.e = Array: [I,S,T+S], Labor Productivities
- self.MortalityRates = Array: [I,S,T+S], Mortality rates of each country for each age cohort and year
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.chi = Scalar: Leisure preference parameter
- self.delta = Scalar: Calculated overall depreciation rate
- self.rho = Scalar: The intratemporal elasticity of substitution between consumption and leisure
Variables Stored in Object:
- None
Other Functions Called:
- HHEulerSystem = Objective function for households (final assets at death = 0). Must solve for HH conditions to be satisfied
- get_lifetime_decisions_Alive = Gets lifetime consumption and assets decisions for agents alive in the initial time period
- get_lifetime_decisions_Future = Gets lifetime consumption and assets decisions for agents to be born in the future
Objects in Function:
- ck0alive_guess = Array: [I,S-1], Initial guess for kids consumption in this period for each agent alive
- ck0future_guess = Array: [I,T+S], Initial guess for initial kids consumption for each agent to be born in the future
- Chained_C_Condition = Array: [I,S,T+S], Disparity between left and right sides of Equation 3.22.
Should be all 0s if the household problem was solved correctly.
- Household_Euler = Array: [I,T+S], Leftover assets at the end of the final period each agent lives.
Should be all 0s if the household problem was solved correctly
- Modified_Budget_Constraint = Array: [I,S,T+S], Disparity between left and right sides of Equation 3.19.
Should be all 0s if the household problem was solved correctly.
Outputs:
- a_matrix[:,:-1,:self.T] = Array: [I,S,T], Assets transition path for each country and cohort
- c_matrix[:,:,:self.T] = Array: [I,S,T], Consumption transition path for each country and cohort
- cK_matrix[:,:,:self.T] = Array: [I,S,T], Kids Consumption transition path for each country and cohort
"""
#Initializes the consumption and assets matrices
c_matrix = np.zeros((self.I,self.S,self.T+self.S))
cK_matrix = np.zeros((self.I,self.S,self.T+self.S))
a_matrix = np.zeros((self.I,self.S+1,self.T+self.S))
a_matrix[:,:-1,0] = self.a_init
#Equation 3.19 for the oldest agent in time t=0. Note that this agent chooses to consume everything so that it has no assets in the following period
c_matrix[:,self.S-1,0] = (w_path[:,0]*self.e[:,self.S-1,0]*self.lbar[self.S-1] + (1 + r_path[0] - self.delta)*self.a_init[:,self.S-1] + bqvec_path[:,self.S-1,0])\
/(1+self.Kids[:,-1,0]*Gamma[:,-1,0]+w_path[:,0]*self.e[:,self.S-1,0]*(self.chi/(w_path[:,0]*self.e[:,self.S-1,0]))**(self.rho))
cK_matrix[:,self.S-1,0] = c_matrix[:,self.S-1,0]*Gamma[:,-1,0]
#Initial guess for agents currently alive
cK0alive_guess = np.ones((self.I, self.S-1))*.3
#Fills in c_matrix and a_matrix with the correct decisions for agents currently alive
start=time.time()
opt.root(Alive_EulerSystem, cK0alive_guess, args=(c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path), method="krylov", tol=1e-8)
if self.Matrix_Time: print "\nFill time: NEW UPPER USING KRYLOV", time.time()-start
#Initializes a guess for the first vector for the fsolve to use
cK0future_guess = np.zeros((self.I,self.T))
for i in range(self.I):
cK0future_guess[i,:] = np.linspace(cK_matrix[i,1,0], self.cKvec_ss[i,-1], self.T)
#Solves for the entire consumption and assets matrices for agents not currently born
start=time.time()
opt.root(Future_EulerSystem, cK0future_guess, args=(c_matrix, cK_matrix, a_matrix, w_path, r_path, Gamma, bqvec_path), method="krylov", tol=1e-8)
if self.Matrix_Time: print "lower triangle fill time NOW USING KRYLOV", time.time()-start
#Prints consumption and assets matrices for country 0.
#NOTE: the output is the transpose of the original matrices, so each row is time and each col is cohort
if Print_caTimepaths:
print "Consumption Matrix for country 0", str("("+self.I_touse[0]+")")
print np.round(np.transpose(c_matrix[0,:,:self.T]), decimals=3)
print "Assets Matrix for country 0", str("("+self.I_touse[0]+")")
print np.round(np.transpose(a_matrix[0,:,:self.T]), decimals=3)
#Prints if each set of conditions are satisfied or not
if Print_HH_Eulers:
#Gets matrices for the disparities of critical household conditions and constraints
Chained_C_Condition, Modified_Budget_Constraint, Consumption_Ratio, Household_Euler = check_household_conditions(w_path, r_path, c_matrix, cK_matrix, a_matrix, Gamma, bqvec_path)
#Checks to see if all of the Eulers are close enough to 0
print "\nEuler Household satisfied:", np.isclose(np.max(np.absolute(Household_Euler)), 0), np.max(np.absolute(Household_Euler))
print "Equation 4.26 satisfied:", np.isclose(np.max(np.absolute(Chained_C_Condition)), 0), np.max(np.absolute(Chained_C_Condition))
print "Equation 4.23 satisfied:", np.isclose(np.max(np.absolute(Modified_Budget_Constraint)), 0), np.max(np.absolute(Modified_Budget_Constraint))
print "Equation 4.25 satisfied", np.isclose(np.max(np.absolute(Consumption_Ratio)), 0), np.max(np.absolute(Consumption_Ratio))
#print np.round(np.transpose(Household_Euler[0,:]), decimals=8)
#print np.round(np.transpose(Modified_Budget_Constraint[0,:,:]), decimals=4)
#print np.round(np.transpose(Consumption_Ratio[0,:,:]), decimals=4)
#Returns only up until time T and not the vector
#print c_matrix[0,:,:self.T]
return c_matrix[:,:,:self.T], cK_matrix[:,:,:self.T], a_matrix[:,:-1,:self.T]
#GetTPIComponents continues here
#Equation 3.25, note that this hasn't changed from stage 3 to stage 4
alphvec=np.ones(self.I)*self.alpha
w_path = np.einsum("it,i->it",np.einsum("i,t->it",alphvec,1/r_path)**(self.alpha/(1-self.alpha)),(1-self.alpha)*self.A)
#Equation 4.22
Gamma = self.get_Gamma(w_path,self.e)
#Equations 4.25, 4.23
c_matrix, cK_matrix, a_matrix = get_c_cK_a_matrices(w_path, r_path, Gamma, bqvec_path, Print_HH_Eulers, Print_caTimepaths)
#Equation 4.24
lhat_path = self.get_lhat(c_matrix, w_path[:,:self.T], self.e[:,:,:self.T])
#Equation 4.17
n_path = self.get_n(lhat_path)
#Equation 4.16
kd_path = np.sum(a_matrix*self.Nhat[:,:,:self.T],axis=1)
#Equation 4.18
y_path = self.get_Y(kd_path,n_path)
#Equation 4.28
kf_path = np.outer(self.alpha*self.A, 1/r_path[:self.T])**( 1/(1-self.alpha) )*n_path - kd_path
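#Foreign capital makes up the difference between the capital stock implied by the world
#interest rate and domestically-owned capital; it must sum to zero across countries (Equation 3.24)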
return w_path, c_matrix, cK_matrix, a_matrix, kd_path, kf_path, n_path, y_path, lhat_path
def EulerSystemTPI(self, guess, Print_HH_Eulers, Print_caTimepaths):
"""
Description:
- Gives a system of Euler equations that must be satisfied (or = 0) for the transition paths to solve.
Inputs:
- guess = Array [(I+1)*T]: Current guess for the transition paths of bq and r
- Print_caTimepaths = Boolean: True prints out the timepaths of consumption and assets. For de-bugging mostly
- Print_HH_Eulers = Boolean: True prints out if all of the household equations were satisfied or not
Variables Called from Object:
- self.MortalityRates = Array: [I,S,T+S], Mortality rates of each country for each age cohort and year
- self.Nhat = Array: [I,S,T+S], World population share of each country for each age cohort and year
- self.FirstDyingAge = Int: First age where mortality rates affect agents
- self.FirstFertilityAge = Int: First age where agents give birth
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.Timepath_counter = Int: Counter that keeps track of the number of iterations in solving for the time paths
- self.IterationsToShow = Set: A set of user inputs of iterations of TPI graphs to show
Variables Stored in Object:
- None
Other Functions Called:
- self.GetTPIComponents = Gets the transition paths for all the other variables in the model as a function of bqvec_path and r_path
- self.plot_timepaths = Takes the current iteration of the timepaths and plots them into one sheet of graphs
Objects in Function:
- a_matrix = Array: [I,S,T+S], Transition path for assets holdings in each country
- alldeadagent_assets = Array: [I,T+S], Assets of all of the agents who died in each period. Used to get Euler_bq.
- bqvec_path = Array: [I,S,T], Transition path for distribution of bequests for each country
- cK_matrix = Array: [I,S,T], Transition path for Kids consumption in each country
- c_matrix = Array: [I,S,T], Transition path for consumption in each country
- Euler_bq = Array: [I,T], Euler equation that must be satisfied for the model to solve. See Equation 3.29
- Euler_kf = Array: [T], Euler equation that must be satisfied for the model to solve. See Equation 3.24
- kd_path = Array: [I,T], Transition path for total domestically-owned capital in each country
- kf_path = Array: [I,T], Transition path for foreign capital in each country
- lhat_path = Array: [I,S,T], Transition path for leisure for each cohort and country
- n_path = Array: [I,T], Transition path for total labor supply in each country
- r_path = Array: [T], Transition path for the interest rate
- w_path = Array: [I,T], Transition path for the wage rate in each country
- y_path = Array: [I,T], Transition path for output in each country
Outputs:
- Euler_all = Array: [(I+1)*T], Euler_bq and Euler_kf combined to be the same shape as the input guess
"""
#Current guess for r and bq
guess = np.expand_dims(guess, axis=1).reshape((self.I+1,self.T))
r_path = guess[0,:]
bq_path = guess[1:,:]
#Imposes the steady state on the guesses for r and bq for S periods after T
r_path = np.hstack((r_path, np.ones(self.S)*self.r_ss))
bq_path = np.column_stack(( bq_path, np.outer(self.bqindiv_ss,np.ones(self.S)) ))
#Initializes the bequests distribution, which essentially is a copy of bq for each eligible-aged agent
bqvec_path = np.zeros((self.I,self.S,self.T+self.S))
bqvec_path[:,self.FirstFertilityAge:self.FirstDyingAge,:] = np.einsum("it,s->ist", bq_path, \
np.ones(self.FirstDyingAge-self.FirstFertilityAge))
#Gets all the other variables in the model as a function of bq and r
w_path, c_matrix, cK_matrix, a_matrix, kd_path, \
kf_path, n_path, y_path, lhat_path = self.GetTPIComponents(bqvec_path, r_path, Print_HH_Eulers, Print_caTimepaths)
#Sums up all the assets of agents that died in each period
alldeadagent_assets = np.sum(a_matrix[:,self.FirstDyingAge:,:]*\
self.MortalityRates[:,self.FirstDyingAge:,:self.T]*self.Nhat[:,self.FirstDyingAge:,:self.T], axis=1)
#Difference between assets of dead agents and our guess for bequests. See Equation 3.29
Euler_bq = bq_path[:,:self.T] - alldeadagent_assets/np.sum(self.Nhat[:,self.FirstFertilityAge:self.FirstDyingAge,:self.T],\
axis=1)
#All the foreign-held capital must sum to 0. See Equation 3.24
Euler_kf = np.sum(kf_path,axis=0)
#Both Euler equations in one vector for the fsolve to play nice
Euler_all = np.append(Euler_bq, Euler_kf)
#Prints out info for the current iteration
if self.Iterate:
print "Iteration:", self.Timepath_counter, "Min Euler:", np.min(np.absolute(Euler_all)), "Mean Euler:", np.mean(np.absolute(Euler_all))\
, "Max Euler_bq:", np.max(np.absolute(Euler_bq)), "Max Euler_kf", np.max(np.absolute(Euler_kf))
#Will plot one of the graphs if the user specified outside the class
if self.Timepath_counter in self.IterationsToShow:
self.plot_timepaths(SAVE=False, Paths = (r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path, n_path, kd_path, kf_path))
#Keeps track of the current iteration of solving the transition path for the model
self.Timepath_counter += 1
return Euler_all
def Timepath_optimize(self, Print_HH_Eulers, Print_caTimepaths, Iters_to_show = set([])):
"""
Description:
- Solves for the transition path for each variable in the model
Inputs:
- Print_caTimepaths = Boolean: True prints out the timepaths of consumption and assets. For de-bugging mostly
- Print_HH_Eulers = Boolean: True prints out if all of the household equations were satisfied or not
- Iters_to_show = Set: Set of integers that represent iterations of the transition path solver that the user wants plotted
Variables Called from Object:
- self.bqindiv_ss = Array: [I], Bequests each individual receives in the steady-state in each country
- self.FirstDyingAge = Int: First age where mortality rates affect agents
- self.FirstFertilityAge = Int: First age where agents give birth
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.r_ss = Scalar: Steady state interest rate
- self.IterationsToShow = Set: A set of user inputs of iterations of TPI graphs to show
Variables Stored in Object:
- self.a_matrix = Array: [I,S,T], Transition path for assets holdings for each cohort in each country
- self.bqindiv_path = Array: [I,T+S], Transition path of bq that is given to each individual
- self.bqvec_path = Array: [I,S,T], Transition path for distribution of bequests for each country
- self.cK_matrix = Array: [I,S,T], Transition path for Kids consumption for each cohort in each country
- self.c_matrix = Array: [I,S,T], Transition path for consumption for each cohort in each country
- self.kd_path = Array: [I,T], Transition path for total domestically-owned capital in each country
- self.kf_path = Array: [I,T], Transition path for foreign capital in each country
- self.lhat_path = Array: [I,S,T+S], Transition path for leisure for each cohort and country
- self.n_path = Array: [I,T], Transition path for total labor supply in each country
- self.r_path = Array: [T+S], Transition path of r from year t=0 to t=T and imposes the steady state interest rate for S periods beyond T
- self.w_path = Array: [I,T+S], Transition path for the wage rate in each country with the Steady state imposed for an additional S periods beyond T
- self.y_path = Array: [I,T], Transition path for output in each country
Other Functions Called:
- self.get_initialguesses = Gets initial guesses for the transition paths for r and bq
- self.EulerSystemTPI = Used by opt.fsolve in order to search over paths for r and bq that satisfy the Euler equations for the model
- self.GetTPIComponents = Gets all the other variables in the model once we already have found the correct paths for r and bq
Objects in Function:
- bqindiv_path_guess = Array: [I,T], Initial guess for the transition path for bq
- guess = Array: [(I+1)*T], Initial guess of r and bq to feed into opt.fsolve
- paths = Array: [I+1,T], Output of opt.fsolve. Contains the correct transition paths for r and bq
- rpath_guess = Array: [T], Initial guess for the transition path for r
Outputs:
- None
"""
#This is a set that will display the plot of the transition paths for all the variables in whatever iterations are in the set
self.IterationsToShow = Iters_to_show
#Gets an initial guess for the transition paths
rpath_guess, bqindiv_path_guess = self.get_initialguesses()
#Appends the guesses to feed into the opt.fsolve
guess = np.append(rpath_guess, bqindiv_path_guess)
#Solves for the correct transition paths
paths = opt.fsolve(self.EulerSystemTPI, guess, args=(Print_HH_Eulers, Print_caTimepaths))#, method="krylov", tol=1e-8)["x"]
#Reshapes the output of the opt.fsolve so that the first row is the transition path for r and
#the second through I rows are the transition paths of bq for each country
paths = np.expand_dims(paths, axis=1).reshape((self.I+1,self.T))
#Imposes the steady state for S years beyond time T
self.r_path = np.hstack((paths[0,:], np.ones(self.S)*self.r_ss))
self.bqindiv_path = np.column_stack(( paths[1:,:], np.outer(self.bqindiv_ss,np.ones(self.S)) ))
#Initialize bequests distribution
self.bqvec_path = np.zeros((self.I,self.S,self.T+self.S))
self.bqvec_path[:,self.FirstFertilityAge:self.FirstDyingAge,:] = np.einsum("it,s->ist", self.bqindiv_path, \
np.ones(self.FirstDyingAge-self.FirstFertilityAge))
#Gets the other variables in the model
self.w_path, self.c_matrix, self.cK_matrix, self.a_matrix, self.kd_path, self.kf_path, self.n_path, self.y_path, self.lhat_path = \
self.GetTPIComponents(self.bqvec_path, self.r_path, Print_HH_Eulers, Print_caTimepaths)
def plot_timepaths(self, SAVE=False, Paths = None):
"""
Description:
- Take the timepaths and plots them into an image with windows of different graphs
Inputs:
- bq_path = Array:[I,T+S], Given bequests path
- cK_matrix = Array:[I,S,T+S], Given kids consumption matrix
- c_matrix = Array:[I,S,T+S], Given consumption matrix
- kd_path = Array:[I,T+S], Given domestic capital path
- kf_path = Array:[I,T+S], Given foreign capital path
- lhat_path = Array:[I,S,T+S], Given time endowment
- n_path = Array:[I,T+S], Given aggregate labor productivity
- r_path = Array:[T+S], Given interest rate path
- SAVE = Boolean: Switch that determines whether we save the graphs or simply show it.
Variables Called from Object:
- self.cKvec_ss = Array: [I,S], Steady State kids consumption
- self.cvec_ss = Array: [I,S], Steady state consumption
- self.kd_ss = Array: [I], Steady state total capital holdings for each country
- self.lhat_ss = Array: [I,S], Steady state leisure decision for each country and cohort
- self.n_ss = Array: [I], Steady state aggregate labor supply in each country
- self.I = Int: Number of Countries
- self.S = Int: Number of Cohorts
- self.T = Int: Number of time periods
- self.Timepath_counter = Int: Counter that keeps track of the number of iterations in solving for the time paths
- self.I_touse = List: [I], Roster of countries that are being used
Variables Stored in Object:
- None
Other Functions Called:
- None
Objects in Function:
- name = String: Name of the .png file that will save the graphs.
- title = String: Overall title of the sheet of graphs
Outputs:
- None
"""
if Paths is None:
r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path, n_path, kd_path, kf_path = \
self.r_path, self.bqindiv_path, self.w_path, self.c_matrix, self.cK_matrix, self.lhat_path, self.n_path, self.kd_path, self.kf_path
else:
r_path, bq_path, w_path, c_matrix, cK_matrix, lhat_path, n_path, kd_path, kf_path = Paths
title = str("S = " + str(self.S) + ", T = " + str(self.T) + ", Iter: " + str(self.Timepath_counter))
plt.suptitle(title)
ax = plt.subplot(331)
for i in range(self.I):
plt.plot(range(self.S+self.T), r_path)
plt.title("r_path")
#plt.legend(self.I_touse)
ax.set_xticklabels([])
ax = plt.subplot(332)
for i in range(self.I):
plt.plot(range(self.S+self.T), bq_path[i,:])
plt.title("bqvec_path")
ax.set_xticklabels([])
ax = plt.subplot(333)
for i in range(self.I):
plt.plot(range(self.S+self.T), w_path[i,:])
plt.title("w_path")
ax.set_xticklabels([])
ax = plt.subplot(334)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((np.sum(c_matrix[i,:,:],axis=0),np.ones(self.S)*np.sum(self.cvec_ss[i,:]))) )
plt.title("C_path")
ax.set_xticklabels([])
ax = plt.subplot(335)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((np.sum(cK_matrix[i,:,:],axis=0),np.ones(self.S)*np.sum(self.cKvec_ss[i,:]))) )
plt.title("CK_path")
ax.set_xticklabels([])
ax = plt.subplot(336)
for i in range(self.I):
plt.plot( range(self.S+self.T), np.hstack((np.sum(lhat_path[i,:,:],axis=0),np.ones(self.S)*np.sum(self.lhat_ss[i,:]))) )
plt.title("Lhat_path")
ax.set_xticklabels([])
plt.subplot(337)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((n_path[i,:],np.ones(self.S)*self.n_ss[i])))
plt.xlabel("Year")
plt.title("n_path")
plt.subplot(338)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((kd_path[i,:],np.ones(self.S)*self.kd_ss[i])) )
plt.xlabel("Year")
plt.title("kd_path")
plt.subplot(339)
for i in range(self.I):
plt.plot(range(self.S+self.T), np.hstack((kf_path[i,:],np.ones(self.S)*self.kf_ss[i])))
plt.xlabel("Year")
plt.title("kf_path")
if SAVE:
name= "Graphs/OLGresult_Iter"+str(self.Timepath_counter)+"_"+str(self.I)+"_"+str(self.S)+"_"+str(self.sigma)+".png"
plt.savefig(name)
plt.clf()
else:
plt.show()
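#Illustrative driver (a sketch, not part of the original file): assumes `model` is an already-built
#instance of this class and that the numeric guesses below are hypothetical placeholders for its calibration.
#    model.SteadyState(rss_guess=0.2, bqss_guess=np.ones(model.I)*0.2, PrintSSEulErrors=True)
#    model.PrintSSResults()
#    model.set_initial_values(r_init=0.25, bq_init=np.ones(model.I)*0.2, a_init=model.avec_ss*0.9)
#    model.Timepath_optimize(Print_HH_Eulers=True, Print_caTimepaths=False)
#    model.plot_timepaths(SAVE=True)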
| mit |
rubikloud/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
ronekko/chainer | setup.py | 1 | 4992 | #!/usr/bin/env python
import os
import pkg_resources
import sys
from setuptools import setup
if sys.version_info[:3] == (3, 5, 0):
if not int(os.getenv('CHAINER_PYTHON_350_FORCE', '0')):
msg = """
Chainer does not work with Python 3.5.0.
We strongly recommend to use another version of Python.
If you want to use Chainer with Python 3.5.0 at your own risk,
set CHAINER_PYTHON_350_FORCE environment variable to 1."""
print(msg)
sys.exit(1)
def cupy_requirement(pkg):
return '{}==5.0.0b3'.format(pkg)
requirements = {
'install': [
'filelock',
'numpy>=1.9.0',
'protobuf>=3.0.0',
'six>=1.9.0',
],
'cuda': [
cupy_requirement('cupy'),
],
'stylecheck': [
'autopep8==1.3.5',
'flake8==3.5.0',
'pbr==4.0.4',
'pycodestyle==2.3.1',
],
'test': [
'pytest',
'mock',
],
'doctest': [
'matplotlib',
'theano',
],
'docs': [
'sphinx',
'sphinx_rtd_theme',
],
'travis': [
'-r stylecheck',
'-r test',
'-r docs',
# pytest-timeout>=1.3.0 requires pytest>=3.6.
# TODO(niboshi): Consider upgrading pytest to >=3.6
'pytest-timeout<1.3.0',
'pytest-cov',
'theano',
'h5py',
'pillow',
],
'appveyor': [
'-r test',
# pytest-timeout>=1.3.0 requires pytest>=3.6.
# TODO(niboshi): Consider upgrading pytest to >=3.6
'pytest-timeout<1.3.0',
'pytest-cov',
],
}
def reduce_requirements(key):
# Resolve recursive requirements notation (-r)
reqs = requirements[key]
resolved_reqs = []
for req in reqs:
if req.startswith('-r'):
depend_key = req[2:].lstrip()
reduce_requirements(depend_key)
resolved_reqs += requirements[depend_key]
else:
resolved_reqs.append(req)
requirements[key] = resolved_reqs
for k in requirements.keys():
reduce_requirements(k)
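# Illustrative note (added for exposition, not part of the original script):
# after the loop above every '-r <key>' marker has been expanded in place, so
# a recursive list such as requirements['travis'] now begins with the concrete
# packages of 'stylecheck', 'test' and 'docs' (autopep8, flake8, pytest, mock,
# sphinx, ...) followed by its own literal entries ('pytest-timeout<1.3.0',
# 'pytest-cov', 'theano', 'h5py', 'pillow').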
extras_require = {k: v for k, v in requirements.items() if k != 'install'}
setup_requires = []
install_requires = requirements['install']
tests_require = requirements['test']
def find_any_distribution(pkgs):
for pkg in pkgs:
try:
return pkg_resources.get_distribution(pkg)
except pkg_resources.DistributionNotFound:
pass
return None
# Currently cupy provides source package (cupy) and binary wheel packages
# (cupy-cudaXX). Chainer can use any one of these packages.
cupy_pkg = find_any_distribution([
'cupy-cuda92',
'cupy-cuda91',
'cupy-cuda90',
'cupy-cuda80',
'cupy',
])
if cupy_pkg is not None:
req = cupy_requirement(cupy_pkg.project_name)
install_requires.append(req)
print('Use %s' % req)
else:
print('No CuPy installation detected')
here = os.path.abspath(os.path.dirname(__file__))
# Get __version__ variable
exec(open(os.path.join(here, 'chainer', '_version.py')).read())
setup(
name='chainer',
version=__version__, # NOQA
description='A flexible framework of neural networks',
author='Seiya Tokui',
author_email='[email protected]',
url='https://chainer.org/',
license='MIT License',
packages=['chainer',
'chainer.backends',
'chainer.dataset',
'chainer.datasets',
'chainer.distributions',
'chainer.exporters',
'chainer.functions',
'chainer.functions.activation',
'chainer.functions.array',
'chainer.functions.connection',
'chainer.functions.evaluation',
'chainer.functions.loss',
'chainer.functions.math',
'chainer.functions.noise',
'chainer.functions.normalization',
'chainer.functions.pooling',
'chainer.functions.theano',
'chainer.functions.util',
'chainer.function_hooks',
'chainer.iterators',
'chainer.initializers',
'chainer.links',
'chainer.links.activation',
'chainer.links.caffe',
'chainer.links.caffe.protobuf3',
'chainer.links.connection',
'chainer.links.loss',
'chainer.links.model',
'chainer.links.model.vision',
'chainer.links.normalization',
'chainer.links.theano',
'chainer.optimizers',
'chainer.optimizer_hooks',
'chainer.serializers',
'chainer.testing',
'chainer.training',
'chainer.training.extensions',
'chainer.training.triggers',
'chainer.training.updaters',
'chainer.utils'],
zip_safe=False,
setup_requires=setup_requires,
install_requires=install_requires,
tests_require=tests_require,
extras_require=extras_require,
)
| mit |
larsmans/scipy | scipy/cluster/hierarchy.py | 18 | 95902 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
cut_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import bisect
from collections import deque
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy._lib.six import string_types
from scipy._lib.six import xrange
_LINKAGE_METHODS = {'single': 0, 'complete': 1, 'average': 2, 'centroid': 3,
'median': 4, 'ward': 5, 'weighted': 6}
_EUCLIDEAN_METHODS = ('centroid', 'median', 'ward')
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'cut_tree', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
        D = np.random.rand(pnts * (pnts - 1) // 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``Z``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
An :math:`(n-1)` by 4 matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
      cluster :math:`v`. This is also known as the Farthest Point
      Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
      into a new cluster :math:`u`, the average of centroids s and t
      gives the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
- \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
      implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str or function, optional
The distance metric to use in the case that y is a collection of
observation vectors; ignored otherwise. See the ``distance.pdist``
function for a list of valid distance metrics. A custom distance
function can also be used. See the ``distance.pdist`` function for
details.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
Notes
-----
1. For method 'single' an optimized algorithm called SLINK is implemented,
which has :math:`O(n^2)` time complexity.
For methods 'complete', 'average', 'weighted' and 'ward' an algorithm
called nearest-neighbors chain is implemented, which too has time
complexity :math:`O(n^2)`.
For other methods a naive algorithm is implemented with :math:`O(n^3)`
time complexity.
All algorithms use :math:`O(n^2)` memory.
Refer to [1]_ for details about the algorithms.
2. Methods 'centroid', 'median' and 'ward' are correctly defined only if
Euclidean pairwise metric is used. If `y` is passed as precomputed
pairwise distances, then it is a user responsibility to assure that
these distances are in fact Euclidean, otherwise the produced result
will be incorrect.
References
----------
.. [1] Daniel Mullner, "Modern hierarchical, agglomerative clustering
algorithms", `arXiv:1109.2378v1 <http://arxiv.org/abs/1109.2378v1>`_
, 2011.
"""
if method not in _LINKAGE_METHODS:
raise ValueError("Invalid method: {0}".format(method))
y = _convert_to_double(np.asarray(y, order='c'))
if y.ndim == 1:
distance.is_valid_y(y, throw=True, name='y')
[y] = _copy_arrays_if_base_present([y])
elif y.ndim == 2:
if method in _EUCLIDEAN_METHODS and metric != 'euclidean':
raise ValueError("Method '{0}' requires the distance metric "
"to be Euclidean".format(method))
y = distance.pdist(y, metric)
else:
raise ValueError("`y` must be 1 or 2 dimensional.")
n = int(distance.num_obs_y(y))
method_code = _LINKAGE_METHODS[method]
if method == 'single':
return _hierarchy.slink(y, n)
elif method in ['complete', 'average', 'weighted', 'ward']:
return _hierarchy.nn_chain(y, n, method_code)
else:
return _hierarchy.linkage(y, n, method_code)
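# Illustrative sketch (added for exposition, not part of the original module):
# a minimal end-to-end call of `linkage` on a small random observation matrix,
# assuming the default Euclidean metric.  The helper name
# `_example_linkage_sketch` is hypothetical and only keeps the example
# importable without side effects.
def _example_linkage_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 3)               # 10 observations in 3 dimensions
    Z = linkage(X, method='average')   # condensed distances computed internally
    # Each of the n-1 = 9 rows of Z is [cluster index, cluster index,
    # merge distance, number of original observations in the new cluster].
    assert Z.shape == (9, 4)
    return Z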
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def __lt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist < node.dist
def __gt__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist > node.dist
def __eq__(self, node):
if not isinstance(node, ClusterNode):
raise ValueError("Can't compare ClusterNode "
"to type {}".format(type(node)))
return self.dist == node.dist
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
            The right child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
            Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def _order_cluster_tree(Z):
"""
Returns clustering nodes in bottom-up order by distance.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
Returns
-------
nodes : list
A list of ClusterNode objects.
"""
q = deque()
tree = to_tree(Z)
q.append(tree)
nodes = []
while q:
node = q.popleft()
if not node.is_leaf():
bisect.insort_left(nodes, node)
q.append(node.get_right())
q.append(node.get_left())
return nodes
def cut_tree(Z, n_clusters=None, height=None):
"""
Given a linkage matrix Z, return the cut tree.
Parameters
----------
Z : scipy.cluster.linkage array
The linkage matrix.
n_clusters : array_like, optional
Number of clusters in the tree at the cut point.
height : array_like, optional
The height at which to cut the tree. Only possible for ultrametric
trees.
Returns
-------
cutree : array
An array indicating group membership at each agglomeration step. I.e.,
for a full cut tree, in the first column each data point is in its own
cluster. At the next step, two nodes are merged. Finally all singleton
and non-singleton clusters are in one group. If `n_clusters` or
`height` is given, the columns correspond to the columns of `n_clusters` or
`height`.
Examples
--------
>>> from scipy import cluster
>>> np.random.seed(23)
>>> X = np.random.randn(50, 4)
>>> Z = cluster.hierarchy.ward(X)
>>> cutree = cluster.hierarchy.cut_tree(Z, n_clusters=[5, 10])
>>> cutree[:10]
array([[0, 0],
[1, 1],
[2, 2],
[3, 3],
[3, 4],
[2, 2],
[0, 0],
[1, 5],
[3, 6],
[4, 7]])
"""
nobs = num_obs_linkage(Z)
nodes = _order_cluster_tree(Z)
if height is not None and n_clusters is not None:
raise ValueError("At least one of either height or n_clusters "
"must be None")
elif height is None and n_clusters is None: # return the full cut tree
cols_idx = np.arange(nobs)
elif height is not None:
heights = np.array([x.dist for x in nodes])
cols_idx = np.searchsorted(heights, height)
else:
cols_idx = nobs - np.searchsorted(np.arange(nobs), n_clusters)
try:
n_cols = len(cols_idx)
except TypeError: # scalar
n_cols = 1
cols_idx = np.array([cols_idx])
groups = np.zeros((n_cols, nobs), dtype=int)
last_group = np.arange(nobs)
if 0 in cols_idx:
groups[0] = last_group
for i, node in enumerate(nodes):
idx = node.pre_order()
this_group = last_group.copy()
this_group[idx] = last_group[idx].min()
this_group[this_group > last_group[idx].max()] -= 1
if i + 1 in cols_idx:
groups[np.where(i + 1 == cols_idx)[0]] = this_group
last_group = this_group
return groups.T
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
        reference to the root node while ``d`` is a list of
        ClusterNode references indexed by cluster id. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
    r : ClusterNode
        The root node of the tree. If ``rd`` is True, the tuple
        ``(r, d)`` described above is returned instead.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# Number of original objects is equal to the number of rows minus 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
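# Illustrative sketch (added for exposition, not part of the original module):
# converting a linkage matrix into the ClusterNode tree described above and
# reading the leaf ids back in left-to-right (dendrogram) order.  The name
# `_example_to_tree_sketch` is a hypothetical helper.
def _example_to_tree_sketch(Z):
    root = to_tree(Z)
    # Leaf ids as they would appear along the bottom of a dendrogram.
    leaf_ids = root.pre_order(lambda node: node.id)
    # The root covers every original observation exactly once.
    assert root.get_count() == len(leaf_ids)
    return leaf_ids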
def _convert_to_bool(X):
if X.dtype != bool:
X = X.astype(bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see `linkage` function).
    Y : ndarray, optional
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
        The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n-1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy**2
denomB = Zz**2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
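# Illustrative sketch (added for exposition, not part of the original module):
# using the cophenetic correlation coefficient as a rough check of how well a
# linkage preserves the original pairwise distances.  The name
# `_example_cophenet_sketch` is a hypothetical helper.
def _example_cophenet_sketch(X):
    Y = distance.pdist(X)             # condensed pairwise distances
    Z = linkage(Y, method='average')
    c, coph_dists = cophenet(Z, Y)    # c close to 1 means the hierarchy
    return c, coph_dists              # reflects the original distances well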
def inconsistent(Z, d=2):
r"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical
clustering). See `linkage` documentation for more information on its
form.
d : int, optional
The number of links up to `d` levels below each non-singleton cluster.
Returns
-------
R : ndarray
        A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link
statistics for the non-singleton cluster ``i``. The link statistics are
computed over the link heights for links :math:`d` levels below the
cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is the number
of links included in the calculation; and ``R[i,3]`` is the
inconsistency coefficient,
    .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}}{\mathtt{R[i,1]}}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
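# Illustrative sketch (added for exposition, not part of the original module):
# unpacking the four columns of the inconsistency matrix documented above.
# The name `_example_inconsistent_sketch` is a hypothetical helper.
def _example_inconsistent_sketch(Z):
    R = inconsistent(Z, d=2)
    # Column 0: mean link height, 1: standard deviation, 2: number of links
    # used in the calculation, 3: inconsistency coefficient.
    mean_heights, std_heights, link_counts, coefficients = R.T
    return coefficients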
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column Z[:,3] is added where Z[i,3] is represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
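# Illustrative note (added for exposition, not part of the original module):
# a 1-indexed MATLAB-style linkage such as [[1, 2, 0.3], [3, 4, 0.8]] becomes
# [[0, 1, 0.3, 2], [2, 3, 0.8, 3]] after from_mlab_linkage -- the indices are
# shifted to 0-based form and a fourth column of observation counts is added.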
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# We expect the i'th value to be greater than its successor.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(R) != np.ndarray:
raise TypeError('Variable %spassed as inconsistency matrix is not '
'a numpy array.' % name_str)
if R.dtype != np.double:
raise TypeError('Inconsistency matrix %smust contain doubles '
'(double).' % name_str)
if len(R.shape) != 2:
raise ValueError('Inconsistency matrix %smust have shape=2 (i.e. '
'be two-dimensional).' % name_str)
if R.shape[1] != 4:
raise ValueError('Inconsistency matrix %smust have 4 columns.' %
name_str)
if R.shape[0] < 1:
raise ValueError('Inconsistency matrix %smust have at least one '
'row.' % name_str)
if (R[:, 0] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height means.' % name_str)
if (R[:, 1] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'height standard deviations.' % name_str)
if (R[:, 2] < 0).any():
raise ValueError('Inconsistency matrix %scontains negative link '
'counts.' % name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional array (type double)
with :math:`n` rows and 4 columns. The first two columns must contain
indices between 0 and :math:`2n-1`. For a given row ``i``, the following
two expressions have to hold:
.. math::
0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1
    0 \\leq \\mathtt{Z[i,1]} \\leq i+n-1
I.e. a cluster cannot join another cluster unless the cluster being joined
has been generated.
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
name_str = "%r " % name if name else ''
try:
if type(Z) != np.ndarray:
raise TypeError('Passed linkage argument %sis not a valid array.' %
name_str)
if Z.dtype != np.double:
raise TypeError('Linkage matrix %smust contain doubles.' % name_str)
if len(Z.shape) != 2:
raise ValueError('Linkage matrix %smust have shape=2 (i.e. be '
'two-dimensional).' % name_str)
if Z.shape[1] != 4:
raise ValueError('Linkage matrix %smust have 4 columns.' % name_str)
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
raise ValueError('Linkage %scontains negative indices.' %
name_str)
if (Z[:, 2] < 0).any():
raise ValueError('Linkage %scontains negative distances.' %
name_str)
if (Z[:, 3] < 0).any():
raise ValueError('Linkage %scontains negative counts.' %
name_str)
if _check_hierarchy_uses_cluster_before_formed(Z):
raise ValueError('Linkage %suses non-singleton cluster before '
'it is formed.' % name_str)
if _check_hierarchy_uses_cluster_more_than_once(Z):
raise ValueError('Linkage %suses the same cluster more than once.'
% name_str)
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
            with index i when ``monocrit[i] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do::
MR = maxRstat(Z, R, 3)
cluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do::
MI = maxinconsts(Z, R)
cluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
        statistic upon which non-singleton cluster i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, ``monocrit[i] >= monocrit[j]``.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
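# Illustrative sketch (added for exposition, not part of the original module):
# two common ways of flattening a hierarchy with `fcluster` -- by cophenetic
# distance threshold and by a maximum number of clusters.  The name
# `_example_fcluster_sketch` is a hypothetical helper.
def _example_fcluster_sketch(Z):
    # No two observations in the same flat cluster are further apart
    # (cophenetically) than 1.0.
    labels_by_distance = fcluster(Z, t=1.0, criterion='distance')
    # At most 3 flat clusters are formed.
    labels_by_count = fcluster(Z, t=3, criterion='maxclust')
    return labels_by_distance, labels_by_count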
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
    matrix X (n observations in m dimensions), using `metric` (Euclidean
    by default) to calculate pairwise distances between observations,
    performs hierarchical clustering using the linkage method given by
    `method` (single by default), and forms flat clusters using the
    criterion given by `criterion` (inconsistent by default) with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
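# Illustrative sketch (added for exposition, not part of the original module):
# a typical use of `leaves_list` -- permuting the rows of the observation
# matrix into dendrogram leaf order, e.g. before drawing a heatmap.  The name
# `_example_leaves_list_sketch` is a hypothetical helper.
def _example_leaves_list_sketch(X, Z):
    order = leaves_list(Z)
    return X[order]    # rows reordered to match the left-to-right leaf order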
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= np.inf, size="6"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None, above_threshold_color='b'):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot "
"the dendrogram. Use no_plot=True to calculate the "
"dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
# Dependent variable plot height
dvw = mh + mh * 0.05
iv_ticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation in ('top', 'bottom'):
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
else:
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(iv_ticks)
if orientation == 'top':
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
leaf_rot = float(_get_tick_rotation(len(ivl))) if (
leaf_rotation is None) else leaf_rotation
leaf_font = float(_get_tick_text_size(len(ivl))) if (
leaf_font_size is None) else leaf_font_size
ax.set_xticklabels(ivl, rotation=leaf_rot, size=leaf_font)
elif orientation in ('left', 'right'):
if orientation == 'left':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
else:
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(iv_ticks)
if orientation == 'left':
ax.yaxis.set_ticks_position('right')
else:
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
leaf_font = float(_get_tick_text_size(len(ivl))) if (
leaf_font_size is None) else leaf_font_size
if leaf_rotation is not None:
ax.set_yticklabels(ivl, rotation=leaf_rotation, size=leaf_font)
else:
ax.set_yticklabels(ivl, size=leaf_font)
# Let's use collections instead. This way there is a separate legend item
# for each tree grouping, rather than stupidly one for each line segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the groupings below the color threshold.
for color in colors_used:
if color != above_threshold_color:
ax.add_collection(colors_to_collections[color])
# If there's a grouping of links above the color threshold, it goes last.
if above_threshold_color in colors_to_collections:
ax.add_collection(colors_to_collections[above_threshold_color])
if contraction_marks is not None:
Ellipse = matplotlib.patches.Ellipse
for (x, y) in contraction_marks:
if orientation in ('left', 'right'):
e = Ellipse((y, x), width=dvw / 100, height=1.0)
else:
e = Ellipse((x, y), width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for use by dendrogram.
Note that this palette is global (i.e. setting it once changes the colors
    for all subsequent calls to `dendrogram`) and that it affects only
    the colors below ``color_threshold``.
Note that `dendrogram` also accepts a custom coloring function through its
``link_color_func`` keyword, which is more flexible and non-global.
Parameters
----------
palette : list of str or None
A list of matplotlib color codes. The order of the color codes is the
order in which the colors are cycled through when color thresholding in
the dendrogram.
If ``None``, resets the palette to its default (which is
``['g', 'r', 'c', 'm', 'y', 'k']``).
Returns
-------
None
See Also
--------
dendrogram
Notes
-----
Ability to reset the palette with ``None`` added in Scipy 0.17.0.
Examples
--------
>>> from scipy.cluster import hierarchy
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268., 400.,
... 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['g', 'b', 'b', 'b', 'b']
>>> hierarchy.set_link_color_palette(['c', 'm', 'y', 'k'])
>>> dn = hierarchy.dendrogram(Z, no_plot=True)
>>> dn['color_list']
['c', 'b', 'b', 'b', 'b']
>>> dn = hierarchy.dendrogram(Z, no_plot=True, color_threshold=267,
... above_threshold_color='k')
>>> dn['color_list']
['c', 'm', 'm', 'k', 'k']
Now reset the color palette to its default:
>>> hierarchy.set_link_color_palette(None)
"""
if palette is None:
# reset to its default
palette = ['g', 'r', 'c', 'm', 'y', 'k']
elif type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, leaf_font_size=None,
leaf_rotation=None, leaf_label_func=None,
show_contracted=False, link_color_func=None, ax=None,
above_threshold_color='b'):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
          The last ``p`` non-singleton clusters formed in the linkage are the
          only non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
        ``'top'``
          Plots the root at the top, and plots descendent links going
          downwards (default).
        ``'bottom'``
          Plots the root at the bottom, and plots descendent links going
          upwards.
        ``'left'``
          Plots the root at the left, and plots descendent links going right.
        ``'right'``
          Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
        ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
For each node n, the order (visually, from left-to-right) n's
two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
observation are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
        Specifies the font size (in points) of the leaf labels. When
        unspecified, the size is based on the number of nodes in the
        dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called for each
        leaf with cluster index :math:`k < 2n-1` and is expected to return
        a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do::
# First define the leaf label function.
def llf(id):
if id < n:
return str(id)
else:
return '[%d %d %1.2f]' % (id, count, R[n-id,3])
# The text for the leaf nodes is going to be big so force
# a rotation of 90 degrees.
dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example::
dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
above_threshold_color : str, optional
This matplotlib color string sets the color of the links above the
color_threshold. The default is 'b'.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
See Also
--------
linkage, set_link_color_palette
Examples
--------
>>> from scipy.cluster import hierarchy
>>> import matplotlib.pyplot as plt
A very basic example:
>>> ytdist = np.array([662., 877., 255., 412., 996., 295., 468., 268.,
... 400., 754., 564., 138., 219., 869., 669.])
>>> Z = hierarchy.linkage(ytdist, 'single')
>>> plt.figure()
>>> dn = hierarchy.dendrogram(Z)
Now plot in given axes, improve the color scheme and use both vertical and
horizontal orientations:
>>> hierarchy.set_link_color_palette(['m', 'c', 'y', 'k'])
>>> fig, axes = plt.subplots(1, 2, figsize=(8, 3))
>>> dn1 = hierarchy.dendrogram(Z, ax=axes[0], above_threshold_color='y',
... orientation='top')
>>> dn2 = hierarchy.dendrogram(Z, ax=axes[1], above_threshold_color='#bcbddc',
... orientation='right')
>>> hierarchy.set_link_color_palette(None) # reset to default after use
>>> plt.show()
"""
# This feature was thought about but never implemented (still useful?):
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
ivl = [] # list of leaves
if color_threshold is None or (isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
# Empty list will be filled in _dendrogram_calculate_info
contraction_marks = [] if show_contracted else None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2*n - 2,
iv=0.0,
ivl=ivl,
n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list,
lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list,
leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax,
above_threshold_color=above_threshold_color)
return R
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None,
above_threshold_color='b'):
"""
    Calculates the endpoints of the links as well as the labels for the
    dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
      * left is the independent variable coordinate of the center of
        the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
      * md is the ``max(Z[*,2])`` for all nodes ``*`` below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
# If the node is a leaf node but corresponds to a non-single cluster,
# its label is either the empty string or the number of original
# observations belonging to cluster i.
if 2 * n - p > i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = above_threshold_color
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func,
above_threshold_color=above_threshold_color)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
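    Examples
    --------
    Two assignments that differ only by a consistent relabelling of the
    cluster ids are reported as equivalent:
    >>> from scipy.cluster.hierarchy import is_isomorphic
    >>> is_isomorphic([1, 1, 2], [2, 2, 3])
    True
    >>> is_isomorphic([1, 1, 2], [2, 3, 3])
    False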
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
def maxdists(Z):
"""
Returns the maximum distance between any non-singleton cluster.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
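    Examples
    --------
    For a single-linkage tree over three observations whose two merges occur
    at distances 1 and 2, the per-node maxima are (``tolist`` is used here
    only to keep the printed output stable across numpy versions):
    >>> from scipy.cluster.hierarchy import linkage, maxdists
    >>> Z = linkage([1., 2., 3.], 'single')
    >>> maxdists(Z).tolist()
    [1.0, 2.0]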
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See `linkage` for more
information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
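    Examples
    --------
    A minimal call pattern, with the flat cluster assignment vector built by
    ``fcluster`` from an illustrative three-observation linkage:
    >>> from scipy.cluster.hierarchy import fcluster, leaders, linkage
    >>> Z = linkage([1., 2., 3.], 'single')
    >>> T = fcluster(Z, 1.5, criterion='distance')
    >>> L, M = leaders(Z, T)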
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
| bsd-3-clause |
spallavolu/scikit-learn | examples/linear_model/plot_robust_fit.py | 238 | 2414 | """
Robust linear estimator fitting
===============================
Here a sine function is fit with a polynomial of order 3, for values
close to zero.
Robust fitting is demoed in different situations:
- No measurement errors, only modelling errors (fitting a sine with a
polynomial)
- Measurement errors in X
- Measurement errors in y
The mean squared error on non-corrupt new data is used to judge
the quality of the prediction.
What we can see is that:
- RANSAC is good for strong outliers in the y direction
- TheilSen is good for small outliers, both in the X and y directions, but has
  a break point above which it performs worse than OLS.
"""
from matplotlib import pyplot as plt
import numpy as np
from sklearn import linear_model, metrics
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
np.random.seed(42)
X = np.random.normal(size=400)
y = np.sin(X)
# Make sure that X is 2D
X = X[:, np.newaxis]
X_test = np.random.normal(size=200)
y_test = np.sin(X_test)
X_test = X_test[:, np.newaxis]
y_errors = y.copy()
y_errors[::3] = 3
X_errors = X.copy()
X_errors[::3] = 3
y_errors_large = y.copy()
y_errors_large[::3] = 10
X_errors_large = X.copy()
X_errors_large[::3] = 10
estimators = [('OLS', linear_model.LinearRegression()),
('Theil-Sen', linear_model.TheilSenRegressor(random_state=42)),
('RANSAC', linear_model.RANSACRegressor(random_state=42)), ]
x_plot = np.linspace(X.min(), X.max())
for title, this_X, this_y in [
('Modeling errors only', X, y),
('Corrupt X, small deviants', X_errors, y),
('Corrupt y, small deviants', X, y_errors),
('Corrupt X, large deviants', X_errors_large, y),
('Corrupt y, large deviants', X, y_errors_large)]:
plt.figure(figsize=(5, 4))
plt.plot(this_X[:, 0], this_y, 'k+')
for name, estimator in estimators:
model = make_pipeline(PolynomialFeatures(3), estimator)
model.fit(this_X, this_y)
mse = metrics.mean_squared_error(model.predict(X_test), y_test)
y_plot = model.predict(x_plot[:, np.newaxis])
plt.plot(x_plot, y_plot,
label='%s: error = %.3f' % (name, mse))
plt.legend(loc='best', frameon=False,
               title='Error: mean squared error\n on non-corrupt test data')
plt.xlim(-4, 10.2)
plt.ylim(-2, 10.2)
plt.title(title)
plt.show()
| bsd-3-clause |
justincassidy/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 35 | 15016 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
karlnapf/kernel_exp_family | kernel_exp_family/examples/tools.py | 1 | 1235 | import matplotlib.pyplot as plt
import numpy as np
def visualise_array(Xs, Ys, A, samples=None):
im = plt.imshow(A, origin='lower')
im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()])
im.set_interpolation('nearest')
im.set_cmap('gray')
if samples is not None:
plt.plot(samples[:, 0], samples[:, 1], 'bx')
plt.ylim([Ys.min(), Ys.max()])
plt.xlim([Xs.min(), Xs.max()])
def pdf_grid(Xs, Ys, est):
    # rows follow Ys and columns follow Xs, matching the D[j, i] indexing below
    D = np.zeros((len(Ys), len(Xs)))
G = np.zeros(D.shape)
    # this is inefficient; log_pdf_multiple on a 2d array is faster
for i, x in enumerate(Xs):
for j, y in enumerate(Ys):
point = np.array([x, y])
D[j, i] = est.log_pdf(point)
G[j, i] = np.linalg.norm(est.grad(point))
return D, G
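# A vectorized sketch of the same grid evaluation, assuming (as the comment in
# pdf_grid suggests) that the estimator exposes a log_pdf_multiple method that
# accepts an (N, 2) array and returns N log-pdf values. Gradient norms are
# still computed point by point since no batched gradient API is referenced
# here.
def pdf_grid_batch(Xs, Ys, est):
    # build all (x, y) grid points as an (N, 2) array
    XX, YY = np.meshgrid(Xs, Ys)
    points = np.column_stack([XX.ravel(), YY.ravel()])
    # one batched log-pdf call, reshaped so rows follow Ys and columns Xs
    D = np.asarray(est.log_pdf_multiple(points)).reshape(len(Ys), len(Xs))
    G = np.array([np.linalg.norm(est.grad(p)) for p in points])
    G = G.reshape(len(Ys), len(Xs))
    return D, G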
def visualise_fit_2d(est, X, Xs=None, Ys=None):
# visualise found fit
plt.figure()
if Xs is None:
Xs = np.linspace(-5, 5)
if Ys is None:
Ys = np.linspace(-5, 5)
D, G = pdf_grid(Xs, Ys, est)
plt.subplot(121)
visualise_array(Xs, Ys, D, X)
plt.title("log pdf")
plt.subplot(122)
visualise_array(Xs, Ys, G, X)
plt.title("gradient norm")
plt.tight_layout() | bsd-3-clause |
potash/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
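# Parameter names in the grid below follow the Pipeline convention
# <step name>__<parameter name>; e.g. 'vect__max_df' targets the max_df
# parameter of the CountVectorizer step registered above as 'vect'.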
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
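# With only the uncommented entries above, the grid already contains
# 3 (max_df) * 2 (ngram_range) * 2 (alpha) * 2 (penalty) = 24 parameter
# combinations, each of which is fit once per cross-validation split.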
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
anirudhjayaraman/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
blancha/abcngspipelines | quality_control/rnaseqc.py | 1 | 4436 | #!/usr/bin/env python3
# Version 1.0
# Author Alexis Blanchet-Cohen
# Date: 09/06/2014
import argparse
import glob
import os
import os.path
import pandas
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description="Generates rnaseqc scripts.")
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory.", default="rnaseqc")
parser.add_argument("-i", "--inputDirectory", help="Input directory with BAM files.", default="../results/tophat")
parser.add_argument("-o", "--outputDirectory", help="Output directory with rnaseqc results.", default="../results/rnaseqc")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# If not in the main scripts directory, cd to the main scripts directory, if it exists.
util.cdMainScriptsDirectory()
# Process command line arguments.
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
# Check if the inputDirectory exists, and is a directory.
util.checkInputDirectory(inputDirectory)
# Create script and output directories, if they do not exist yet.
util.makeDirectory(outputDirectory)
util.makeDirectory(scriptsDirectory)
# Read configuration files
config = util.readConfigurationFiles()
header = config.getboolean("server", "PBS_header")
processors = config.get("rnaseqc", "processors")
trim = config.getboolean("project", "trim")
stranded = config.getboolean("project", "stranded")
genome = config.get("project", "genome")
genomeFile = config.get(genome, "genomeFile")
gtfFile = config.get(genome, "gtfFile")
rnaseqc_gtfFile = config.get(genome, "rnaseqc_gtfFile")
rrna = config.get(genome, "rrna")
samplesFile = pandas.read_csv("samples.txt", sep="\t")
# Get samples
samples = samplesFile["sample"]
conditions = samplesFile["group"]
# Change to scripts directory
os.chdir(scriptsDirectory)
###########################
# reorder_index_sample.sh #
###########################
# Create scripts subdirectory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory + "/reorder_index"):
os.mkdir(scriptsDirectory + "/reorder_index")
os.chdir(scriptsDirectory + "/reorder_index")
for sample in samples:
scriptName = "reorder_index_" + sample + ".sh"
script = open(scriptName, "w")
if header:
util.writeHeader(script, config, "reorder_index")
# Reorder
script.write("java -jar -Xmx" + str(int(processors) * 2700) + "m " + config.get('picard', 'folder') + "ReorderSam.jar \\\n")
script.write("I=" + inputDirectory + "/" + sample + "/accepted_hits.bam \\\n")
script.write("OUTPUT=" + os.path.join(inputDirectory, sample, "accepted_hits_reordered.bam") + " \\\n")
script.write("REFERENCE=" + genomeFile[:-3] + ".reordered.karyotypic.fa" + " \\\n")
script.write("CREATE_INDEX=true" + " \\\n")
script.write("&> " + scriptName + ".log")
os.chdir("..")
##############
# rnaseqc.sh #
##############
scriptName = "rnaseqc.sh"
script = open(scriptName, "w")
if header:
util.writeHeader(script, config, "rnaseqc")
script.write("java -jar -Xmx" + str(int(processors) * 2700) + "m " + config.get('rnaseqc', 'file') + " \\\n")
script.write("-o " + outputDirectory + " \\\n")
script.write("-BWArRNA " + rrna + " \\\n")
script.write("-r " + genomeFile[:-3] + ".reordered.karyotypic.fa" + " \\\n")
script.write("-t " + rnaseqc_gtfFile + " \\\n")
script.write("-s sampleFile.txt " + "\\\n")
script.write("&> " + scriptName + ".log")
script.write("\n\n")
script.write("rm " + os.path.join(inputDirectory, "*", "*reordered.bam") + " \\\n")
script.write("&>> " + scriptName + ".log")
script.write("\n")
script.write("rm " + os.path.join(inputDirectory, "*", "*reordered.bai") + " \\\n")
script.write("&>> " + scriptName + ".log")
script.close()
###################
# sampleFile.txt. #
###################
sampleFile = open("sampleFile.txt", "w")
sampleFile.write("sample ID\tBam File\tNotes\n")
for index, sample in enumerate(samples):
sampleFile.write(sample + "\t" + os.path.join(inputDirectory, sample, "accepted_hits_reordered.bam") + "\t" + conditions[index] + "\n")
sampleFile.close()
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
| gpl-3.0 |
walterreade/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
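# FunctionTransformer simply wraps the stateless callable above so that it can
# be used as a step in a scikit-learn pipeline; drop_first_component below
# combines it with PCA.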
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 94 | 10801 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
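# With the default arguments, make_sparse_data() returns a 100 x 100 scipy CSC
# matrix with roughly half of its entries zeroed out and a dense target driven
# only by the first 10 (informative) columns, e.g.
#     X, y = make_sparse_data()
#     X.shape, y.shape  # -> ((100, 100), (100,))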
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
for normalize in [True, False]:
l = ElasticNet(normalize=normalize)
X = [[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14]]
y = [[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15]]
ignore_warnings(l.fit)(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet(normalize=normalize)
X_sp = sp.coo_matrix(X)
ignore_warnings(l_sp.fit)(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
| bsd-3-clause |
rsignell-usgs/notebook | ERDDAP/OOI-ERDDAP_Search.py | 1 | 3947 |
# coding: utf-8
# # Search OOI ERDDAP for Pioneer Glider Data
# Use ERDDAP's RESTful advanced search to try to find OOI Pioneer glider water temperatures from the OOI ERDDAP. Use case from Stace Beaulieu ([email protected])
# In[1]:
import pandas as pd
# ### First try just searching for "glider"
# In[2]:
url = 'http://ooi-data.marine.rutgers.edu/erddap/search/advanced.csv?page=1&itemsPerPage=1000&searchFor=glider'
dft = pd.read_csv(url, usecols=['Title', 'Summary', 'Institution', 'Dataset ID'])
dft.head()
# ### Now search for all temperature data in specified bounding box and temporal extent
# In[3]:
start = '2000-01-01T00:00:00Z'
stop = '2017-02-22T00:00:00Z'
lat_min = 39.
lat_max = 41.5
lon_min = -72.
lon_max = -69.
standard_name = 'sea_water_temperature'
endpoint = 'http://ooi-data.marine.rutgers.edu/erddap/search/advanced.csv'
# In[4]:
import pandas as pd
base = (
'{}'
'?page=1'
'&itemsPerPage=1000'
'&searchFor='
'&protocol=(ANY)'
'&cdm_data_type=(ANY)'
'&institution=(ANY)'
'&ioos_category=(ANY)'
'&keywords=(ANY)'
'&long_name=(ANY)'
'&standard_name={}'
'&variableName=(ANY)'
'&maxLat={}'
'&minLon={}'
'&maxLon={}'
'&minLat={}'
'&minTime={}'
'&maxTime={}').format
url = base(
endpoint,
standard_name,
lat_max,
lon_min,
lon_max,
lat_min,
start,
stop
)
print(url)
# In[5]:
dft = pd.read_csv(url, usecols=['Title', 'Summary', 'Institution','Dataset ID'])
print('Datasets Found = {}'.format(len(dft)))
print(url)
dft
# Define a function that returns a Pandas DataFrame based on the dataset ID. The ERDDAP request variables (e.g. "ctdpf_ckl_wfp_instrument_ctdpf_ckl_seawater_temperature") are hard-coded here, so this routine should be modified for other ERDDAP endpoints or datasets.
#
# Since we didn't actually find any glider data, we just request the last temperature value from each dataset, using the ERDDAP `orderByMax("time")` constraint. This way we can see when the data ends, and if the mooring locations look correct
# In[6]:
def download_df(glider_id):
from pandas import DataFrame, read_csv
# from urllib.error import HTTPError
uri = ('http://ooi-data.marine.rutgers.edu/erddap/tabledap/{}.csv'
'?trajectory,'
'time,latitude,longitude,'
'ctdpf_ckl_wfp_instrument_ctdpf_ckl_seawater_temperature'
'&orderByMax("time")'
'&time>={}'
'&time<={}'
'&latitude>={}'
'&latitude<={}'
'&longitude>={}'
'&longitude<={}').format
url = uri(glider_id,start,stop,lat_min,lat_max,lon_min,lon_max)
print(url)
# Not sure if returning an empty df is the best idea.
try:
df = read_csv(url, index_col='time', parse_dates=True, skiprows=[1])
except:
df = pd.DataFrame()
return df
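# Because of the orderByMax("time") constraint, each successful request returns a
# single row (the most recent observation within the bounding box), so the
# concatenated DataFrame built below holds at most one row per dataset, e.g.
#     df_one = download_df(dft['Dataset ID'].values[0])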
# In[7]:
df = pd.concat(list(map(download_df, dft['Dataset ID'].values)))
# In[8]:
print('Total Data Values Found: {}'.format(len(df)))
# In[9]:
df
# In[10]:
get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from cartopy.feature import NaturalEarthFeature
bathym_1000 = NaturalEarthFeature(name='bathymetry_J_1000',
scale='10m', category='physical')
fig, ax = plt.subplots(
figsize=(9, 9),
subplot_kw=dict(projection=ccrs.PlateCarree())
)
ax.coastlines(resolution='10m')
ax.add_feature(bathym_1000, facecolor=[0.9, 0.9, 0.9], edgecolor='none')
dx = dy = 0.5
ax.set_extent([lon_min-dx, lon_max+dx, lat_min-dy, lat_max+dy])
g = df.groupby('trajectory')
for glider in g.groups:
traj = df[df['trajectory'] == glider]
ax.plot(traj['longitude'], traj['latitude'], 'o', label=glider)
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=2, color='gray', alpha=0.5, linestyle='--')
ax.legend();
# In[ ]:
| mit |
wxiaolei/CSDN-CODE | Jupyter-notebook-config远程服务配置/.jupyter/jupyter_notebook_config.py | 1 | 20809 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
c.NotebookApp.certfile = u'/home/xiaolei/.jupyter/mycert.pem'
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.filemanager.FileContentsManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Use minified JS file or not, mainly used during dev to avoid JS recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
#c.NotebookApp.iopub_data_rate_limit = 0
## (msg/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 0
## The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
c.NotebookApp.keyfile = u'/home/xiaolei/.jupyter/mykey.key'
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The url for MathJax.js.
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
c.NotebookApp.password = u'sha1:69de81e341de:6b9ef62b448049f5502dff50319ad140d35ffc30'
## The port the notebook server will listen on.
c.NotebookApp.port = 9999
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 1.0
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'xiaolei'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved to a temporary file on disk and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
#c.NotebookNotary.cache_size = 65535
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| gpl-3.0 |
hamurabiaraujo/python-statistic | exercicios/q1.py | 1 | 1238 | from csv import reader
from matplotlib import pyplot as plt
from math import pow, sqrt
with open('data.csv', 'rb') as csvFile:
csvReader = reader(csvFile, delimiter=';', quotechar='|')
averages = []
total = 0
summ = 0
minorThanAverage = 0
greaterThanAverage = 0
for row in csvReader:
averages.append(( float(row[1]) + float(row[2])) / 2)
total += ((float(row[1]) + float(row[2])) / 2)
av = total / len(averages)
for a in averages :
summ += pow((a - av), 2)
if a < av :
minorThanAverage += 1
elif a > av :
greaterThanAverage += 1
plt.bar(range(1,31), averages)
plt.title("Grafico de medias")
plt.ylabel("Nota")
plt.xlabel("Aluno")
plt.plot(range(1,31), [av for i in xrange(len(averages))], color="red",linestyle='solid')
plt.show()
    print 'Range: ' + format(max(averages) - min(averages))
    print 'Standard deviation: ' + format(sqrt(summ / len(averages)))
    print 'Number of students with a grade below the average: ' + format(minorThanAverage)
    print 'Number of students with a grade above the average: ' + format(greaterThanAverage)
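    # Cross-check sketch (illustrative, not part of the exercise): the same summary
    # statistics could be computed with numpy, e.g.
    #     import numpy as np
    #     arr = np.array(averages)
    #     print arr.max() - arr.min(), arr.mean(), arr.std()
    # where arr.std() is the population standard deviation, matching sqrt(summ / n).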
| mit |
kdebrab/pandas | asv_bench/benchmarks/timeseries.py | 3 | 11496 | import warnings
from datetime import timedelta
import numpy as np
from pandas import to_datetime, date_range, Series, DataFrame, period_range
from pandas.tseries.frequencies import infer_freq
try:
from pandas.plotting._converter import DatetimeConverter
except ImportError:
from pandas.tseries.converter import DatetimeConverter
from .pandas_vb_common import setup # noqa
class DatetimeIndex(object):
goal_time = 0.2
params = ['dst', 'repeated', 'tz_aware', 'tz_naive']
param_names = ['index_type']
def setup(self, index_type):
N = 100000
dtidxes = {'dst': date_range(start='10/29/2000 1:00:00',
end='10/29/2000 1:59:59', freq='S'),
'repeated': date_range(start='2000',
periods=N / 10,
freq='s').repeat(10),
'tz_aware': date_range(start='2000',
periods=N,
freq='s',
tz='US/Eastern'),
'tz_naive': date_range(start='2000',
periods=N,
freq='s')}
self.index = dtidxes[index_type]
def time_add_timedelta(self, index_type):
self.index + timedelta(minutes=2)
def time_normalize(self, index_type):
self.index.normalize()
def time_unique(self, index_type):
self.index.unique()
def time_to_time(self, index_type):
self.index.time
def time_get(self, index_type):
self.index[0]
def time_timeseries_is_month_start(self, index_type):
self.index.is_month_start
def time_to_date(self, index_type):
self.index.date
def time_to_pydatetime(self, index_type):
self.index.to_pydatetime()
class TzLocalize(object):
goal_time = 0.2
def setup(self):
dst_rng = date_range(start='10/29/2000 1:00:00',
end='10/29/2000 1:59:59', freq='S')
self.index = date_range(start='10/29/2000',
end='10/29/2000 00:59:59', freq='S')
self.index = self.index.append(dst_rng)
self.index = self.index.append(dst_rng)
self.index = self.index.append(date_range(start='10/29/2000 2:00:00',
end='10/29/2000 3:00:00',
freq='S'))
def time_infer_dst(self):
self.index.tz_localize('US/Eastern', ambiguous='infer')
class ResetIndex(object):
goal_time = 0.2
params = [None, 'US/Eastern']
param_names = 'tz'
def setup(self, tz):
idx = date_range(start='1/1/2000', periods=1000, freq='H', tz=tz)
self.df = DataFrame(np.random.randn(1000, 2), index=idx)
def time_reest_datetimeindex(self, tz):
self.df.reset_index()
class Factorize(object):
goal_time = 0.2
params = [None, 'Asia/Tokyo']
param_names = 'tz'
def setup(self, tz):
N = 100000
self.dti = date_range('2011-01-01', freq='H', periods=N, tz=tz)
self.dti = self.dti.repeat(5)
def time_factorize(self, tz):
self.dti.factorize()
class InferFreq(object):
goal_time = 0.2
params = [None, 'D', 'B']
param_names = ['freq']
def setup(self, freq):
if freq is None:
self.idx = date_range(start='1/1/1700', freq='D', periods=10000)
self.idx.freq = None
else:
self.idx = date_range(start='1/1/1700', freq=freq, periods=10000)
def time_infer_freq(self, freq):
infer_freq(self.idx)
class TimeDatetimeConverter(object):
goal_time = 0.2
def setup(self):
N = 100000
self.rng = date_range(start='1/1/2000', periods=N, freq='T')
def time_convert(self):
DatetimeConverter.convert(self.rng, None, None)
class Iteration(object):
goal_time = 0.2
params = [date_range, period_range]
param_names = ['time_index']
def setup(self, time_index):
N = 10**6
self.idx = time_index(start='20140101', freq='T', periods=N)
self.exit = 10000
def time_iter(self, time_index):
for _ in self.idx:
pass
def time_iter_preexit(self, time_index):
for i, _ in enumerate(self.idx):
if i > self.exit:
break
class ResampleDataFrame(object):
goal_time = 0.2
params = ['max', 'mean', 'min']
param_names = ['method']
def setup(self, method):
rng = date_range(start='20130101', periods=100000, freq='50L')
df = DataFrame(np.random.randn(100000, 2), index=rng)
self.resample = getattr(df.resample('1s'), method)
def time_method(self, method):
self.resample()
class ResampleSeries(object):
goal_time = 0.2
params = (['period', 'datetime'], ['5min', '1D'], ['mean', 'ohlc'])
param_names = ['index', 'freq', 'method']
def setup(self, index, freq, method):
indexes = {'period': period_range(start='1/1/2000',
end='1/1/2001',
freq='T'),
'datetime': date_range(start='1/1/2000',
end='1/1/2001',
freq='T')}
idx = indexes[index]
ts = Series(np.random.randn(len(idx)), index=idx)
self.resample = getattr(ts.resample(freq), method)
def time_resample(self, index, freq, method):
self.resample()
class ResampleDatetetime64(object):
# GH 7754
goal_time = 0.2
def setup(self):
rng3 = date_range(start='2000-01-01 00:00:00',
end='2000-01-01 10:00:00', freq='555000U')
self.dt_ts = Series(5, rng3, dtype='datetime64[ns]')
def time_resample(self):
self.dt_ts.resample('1S').last()
class AsOf(object):
goal_time = 0.2
params = ['DataFrame', 'Series']
param_names = ['constructor']
def setup(self, constructor):
N = 10000
M = 10
rng = date_range(start='1/1/1990', periods=N, freq='53s')
data = {'DataFrame': DataFrame(np.random.randn(N, M)),
'Series': Series(np.random.randn(N))}
self.ts = data[constructor]
self.ts.index = rng
self.ts2 = self.ts.copy()
self.ts2.iloc[250:5000] = np.nan
self.ts3 = self.ts.copy()
self.ts3.iloc[-5000:] = np.nan
self.dates = date_range(start='1/1/1990', periods=N * 10, freq='5s')
self.date = self.dates[0]
self.date_last = self.dates[-1]
self.date_early = self.date - timedelta(10)
# test speed of pre-computing NAs.
def time_asof(self, constructor):
self.ts.asof(self.dates)
# should be roughly the same as above.
def time_asof_nan(self, constructor):
self.ts2.asof(self.dates)
# test speed of the code path for a scalar index
# without *while* loop
def time_asof_single(self, constructor):
self.ts.asof(self.date)
# test speed of the code path for a scalar index
# before the start. should be the same as above.
def time_asof_single_early(self, constructor):
self.ts.asof(self.date_early)
# test the speed of the code path for a scalar index
# with a long *while* loop. should still be much
# faster than pre-computing all the NAs.
def time_asof_nan_single(self, constructor):
self.ts3.asof(self.date_last)
class SortIndex(object):
goal_time = 0.2
params = [True, False]
param_names = ['monotonic']
def setup(self, monotonic):
N = 10**5
idx = date_range(start='1/1/2000', periods=N, freq='s')
self.s = Series(np.random.randn(N), index=idx)
if not monotonic:
self.s = self.s.sample(frac=1)
def time_sort_index(self, monotonic):
self.s.sort_index()
def time_get_slice(self, monotonic):
self.s[:10000]
class IrregularOps(object):
goal_time = 0.2
def setup(self):
N = 10**5
idx = date_range(start='1/1/2000', periods=N, freq='s')
s = Series(np.random.randn(N), index=idx)
self.left = s.sample(frac=1)
self.right = s.sample(frac=1)
def time_add(self):
self.left + self.right
class Lookup(object):
goal_time = 0.2
def setup(self):
N = 1500000
rng = date_range(start='1/1/2000', periods=N, freq='S')
self.ts = Series(1, index=rng)
self.lookup_val = rng[N // 2]
def time_lookup_and_cleanup(self):
self.ts[self.lookup_val]
self.ts.index._cleanup()
class ToDatetimeYYYYMMDD(object):
goal_time = 0.2
def setup(self):
rng = date_range(start='1/1/2000', periods=10000, freq='D')
self.stringsD = Series(rng.strftime('%Y%m%d'))
def time_format_YYYYMMDD(self):
to_datetime(self.stringsD, format='%Y%m%d')
class ToDatetimeISO8601(object):
goal_time = 0.2
def setup(self):
rng = date_range(start='1/1/2000', periods=20000, freq='H')
self.strings = rng.strftime('%Y-%m-%d %H:%M:%S').tolist()
self.strings_nosep = rng.strftime('%Y%m%d %H:%M:%S').tolist()
self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800'
for x in rng]
def time_iso8601(self):
to_datetime(self.strings)
def time_iso8601_nosep(self):
to_datetime(self.strings_nosep)
def time_iso8601_format(self):
to_datetime(self.strings, format='%Y-%m-%d %H:%M:%S')
def time_iso8601_format_no_sep(self):
to_datetime(self.strings_nosep, format='%Y%m%d %H:%M:%S')
def time_iso8601_tz_spaceformat(self):
to_datetime(self.strings_tz_space)
class ToDatetimeFormat(object):
goal_time = 0.2
def setup(self):
self.s = Series(['19MAY11', '19MAY11:00:00:00'] * 100000)
self.s2 = self.s.str.replace(':\\S+$', '')
def time_exact(self):
to_datetime(self.s2, format='%d%b%y')
def time_no_exact(self):
to_datetime(self.s, format='%d%b%y', exact=False)
class ToDatetimeCache(object):
goal_time = 0.2
params = [True, False]
param_names = ['cache']
def setup(self, cache):
N = 10000
self.unique_numeric_seconds = list(range(N))
self.dup_numeric_seconds = [1000] * N
self.dup_string_dates = ['2000-02-11'] * N
self.dup_string_with_tz = ['2000-02-11 15:00:00-0800'] * N
def time_unique_seconds_and_unit(self, cache):
to_datetime(self.unique_numeric_seconds, unit='s', cache=cache)
def time_dup_seconds_and_unit(self, cache):
to_datetime(self.dup_numeric_seconds, unit='s', cache=cache)
def time_dup_string_dates(self, cache):
to_datetime(self.dup_string_dates, cache=cache)
def time_dup_string_dates_and_format(self, cache):
to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=cache)
def time_dup_string_tzoffset_dates(self, cache):
to_datetime(self.dup_string_with_tz, cache=cache)
class DatetimeAccessor(object):
def setup(self):
N = 100000
self.series = Series(date_range(start='1/1/2000', periods=N, freq='T'))
def time_dt_accessor(self):
self.series.dt
def time_dt_accessor_normalize(self):
self.series.dt.normalize()
| bsd-3-clause |
datachand/h2o-3 | py2/h2o_gbm.py | 30 | 16328 |
import re, random, math
import h2o_args
import h2o_nodes
import h2o_cmd
from h2o_test import verboseprint, dump_json, check_sandbox_for_errors
def plotLists(xList, xLabel=None, eListTitle=None, eList=None, eLabel=None, fListTitle=None, fList=None, fLabel=None, server=False):
if h2o_args.python_username!='kevin':
return
# Force matplotlib to not use any Xwindows backend.
if server:
import matplotlib
matplotlib.use('Agg')
import pylab as plt
print "xList", xList
print "eList", eList
print "fList", fList
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 26}
### plt.rc('font', **font)
plt.rcdefaults()
if eList:
if eListTitle:
plt.title(eListTitle)
plt.figure()
plt.plot (xList, eList)
plt.xlabel(xLabel)
plt.ylabel(eLabel)
plt.draw()
plt.savefig('eplot.jpg',format='jpg')
# Image.open('testplot.jpg').save('eplot.jpg','JPEG')
if fList:
if fListTitle:
plt.title(fListTitle)
plt.figure()
plt.plot (xList, fList)
plt.xlabel(xLabel)
plt.ylabel(fLabel)
plt.draw()
plt.savefig('fplot.jpg',format='jpg')
# Image.open('fplot.jpg').save('fplot.jpg','JPEG')
if eList or fList:
plt.show()
# pretty print a confusion matrix (cm)
def pp_cm(jcm, header=None):
# header = jcm['header']
# hack col index header for now..where do we get it?
header = ['"%s"'%i for i in range(len(jcm[0]))]
# cm = ' '.join(header)
cm = '{0:<8}'.format('')
for h in header:
cm = '{0}|{1:<8}'.format(cm, h)
cm = '{0}|{1:<8}'.format(cm, 'error')
c = 0
for line in jcm:
lineSum = sum(line)
if c < 0 or c >= len(line):
raise Exception("Error in h2o_gbm.pp_cm. c: %s line: %s len(line): %s jcm: %s" % (c, line, len(line), dump_json(jcm)))
print "c:", c, "line:", line
errorSum = lineSum - line[c]
if (lineSum>0):
err = float(errorSum) / lineSum
else:
err = 0.0
fl = '{0:<8}'.format(header[c])
for num in line: fl = '{0}|{1:<8}'.format(fl, num)
fl = '{0}|{1:<8.2f}'.format(fl, err)
cm = "{0}\n{1}".format(cm, fl)
c += 1
return cm
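# Illustrative use (hypothetical 2x2 confusion matrix, not taken from any test):
#     print pp_cm([[50, 2], [5, 43]])
# prints one formatted row per actual class plus a trailing per-class error column.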
def pp_cm_summary(cm):
# hack cut and past for now (should be in h2o_gbm.py?
scoresList = cm
totalScores = 0
totalRight = 0
# individual scores can be all 0 if nothing for that output class
# due to sampling
classErrorPctList = []
predictedClassDict = {} # may be missing some? so need a dict?
for classIndex,s in enumerate(scoresList):
classSum = sum(s)
if classSum == 0 :
# why would the number of scores for a class be 0?
# in any case, tolerate. (it shows up in test.py on poker100)
print "classIndex:", classIndex, "classSum", classSum, "<- why 0?"
else:
if classIndex >= len(s):
print "Why is classindex:", classIndex, 'for s:"', s
else:
# H2O should really give me this since it's in the browser, but it doesn't
classRightPct = ((s[classIndex] + 0.0)/classSum) * 100
totalRight += s[classIndex]
classErrorPct = 100 - classRightPct
classErrorPctList.append(classErrorPct)
### print "s:", s, "classIndex:", classIndex
print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct
# gather info for prediction summary
for pIndex,p in enumerate(s):
if pIndex not in predictedClassDict:
predictedClassDict[pIndex] = p
else:
predictedClassDict[pIndex] += p
totalScores += classSum
print "Predicted summary:"
# FIX! Not sure why we weren't working with a list..hack with dict for now
for predictedClass,p in predictedClassDict.items():
print str(predictedClass)+":", p
# this should equal the num rows in the dataset if full scoring? (minus any NAs)
print "totalScores:", totalScores
print "totalRight:", totalRight
if totalScores != 0: pctRight = 100.0 * totalRight/totalScores
else: pctRight = 0.0
print "pctRight:", "%5.2f" % pctRight
pctWrong = 100 - pctRight
print "pctWrong:", "%5.2f" % pctWrong
return pctWrong
# I just copied this and changed it to GBM. Have to update to match GBM params and responses
def pickRandGbmParams(paramDict, params):
colX = 0
randomGroupSize = random.randint(1,len(paramDict))
for i in range(randomGroupSize):
randomKey = random.choice(paramDict.keys())
randomV = paramDict[randomKey]
randomValue = random.choice(randomV)
params[randomKey] = randomValue
# compare this gbm to the first one. since the files are concatenations,
# the results should be similar? 10% of first is allowed delta
def compareToFirstGbm(self, key, glm, firstglm):
# if isinstance(firstglm[key], list):
# in case it's not a list allready (err is a list)
verboseprint("compareToFirstGbm key:", key)
verboseprint("compareToFirstGbm glm[key]:", glm[key])
# key could be a list or not. if a list, don't want to create list of that list
# so use extend on an empty list. covers all cases?
if type(glm[key]) is list:
kList = glm[key]
firstkList = firstglm[key]
elif type(glm[key]) is dict:
raise Exception("compareToFirstGLm: Not expecting dict for " + key)
else:
kList = [glm[key]]
firstkList = [firstglm[key]]
for k, firstk in zip(kList, firstkList):
# delta must be a positive number ?
delta = .1 * abs(float(firstk))
msg = "Too large a delta (" + str(delta) + ") comparing current and first for: " + key
self.assertAlmostEqual(float(k), float(firstk), delta=delta, msg=msg)
self.assertGreaterEqual(abs(float(k)), 0.0, str(k) + " abs not >= 0.0 in current")
def goodXFromColumnInfo(y,
num_cols=None, missingValuesDict=None, constantValuesDict=None, enumSizeDict=None,
colTypeDict=None, colNameDict=None, keepPattern=None, key=None,
timeoutSecs=120, forRF=False, noPrint=False):
y = str(y)
# if we pass a key, means we want to get the info ourselves here
if key is not None:
(missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
h2o_cmd.columnInfoFromInspect(key, exceptionOnMissingValues=False,
max_column_display=99999999, timeoutSecs=timeoutSecs)
num_cols = len(colNameDict)
# now remove any whose names don't match the required keepPattern
if keepPattern is not None:
keepX = re.compile(keepPattern)
else:
keepX = None
x = range(num_cols)
# need to walk over a copy, cause we change x
xOrig = x[:]
ignore_x = [] # for use by RF
for k in xOrig:
name = colNameDict[k]
# remove it if it has the same name as the y output
if str(k)== y: # if they pass the col index as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, str(k), y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif name == y: # if they pass the name as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, name, y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif keepX is not None and not keepX.match(name):
if not noPrint:
print "Removing %d because name: %s doesn't match desired keepPattern %s" % (k, name, keepPattern)
x.remove(k)
ignore_x.append(k)
# missing values reports as constant also. so do missing first.
# remove all cols with missing values
# could change it against num_rows for a ratio
elif k in missingValuesDict:
value = missingValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has %d missing values" % (k, name, value)
x.remove(k)
ignore_x.append(k)
elif k in constantValuesDict:
value = constantValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has constant value: %s " % (k, name, str(value))
x.remove(k)
ignore_x.append(k)
# this is extra pruning..
# remove all cols with enums, if not already removed
elif k in enumSizeDict:
value = enumSizeDict[k]
if not noPrint:
print "Removing %d %s because it has enums of size: %d" % (k, name, value)
x.remove(k)
ignore_x.append(k)
if not noPrint:
print "x has", len(x), "cols"
print "ignore_x has", len(ignore_x), "cols"
x = ",".join(map(str,x))
ignore_x = ",".join(map(str,ignore_x))
if not noPrint:
print "\nx:", x
print "\nignore_x:", ignore_x
if forRF:
return ignore_x
else:
return x
def showGBMGridResults(GBMResult, expectedErrorMax, classification=True):
# print "GBMResult:", dump_json(GBMResult)
jobs = GBMResult['jobs']
print "GBM jobs:", jobs
for jobnum, j in enumerate(jobs):
_distribution = j['_distribution']
model_key = j['destination_key']
job_key = j['job_key']
# inspect = h2o_cmd.runInspect(key=model_key)
# print "jobnum:", jobnum, dump_json(inspect)
gbmTrainView = h2o_cmd.runGBMView(model_key=model_key)
print "jobnum:", jobnum, dump_json(gbmTrainView)
if classification:
cms = gbmTrainView['gbm_model']['cms']
cm = cms[-1]['_arr'] # take the last one
print "GBM cms[-1]['_predErr']:", cms[-1]['_predErr']
print "GBM cms[-1]['_classErr']:", cms[-1]['_classErr']
pctWrongTrain = pp_cm_summary(cm);
if pctWrongTrain > expectedErrorMax:
raise Exception("Should have < %s error here. pctWrongTrain: %s" % (expectedErrorMax, pctWrongTrain))
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "\nTrain", jobnum, job_key, "\n==========\n", "pctWrongTrain:", pctWrongTrain, "errsLast:", errsLast
print "GBM 'errsLast'", errsLast
print pp_cm(cm)
else:
print "\nTrain", jobnum, job_key, "\n==========\n", "errsLast:", errsLast
print "GBMTrainView errs:", gbmTrainView['gbm_model']['errs']
def simpleCheckGBMView(node=None, gbmv=None, noPrint=False, **kwargs):
if not node:
node = h2o_nodes.nodes[0]
if 'warnings' in gbmv:
warnings = gbmv['warnings']
# catch the 'Failed to converge" for now
for w in warnings:
if not noPrint: print "\nwarning:", w
if ('Failed' in w) or ('failed' in w):
raise Exception(w)
if 'cm' in gbmv:
cm = gbmv['cm'] # only one
else:
if 'gbm_model' in gbmv:
gbm_model = gbmv['gbm_model']
else:
raise Exception("no gbm_model in gbmv? %s" % dump_json(gbmv))
cms = gbm_model['cms']
print "number of cms:", len(cms)
print "FIX! need to add reporting of h2o's _perr per class error"
# FIX! what if regression. is rf only classification?
print "cms[-1]['_arr']:", cms[-1]['_arr']
print "cms[-1]['_predErr']:", cms[-1]['_predErr']
print "cms[-1]['_classErr']:", cms[-1]['_classErr']
## print "cms[-1]:", dump_json(cms[-1])
## for i,c in enumerate(cms):
## print "cm %s: %s" % (i, c['_arr'])
cm = cms[-1]['_arr'] # take the last one
scoresList = cm
used_trees = gbm_model['N']
errs = gbm_model['errs']
print "errs[0]:", errs[0]
print "errs[-1]:", errs[-1]
print "errs:", errs
# if we got the ntree for comparison. Not always there in kwargs though!
param_ntrees = kwargs.get('ntrees',None)
if (param_ntrees is not None and used_trees != param_ntrees):
raise Exception("used_trees should == param_ntree. used_trees: %s" % used_trees)
if (used_trees+1)!=len(cms) or (used_trees+1)!=len(errs):
raise Exception("len(cms): %s and len(errs): %s should be one more than N %s trees" % (len(cms), len(errs), used_trees))
totalScores = 0
totalRight = 0
# individual scores can be all 0 if nothing for that output class
# due to sampling
classErrorPctList = []
predictedClassDict = {} # may be missing some? so need a dict?
for classIndex,s in enumerate(scoresList):
classSum = sum(s)
if classSum == 0 :
# why would the number of scores for a class be 0? does GBM CM have entries for non-existent classes
# in a range??..in any case, tolerate. (it shows up in test.py on poker100)
if not noPrint: print "class:", classIndex, "classSum", classSum, "<- why 0?"
else:
# H2O should really give me this since it's in the browser, but it doesn't
classRightPct = ((s[classIndex] + 0.0)/classSum) * 100
totalRight += s[classIndex]
classErrorPct = round(100 - classRightPct, 2)
classErrorPctList.append(classErrorPct)
### print "s:", s, "classIndex:", classIndex
if not noPrint: print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct
# gather info for prediction summary
for pIndex,p in enumerate(s):
if pIndex not in predictedClassDict:
predictedClassDict[pIndex] = p
else:
predictedClassDict[pIndex] += p
totalScores += classSum
#****************************
if not noPrint:
print "Predicted summary:"
# FIX! Not sure why we weren't working with a list..hack with dict for now
for predictedClass,p in predictedClassDict.items():
print str(predictedClass)+":", p
# this should equal the num rows in the dataset if full scoring? (minus any NAs)
print "totalScores:", totalScores
print "totalRight:", totalRight
if totalScores != 0:
pctRight = 100.0 * totalRight/totalScores
else:
pctRight = 0.0
pctWrong = 100 - pctRight
print "pctRight:", "%5.2f" % pctRight
print "pctWrong:", "%5.2f" % pctWrong
#****************************
# more testing for GBMView
# it's legal to get 0's for oobe error # if sample_rate = 1
sample_rate = kwargs.get('sample_rate', None)
validation = kwargs.get('validation', None)
if (sample_rate==1 and not validation):
pass
elif (totalScores<=0 or totalScores>5e9):
raise Exception("scores in GBMView seems wrong. scores:", scoresList)
varimp = gbm_model['varimp']
treeStats = gbm_model['treeStats']
if not treeStats:
raise Exception("treeStats not right?: %s" % dump_json(treeStats))
# print "json:", dump_json(gbmv)
data_key = gbm_model['_dataKey']
model_key = gbm_model['_key']
classification_error = pctWrong
if not noPrint:
if 'minLeaves' not in treeStats or not treeStats['minLeaves']:
raise Exception("treeStats seems to be missing minLeaves %s" % dump_json(treeStats))
print """
Leaves: {0} / {1} / {2}
Depth: {3} / {4} / {5}
Err: {6:0.2f} %
""".format(
treeStats['minLeaves'],
treeStats['meanLeaves'],
treeStats['maxLeaves'],
treeStats['minDepth'],
treeStats['meanDepth'],
treeStats['maxDepth'],
classification_error,
)
### modelInspect = node.inspect(model_key)
dataInspect = h2o_cmd.runInspect(key=data_key)
check_sandbox_for_errors()
return (round(classification_error,2), classErrorPctList, totalScores)
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/matplotlib/sphinxext/mathmpl.py | 12 | 3822 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
from hashlib import md5
from docutils import nodes
from docutils.parsers.rst import directives
import warnings
from matplotlib import rcParams
from matplotlib.mathtext import MathTextParser
rcParams['mathtext.fontset'] = 'cm'
mathtext_parser = MathTextParser("Bitmap")
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
pass
def fontset_choice(arg):
return directives.choice(arg, ['cm', 'stix', 'stixsans'])
options_spec = {'fontset': fontset_choice}
def math_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
i = rawtext.find('`')
latex = rawtext[i+1:-1]
node = latex_math(rawtext)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node], []
math_role.options = options_spec
def math_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
latex = ''.join(content)
node = latex_math(block_text)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node]
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm'):
latex = "$%s$" % latex
orig_fontset = rcParams['mathtext.fontset']
rcParams['mathtext.fontset'] = fontset
if os.path.exists(filename):
depth = mathtext_parser.get_depth(latex, dpi=100)
else:
try:
depth = mathtext_parser.to_png(filename, latex, dpi=100)
except:
warnings.warn("Could not render math expression %s" % latex,
Warning)
depth = 0
rcParams['mathtext.fontset'] = orig_fontset
sys.stdout.write("#")
sys.stdout.flush()
return depth
# LaTeX to HTML translation stuff:
def latex2html(node, source):
inline = isinstance(node.parent, nodes.TextElement)
latex = node['latex']
name = 'math-%s' % md5(latex.encode()).hexdigest()[-10:]
destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl')
if not os.path.exists(destdir):
os.makedirs(destdir)
dest = os.path.join(destdir, '%s.png' % name)
path = '/'.join((setup.app.builder.imgpath, 'mathmpl'))
depth = latex2png(latex, dest, node['fontset'])
if inline:
cls = ''
else:
cls = 'class="center" '
if inline and depth != 0:
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
else:
style = ''
return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)
def setup(app):
setup.app = app
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
source = self.document.attributes['source']
self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
pass
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
inline = isinstance(node.parent, nodes.TextElement)
if inline:
self.body.append('$%s$' % node['latex'])
else:
self.body.extend(['\\begin{equation}',
node['latex'],
'\\end{equation}'])
def depart_latex_math_latex(self, node):
pass
app.add_node(latex_math,
html=(visit_latex_math_html, depart_latex_math_html),
latex=(visit_latex_math_latex, depart_latex_math_latex))
app.add_role('math', math_role)
app.add_directive('math', math_directive,
True, (0, 0, 0), **options_spec)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
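# Minimal usage sketch (assumed typical Sphinx setup, not taken from this file):
# the extension is enabled in conf.py and the 'math' role/directive registered
# above are then available from reStructuredText sources.
#
#     # conf.py
#     extensions = ['matplotlib.sphinxext.mathmpl']
#
#     # some_page.rst
#     Inline math: :math:`\alpha > \beta`
#
#     .. math::
#
#         \int_0^1 x^2\, dx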
| mit |
EtienneCmb/brainpipe | brainpipe/connectivity/correction.py | 1 | 8976 | """Connectivity correction function."""
import numpy as np
import pandas as pd
def _axes_correction(axis, ndim, num):
"""Get a slice at a specific axis."""
axes = [slice(None)] * ndim
axes[axis] = num
return tuple(axes)
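# Minimal illustration of the helper above; the expected tuple follows directly
# from the implementation (axis=1 of a 3-D array, i.e. the indexer for x[:, 0, :]).
def _demo_axes_correction():
    """Illustrative sketch only: show what `_axes_correction` builds."""
    idx = _axes_correction(axis=1, ndim=3, num=0)
    assert idx == (slice(None), 0, slice(None))
    return idx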
def get_pairs(n, part='upper', as_array=True):
"""Get connectivity pairs of the upper triangle.
Parameters
----------
n : int
Number of electrodes.
part : {'upper', 'lower', 'both'}
Part of the connectivity array to get.
as_array : bool | True
        Specify whether the returned pairs should be a (n_pairs, 2) array or a
        tuple of indices.
Returns
-------
pairs : array_like
A (n_pairs, 2) array of integers.
"""
assert part in ['upper', 'lower', 'both']
if part == 'upper':
idx = np.triu_indices(n, k=1)
elif part == 'lower':
idx = np.tril_indices(n, k=-1)
elif part == 'both':
high = np.c_[np.triu_indices(n, k=1)]
low = np.c_[np.tril_indices(n, k=-1)]
_idx = np.r_[high, low]
idx = (_idx[:, 0], _idx[:, 1])
if as_array:
return np.c_[idx]
else:
return idx
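# Minimal usage sketch for `get_pairs`; the expected pairs follow from
# np.triu_indices with k=1.
def _demo_get_pairs():
    """Illustrative sketch only: upper-triangle pairs for three electrodes."""
    pairs = get_pairs(3, part='upper')
    assert pairs.tolist() == [[0, 1], [0, 2], [1, 2]]
    return pairs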
def remove_site_contact(mat, channels, mode='soft', remove_lower=False,
symmetrical=False):
"""Remove proximate contacts for SEEG electrodes in a connectivity array.
Parameters
----------
mat : array_like
A (n_elec, n_elec) array of connectivity.
channels : list
List of channel names of length n_elec.
mode : {'soft', 'hard'}
Use 'soft' to only remove successive contacts and 'hard' to remove all
        connectivity that comes from the same electrode.
remove_lower : bool | False
Remove lower triangle.
symmetrical : bool | False
Get a symmetrical mask.
Returns
-------
select : array_like
Array of boolean values with True values in the array that need to be
removed.
"""
from re import findall
n_elec = len(channels)
assert (mat.shape == (n_elec, n_elec)) and mode in ['soft', 'hard']
# Define the boolean array to return :
select = np.zeros_like(mat, dtype=bool)
# Find site letter and num :
r = [[findall(r'\D+', k)[0]] + findall(r'\d+', k) for k in channels]
r = np.asarray(r)
for i, k in enumerate(r):
letter, digit_1, digit_2 = [k[0], int(k[1]), int(k[2])]
        if mode == 'soft':
next_contact = [letter, str(digit_1 + 1), str(digit_2 + 1)]
to_remove = np.all(r == next_contact, axis=1)
else:
to_remove = r[:, 0] == letter
to_remove[i] = False
select[i, to_remove] = True
# Remove lower triangle :
select[np.tril_indices(n_elec)] = remove_lower
# Symmetrical render :
if symmetrical:
select = symmetrize(select.astype(int)).astype(bool)
select[np.diag_indices(n_elec)] = True
return select
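# Illustrative sketch for `remove_site_contact`. The channel labels below are an
# assumption (bipolar SEEG labels such as 'A1-A2'), chosen to match the regex
# above, which expects one letter block followed by two digit blocks.
def _demo_remove_site_contact():
    """Illustrative sketch only: flag the 'A1-A2' <-> 'A2-A3' connection."""
    channels = ['A1-A2', 'A2-A3', 'B1-B2']
    mask = remove_site_contact(np.zeros((3, 3)), channels, mode='soft')
    # Only the successive contacts of electrode A are flagged (upper triangle).
    assert mask[0, 1] and not mask[0, 2]
    return mask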
def anat_based_reorder(c, df, col, part='upper'):
"""Reorder and connectivity array according to anatomy.
Parameters
----------
c : array_like
Array of (N, N) connectivity.
df : pd.DataFrame
        DataFrame containing anatomical information.
col : str
Name of the column to use in the DataFrame.
part : {'upper', 'lower', 'both'}
Part of the connectivity array to get.
Returns
-------
c_r : array_like
Anat based reorganized connectivity array.
labels : array_like
Array of reorganized labels.
index : array_like
Array of indices used for the reorganization.
"""
assert isinstance(c, np.ndarray) and c.ndim == 2
assert col in df.keys()
n_elec = c.shape[0]
# Group DataFrame column :
grp = df.groupby(col).groups
labels = list(df.keys())
index = np.concatenate([list(k) for k in grp.values()])
# Get pairs :
pairs = np.c_[get_pairs(n_elec, part=part, as_array=False)]
# Reconstruct the array :
c_r = np.zeros_like(c)
for k, i in pairs:
row, col = min(index[k], index[i]), max(index[k], index[i])
c_r[row, col] = c[k, i]
return c_r, labels, index
def anat_based_mean(x, df, col, fill_with=0., xyz=None):
"""Get mean of a connectivity array according to anatomical structures.
Parameters
----------
x : array_like
Array of (N, N) connectivity.
df : pd.DataFrame
        DataFrame containing anatomical information.
col : str
Name of the column to use in the DataFrame.
fill_with : float | 0.
Fill non-connectivity values.
xyz : array_like | None
Array of coordinate of each electrode.
Returns
-------
x_r : array_like
Mean array of connectivity inside structures.
labels : array_like
Array of labels used to take the mean.
xyz_r : array_like
Array of mean coordinates. Return only if `xyz` is not None.
"""
assert isinstance(x, np.ndarray) and x.ndim == 2
assert col in df.keys()
# Get labels and roi's indices :
gp = df.groupby(col, sort=False).groups
labels, rois = list(gp.keys()), list(gp.values())
n_roi = len(labels)
# Process the connectivity array :
np.fill_diagonal(x, 0.)
is_masked = np.ma.is_masked(x)
if is_masked:
x.mask = np.triu(x.mask)
np.fill_diagonal(x.mask, True)
x += x.T
con = np.ma.ones((n_roi, n_roi), dtype=float)
else:
x = np.triu(x)
x += x.T
con = np.zeros((n_roi, n_roi), dtype=float)
# Take the mean inside rois :
for r, rows in enumerate(rois):
_r = np.array(rows).reshape(-1, 1)
for c, cols in enumerate(rois):
con[r, c] = x[_r, np.array(cols)].mean()
# xyz coordinates :
if xyz is None:
return con, list(labels)
elif isinstance(xyz, np.ndarray) and len(df) == xyz.shape[0]:
df['X'], df['Y'], df['Z'] = xyz[:, 0], xyz[:, 1], xyz[:, 2]
df = df.groupby(col, sort=False).mean().reset_index().set_index(col)
df = df.loc[labels].reset_index()
return con, list(labels), np.array(df[['X', 'Y', 'Z']])
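# Illustrative sketch for `anat_based_mean`; the ROI labels are made up.
def _demo_anat_based_mean():
    """Illustrative sketch only: average a 4x4 array inside two ROIs."""
    df = pd.DataFrame({'roi': ['Hipp', 'Hipp', 'Amyg', 'Amyg']})
    con, labels = anat_based_mean(np.random.rand(4, 4), df, col='roi')
    assert con.shape == (2, 2) and set(labels) == {'Hipp', 'Amyg'}
    return con, labels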
def ravel_connect(connect, part='upper'):
"""Ravel a connectivity array.
Parameters
----------
connect : array_like
Connectivity array of shape (n_sites, n_sites) to ravel
part : {'upper', 'lower', 'both'}
Part of the connectivity array to get.
Returns
-------
connect : array_like
Ravel version of the connectivity array.
"""
assert isinstance(connect, np.ndarray) and (connect.ndim == 2)
assert connect.shape[0] == connect.shape[1]
pairs = get_pairs(connect.shape[0], part=part, as_array=False)
return connect[pairs]
def unravel_connect(connect, n_sites, part='upper'):
"""Unravel a connectivity array.
Parameters
----------
connect : array_like
Connectivity array of shape (n_sites, n_sites) to ravel
n_sites : int
Number of sites in the connectivity array.
part : {'upper', 'lower', 'both'}
Part of the connectivity array to get.
Returns
-------
connect : array_like
Unravel version of the connectivity array.
"""
assert isinstance(connect, np.ndarray) and (connect.ndim == 1)
pairs = get_pairs(n_sites, part=part, as_array=False)
connect_ur = np.zeros((n_sites, n_sites), dtype=connect.dtype)
connect_ur[pairs[0], pairs[1]] = connect
return connect_ur
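# Illustrative sketch: `ravel_connect` and `unravel_connect` are inverses on the
# selected triangle of the array.
def _demo_ravel_unravel():
    """Illustrative sketch only: round-trip the upper triangle of a 3x3 array."""
    c = np.arange(9, dtype=float).reshape(3, 3)
    v = ravel_connect(c, part='upper')            # upper-triangle values [1., 2., 5.]
    c_back = unravel_connect(v, 3, part='upper')
    assert np.array_equal(np.triu(c, k=1), c_back)
    return c_back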
def symmetrize(arr):
"""Make an array symmetrical.
Parameters
----------
arr : array_like
Connectivity array of shape (n_sources, n_sources)
Returns
-------
arr : array_like
Symmetrical connectivity array.
"""
assert isinstance(arr, np.ndarray)
assert (arr.ndim == 2) and (arr.shape[0] == arr.shape[1])
return arr + arr.T - np.diag(arr.diagonal())
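# Illustrative sketch: the upper triangle is mirrored onto the lower one without
# doubling the diagonal.
def _demo_symmetrize():
    """Illustrative sketch only: symmetrize a small 2x2 array."""
    out = symmetrize(np.array([[1., 2.], [0., 3.]]))
    assert np.array_equal(out, np.array([[1., 2.], [2., 3.]]))
    return out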
def concat_connect(connect, fill_with=0.):
"""Concatenate connectivity arrays.
Parameters
----------
connect : list, tuple
List of connectivity arrays.
fill_with : float | 0.
Fill value.
Returns
-------
aconnect : array_like
Merged connectivity arrays.
"""
assert isinstance(connect, (list, tuple)), ("`connect` should either be a "
"list or a tuple of arrays")
    assert np.all([k.ndim == 2 for k in connect]), ("`connect` should be a list"
" of 2d arrays")
# Shape inspection :
shapes = [k.shape[0] for k in connect]
sh = np.sum([shapes])
aconnect = np.full((sh, sh), fill_with, dtype=float)
# Inspect if any masked array :
if np.any([np.ma.is_masked(k) for k in connect]):
aconnect = np.ma.masked_array(aconnect, mask=True)
# Merge arrays :
q = 0
for k, (c, s) in enumerate(zip(connect, shapes)):
sl = slice(q, q + s)
aconnect[sl, sl] = c
q += s
return aconnect
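# Illustrative sketch: merging two arrays yields a block-diagonal array whose
# off-block entries take `fill_with`.
def _demo_concat_connect():
    """Illustrative sketch only: merge a 2x2 and a 1x1 connectivity array."""
    merged = concat_connect([np.ones((2, 2)), 2 * np.ones((1, 1))], fill_with=0.)
    assert merged.shape == (3, 3) and merged[2, 2] == 2. and merged[0, 2] == 0.
    return merged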
| gpl-3.0 |
bradleyhd/netsim | graph_degree_graph.py | 1 | 1378 | import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.optimize import curve_fit
def linear(x, a, b):
return a * x + b
def quadratic(x, a, b, c):
return a * x**2 + b * x + c
#plt.figure(num=None, figsize=(16, 12), dpi=300, facecolor='w', edgecolor='k')
plt.figure()
xs = [[1339, 4801, 11417, 35938, 111092, 244349], [1339, 4801, 11417, 35938, 111092, 244349]]
ys = [[1.5832710978342046, 1.6838158716933973, 1.8605588158009985, 2.0703155434359175, 1.8582706225470782, 1.7940773238278036], [1.5653472740851382, 1.6263278483649239, 1.7325917491460103, 1.8476264678056653, 1.7342112843409065, 1.6993030460529817]]
y1 = np.array(ys[0])
x1 = np.array(xs[0])
xl1 = np.linspace(0, np.max(x1), 50)
popt, pcov = curve_fit(quadratic, x1, y1)
plt.plot(x1, y1, 'r^', label='EDS')
plt.plot(xl1, quadratic(xl1, *popt), 'r--')
y2 = np.array(ys[1])
x2 = np.array(xs[1])
xl2 = np.linspace(0, np.max(x2), 50)
popt, pcov = curve_fit(quadratic, x2, y2)
plt.plot(x2, y2, 'bv', label='D')
plt.plot(xl2, quadratic(xl2, *popt), 'b--')
plt.title('Effects of Node Ordering on Mean Outdegree')
plt.xlabel('$\\vert V\/\\vert$')
plt.ylabel('Mean Outdegree in $G^\\uparrow$')
plt.legend(loc=0, numpoints=1)
axes = plt.gca()
# axes.set_xlim([0, np.max(np.append(xs1, xs2)) * 1.1])
# axes.set_ylim([0, np.max(np.append(ys1, ys2)) * 1.1])
#plt.savefig('test.pdf')
plt.show() | gpl-3.0 |
nens/tslib | setup.py | 1 | 1073 | from setuptools import setup
version = '0.0.10.dev0'
long_description = '\n\n'.join([
open('README.rst').read(),
open('CREDITS.rst').read(),
open('CHANGES.rst').read(),
])
install_requires = [
'ciso8601',
'lxml',
'numpy',
'pandas',
'pytz',
'setuptools',
'xmltodict',
]
tests_require = [
]
setup(name='tslib',
version=version,
description="A library for manipulating time series",
long_description=long_description,
# Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Programming Language :: Python'],
keywords=[],
author='Carsten Byrman',
author_email='[email protected]',
url='http://www.nelen-schuurmans.nl',
license='MIT',
packages=['tslib'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
extras_require={'test': tests_require},
entry_points={
'console_scripts': [
]},
)
| mit |
pprett/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
Leemoonsoo/incubator-zeppelin | python/src/main/resources/python/zeppelin_python.py | 19 | 6945 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
import ast
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PythonCompletion:
def __init__(self, interpreter, userNameSpace):
self.interpreter = interpreter
self.userNameSpace = userNameSpace
def getObjectCompletion(self, text_value):
completions = [completion for completion in list(self.userNameSpace.keys()) if completion.startswith(text_value)]
builtinCompletions = [completion for completion in dir(__builtins__) if completion.startswith(text_value)]
return completions + builtinCompletions
def getMethodCompletion(self, objName, methodName):
execResult = locals()
try:
exec("{} = dir({})".format("objectDefList", objName), _zcUserQueryNameSpace, execResult)
except:
self.interpreter.logPythonOutput("Fail to run dir on " + objName)
self.interpreter.logPythonOutput(traceback.format_exc())
return None
else:
objectDefList = execResult['objectDefList']
return [completion for completion in execResult['objectDefList'] if completion.startswith(methodName)]
def getCompletion(self, text_value):
if text_value == None:
return None
dotPos = text_value.find(".")
if dotPos == -1:
objName = text_value
completionList = self.getObjectCompletion(objName)
else:
objName = text_value[:dotPos]
methodName = text_value[dotPos + 1:]
completionList = self.getMethodCompletion(objName, methodName)
if completionList is None or len(completionList) <= 0:
self.interpreter.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreter.setStatementsFinished(result, False)
host = sys.argv[1]
port = int(sys.argv[2])
if "PY4J_GATEWAY_SECRET" in os.environ:
from py4j.java_gateway import GatewayParameters
gateway_secret = os.environ["PY4J_GATEWAY_SECRET"]
gateway = JavaGateway(gateway_parameters=GatewayParameters(
address=host, port=port, auth_token=gateway_secret, auto_convert=True))
else:
gateway = JavaGateway(GatewayClient(address=host, port=port), auto_convert=True)
intp = gateway.entry_point
_zcUserQueryNameSpace = {}
completion = PythonCompletion(intp, _zcUserQueryNameSpace)
_zcUserQueryNameSpace["__zeppelin_completion__"] = completion
_zcUserQueryNameSpace["gateway"] = gateway
from zeppelin_context import PyZeppelinContext
if intp.getZeppelinContext():
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
intp.onPythonScriptInitialized(os.getpid())
# redirect stdout/stderr to java side so that PythonInterpreter can capture the python execution result
output = Logger()
sys.stdout = output
sys.stderr = output
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
isForCompletion = req.isForCompletion()
# Get post-execute hooks
try:
if req.isCallHooks():
global_hook = intp.getHook('post_exec_dev')
else:
global_hook = None
except:
global_hook = None
try:
if req.isCallHooks():
user_hook = __zeppelin__.getHook('post_exec')
else:
user_hook = None
except:
user_hook = None
nhooks = 0
if not isForCompletion:
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]] if len(code.body) > nhooks else [])
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
if not isForCompletion:
# only call it when it is not for code completion. code completion will call it in
# PythonCompletion.getCompletion
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
if not isForCompletion:
# extract which line incur error from error message. e.g.
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# ZeroDivisionError: integer division or modulo by zero
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
pyoceans/python-ctd | ctd/read.py | 1 | 14942 | """
Read module
"""
import bz2
import collections
import gzip
import linecache
import re
import warnings
import zipfile
from datetime import datetime
from io import StringIO
from pathlib import Path
import gsw
import numpy as np
import pandas as pd
def _basename(fname):
"""Return file name without path."""
if not isinstance(fname, Path):
fname = Path(fname)
path, name, ext = fname.parent, fname.stem, fname.suffix
return path, name, ext
def _normalize_names(name):
"""Normalize column names."""
name = name.strip()
name = name.strip("*")
return name
def _open_compressed(fname):
"""Open compressed gzip, gz, zip or bz2 files."""
extension = fname.suffix.casefold()
if extension in [".gzip", ".gz"]:
cfile = gzip.open(str(fname))
elif extension == ".bz2":
cfile = bz2.BZ2File(str(fname))
elif extension == ".zip":
# NOTE: Zip format may contain more than one file in the archive
# (similar to tar), here we assume that there is just one file per
# zipfile! Also, we ask for the name because it can be different from
# the zipfile file!!
zfile = zipfile.ZipFile(str(fname))
name = zfile.namelist()[0]
cfile = zfile.open(name)
else:
raise ValueError(
"Unrecognized file extension. Expected .gzip, .bz2, or .zip, got {}".format(
extension,
),
)
contents = cfile.read()
cfile.close()
return contents
def _read_file(fname):
"""Read file contents."""
if not isinstance(fname, Path):
fname = Path(fname).resolve()
extension = fname.suffix.casefold()
if extension in [".gzip", ".gz", ".bz2", ".zip"]:
contents = _open_compressed(fname)
elif extension in [".cnv", ".edf", ".txt", ".ros", ".btl"]:
contents = fname.read_bytes()
else:
raise ValueError(
f"Unrecognized file extension. Expected .cnv, .edf, .txt, .ros, or .btl got {extension}",
)
# Read as bytes but we need to return strings for the parsers.
text = contents.decode(encoding="utf-8", errors="replace")
return StringIO(text)
def _rename_duplicate_columns(names):
"""Rename a column when it is duplicated."""
items = collections.Counter(names).items()
dup = []
for item, count in items:
if count > 2:
raise ValueError(
f"Cannot handle more than two duplicated columns. Found {count} for {item}.",
)
if count > 1:
dup.append(item)
second_occurrences = [names[::-1].index(item) for item in dup]
for idx in second_occurrences:
idx += 1
names[idx] = f"{names[idx]}_"
return names
def _parse_seabird(lines, ftype):
"""Parse searbird formats."""
# Initialize variables.
    lon = lat = time = None
skiprows = 0
metadata = {}
header, config, names = [], [], []
for k, line in enumerate(lines):
line = line.strip()
# Only cnv has columns names, for bottle files we will use the variable row.
if ftype == "cnv":
if "# name" in line:
name, unit = line.split("=")[1].split(":")
name, unit = list(map(_normalize_names, (name, unit)))
names.append(name)
# Seabird headers starts with *.
if line.startswith("*"):
header.append(line)
# Seabird configuration starts with #.
if line.startswith("#"):
config.append(line)
# NMEA position and time.
if "NMEA Latitude" in line:
hemisphere = line[-1]
lat = line.strip(hemisphere).split("=")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA Longitude" in line:
hemisphere = line[-1]
lon = line.strip(hemisphere).split("=")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
else:
raise ValueError("Latitude not recognized.")
if "NMEA UTC (Time)" in line:
time = line.split("=")[-1].strip()
# Should use some fuzzy datetime parser to make this more robust.
time = datetime.strptime(time, "%b %d %Y %H:%M:%S")
        # cnv file header ends with *END*, whereas btl files have no such marker.
if ftype == "cnv":
if line == "*END*":
skiprows = k + 1
break
else: # btl.
# There is no *END* like in a .cnv file, skip two after header info.
if not (line.startswith("*") | line.startswith("#")):
# Fix commonly occurring problem when Sbeox.* exists in the file
# the name is concatenated to previous parameter
# example:
# CStarAt0Sbeox0Mm/Kg to CStarAt0 Sbeox0Mm/Kg (really two different params)
line = re.sub(r"(\S)Sbeox", "\\1 Sbeox", line)
names = line.split()
skiprows = k + 2
break
if ftype == "btl":
# Capture stat names column.
names.append("Statistic")
metadata.update(
{
"header": "\n".join(header),
"config": "\n".join(config),
"names": _remane_duplicate_columns(names),
"skiprows": skiprows,
"time": time,
"lon": lon,
"lat": lat,
},
)
return metadata
def from_bl(fname):
"""Read Seabird bottle-trip (bl) file
Example
-------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> df = ctd.from_bl(str(data_path.joinpath('bl', 'bottletest.bl')))
>>> df._metadata["time_of_reset"]
datetime.datetime(2018, 6, 25, 20, 8, 55)
"""
df = pd.read_csv(
fname,
skiprows=2,
parse_dates=[1],
index_col=0,
names=["bottle_number", "time", "startscan", "endscan"],
)
df._metadata = {
"time_of_reset": pd.to_datetime(
linecache.getline(str(fname), 2)[6:-1],
).to_pydatetime(),
}
return df
def from_btl(fname):
"""
DataFrame constructor to open Seabird CTD BTL-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> bottles = ctd.from_btl(data_path.joinpath('btl', 'bottletest.btl'))
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="btl")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=False,
names=metadata["names"],
parse_dates=False,
skiprows=metadata["skiprows"],
)
f.close()
# At this point the data frame is not correctly lined up (multiple rows
# for avg, std, min, max or just avg, std, etc).
# Also needs date,time,and bottle number to be converted to one per line.
# Get row types, see what you have: avg, std, min, max or just avg, std.
rowtypes = df[df.columns[-1]].unique()
# Get times and dates which occur on second line of each bottle.
dates = df.iloc[:: len(rowtypes), 1].reset_index(drop=True)
times = df.iloc[1 :: len(rowtypes), 1].reset_index(drop=True)
datetimes = dates + " " + times
# Fill the Date column with datetimes.
df.loc[:: len(rowtypes), "Date"] = datetimes.values
df.loc[1 :: len(rowtypes), "Date"] = datetimes.values
# Fill missing rows.
df["Bottle"] = df["Bottle"].fillna(method="ffill")
df["Date"] = df["Date"].fillna(method="ffill")
df["Statistic"] = df["Statistic"].str.replace(r"\(|\)", "") # (avg) to avg
name = _basename(fname)[1]
dtypes = {
"bpos": int,
"pumps": bool,
"flag": bool,
"Bottle": int,
"Scan": int,
"Statistic": str,
"Date": str,
}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
df["Date"] = pd.to_datetime(df["Date"])
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def from_edf(fname):
"""
DataFrame constructor to open XBT EDF ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_edf(data_path.joinpath('XBT.EDF.gz'))
>>> ax = cast['temperature'].plot_cast()
"""
f = _read_file(fname)
header, names = [], []
for k, line in enumerate(f.readlines()):
line = line.strip()
if line.startswith("Serial Number"):
serial = line.strip().split(":")[1].strip()
elif line.startswith("Latitude"):
try:
hemisphere = line[-1]
lat = line.strip(hemisphere).split(":")[1].strip()
lat = np.float_(lat.split())
if hemisphere == "S":
lat = -(lat[0] + lat[1] / 60.0)
elif hemisphere == "N":
lat = lat[0] + lat[1] / 60.0
except (IndexError, ValueError):
lat = None
elif line.startswith("Longitude"):
try:
hemisphere = line[-1]
lon = line.strip(hemisphere).split(":")[1].strip()
lon = np.float_(lon.split())
if hemisphere == "W":
lon = -(lon[0] + lon[1] / 60.0)
elif hemisphere == "E":
lon = lon[0] + lon[1] / 60.0
except (IndexError, ValueError):
lon = None
else:
header.append(line)
if line.startswith("Field"):
col, unit = (ln.strip().casefold() for ln in line.split(":"))
names.append(unit.split()[0])
if line == "// Data":
skiprows = k + 1
break
f.seek(0)
df = pd.read_csv(
f,
header=None,
index_col=None,
names=names,
skiprows=skiprows,
delim_whitespace=True,
)
f.close()
df.set_index("depth", drop=True, inplace=True)
df.index.name = "Depth [m]"
name = _basename(fname)[1]
metadata = {
"lon": lon,
"lat": lat,
"name": str(name),
"header": "\n".join(header),
"serial": serial,
}
setattr(df, "_metadata", metadata)
return df
def from_cnv(fname):
"""
DataFrame constructor to open Seabird CTD CNV-ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_cnv(data_path.joinpath('CTD_big.cnv.bz2'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['t090C'].plot_cast()
"""
f = _read_file(fname)
metadata = _parse_seabird(f.readlines(), ftype="cnv")
f.seek(0)
df = pd.read_fwf(
f,
header=None,
index_col=None,
names=metadata["names"],
skiprows=metadata["skiprows"],
delim_whitespace=True,
widths=[11] * len(metadata["names"]),
)
f.close()
prkeys = ["prM ", "prE", "prDM", "pr50M", "pr50M1", "prSM", "prdM", "pr", "depSM"]
prkey = [key for key in prkeys if key in df.columns]
if len(prkey) != 1:
raise ValueError(f"Expected one pressure/depth column, got {prkey}.")
df.set_index(prkey, drop=True, inplace=True)
df.index.name = "Pressure [dbar]"
if prkey == "depSM":
lat = metadata.get("lat", None)
if lat is not None:
df.index = gsw.p_from_z(
df.index,
lat,
geo_strf_dyn_height=0,
sea_surface_geopotential=0,
)
else:
            warnings.warn(
f"Missing latitude information. Cannot compute pressure! Your index is {prkey}, "
"please compute pressure manually with `gsw.p_from_z` and overwrite your index.",
)
df.index.name = prkey
name = _basename(fname)[1]
dtypes = {"bpos": int, "pumps": bool, "flag": bool}
for column in df.columns:
if column in dtypes:
df[column] = df[column].astype(dtypes[column])
else:
try:
df[column] = df[column].astype(float)
except ValueError:
warnings.warn("Could not convert %s to float." % column)
metadata["name"] = str(name)
setattr(df, "_metadata", metadata)
return df
def from_fsi(fname, skiprows=9):
"""
DataFrame constructor to open Falmouth Scientific, Inc. (FSI) CTD
ASCII format.
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> cast = ctd.from_fsi(data_path.joinpath('FSI.txt.gz'))
>>> downcast, upcast = cast.split()
>>> ax = downcast['TEMP'].plot_cast()
"""
f = _read_file(fname)
df = pd.read_csv(
f,
header="infer",
index_col=None,
skiprows=skiprows,
dtype=float,
delim_whitespace=True,
)
f.close()
df.set_index("PRES", drop=True, inplace=True)
df.index.name = "Pressure [dbar]"
metadata = {"name": str(fname)}
setattr(df, "_metadata", metadata)
return df
def rosette_summary(fname):
"""
Make a BTL (bottle) file from a ROS (bottle log) file.
    This gives more control over the averaging process and over the step at
    which it is performed, eliminating the need to read the data into SBE
    Software again after pre-processing.
NOTE: Do not run LoopEdit on the upcast!
Examples
--------
>>> from pathlib import Path
>>> import ctd
>>> data_path = Path(__file__).parents[1].joinpath("tests", "data")
>>> fname = data_path.joinpath('CTD/g01l01s01.ros')
>>> ros = ctd.rosette_summary(fname)
>>> ros = ros.groupby(ros.index).mean()
>>> ros.pressure.values.astype(int)
array([835, 806, 705, 604, 503, 404, 303, 201, 151, 100, 51, 1])
"""
ros = from_cnv(fname)
ros["pressure"] = ros.index.values.astype(float)
ros["nbf"] = ros["nbf"].astype(int)
ros.set_index("nbf", drop=True, inplace=True, verify_integrity=False)
return ros
| bsd-3-clause |
procoder317/scikit-learn | examples/model_selection/randomized_search.py | 201 | 3214 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
random forest.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The result in parameter settings is quite similar, while the run
time for randomized search is drastically lower.
The performance is slightly worse for the randomized search, though this
is most likely a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
print(__doc__)
import numpy as np
from time import time
from operator import itemgetter
from scipy.stats import randint as sp_randint
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
# get some data
digits = load_digits()
X, y = digits.data, digits.target
# build a classifier
clf = RandomForestClassifier(n_estimators=20)
# Utility function to report best scores
def report(grid_scores, n_top=3):
top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]
for i, score in enumerate(top_scores):
print("Model with rank: {0}".format(i + 1))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
score.mean_validation_score,
np.std(score.cv_validation_scores)))
print("Parameters: {0}".format(score.parameters))
print("")
# specify parameters and distributions to sample from
param_dist = {"max_depth": [3, None],
"max_features": sp_randint(1, 11),
"min_samples_split": sp_randint(1, 11),
"min_samples_leaf": sp_randint(1, 11),
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run randomized search
n_iter_search = 20
random_search = RandomizedSearchCV(clf, param_distributions=param_dist,
n_iter=n_iter_search)
start = time()
random_search.fit(X, y)
print("RandomizedSearchCV took %.2f seconds for %d candidates"
" parameter settings." % ((time() - start), n_iter_search))
report(random_search.grid_scores_)
# use a full grid over all parameters
param_grid = {"max_depth": [3, None],
"max_features": [1, 3, 10],
"min_samples_split": [1, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"]}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.grid_scores_)))
report(grid_search.grid_scores_)
| bsd-3-clause |
DiCarloLab-Delft/PycQED_py3 | pycqed/instrument_drivers/physical_instruments/ZurichInstruments/ZI_base_instrument.py | 1 | 62188 | import json
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import logging
import re
from datetime import datetime
from qcodes.instrument.base import Instrument
from qcodes.utils import validators
from qcodes.instrument.parameter import ManualParameter
import zhinst.ziPython as zi
log = logging.getLogger(__name__)
##########################################################################
# Module level functions
##########################################################################
def gen_waveform_name(ch, cw):
"""
Return a standard waveform name based on channel and codeword number.
Note the use of 1-based indexing of the channels. To clarify, the
'ch' argument to this function is 0-based, but the naming of the actual
waveforms as well as the signal outputs of the instruments are 1-based.
The function will map 'logical' channel 0 to physical channel 1, and so on.
"""
return 'wave_ch{}_cw{:03}'.format(ch+1, cw)
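# Example values (follow directly from the format string above): logical channel
# 0 maps to physical channel 1, logical channel 7 to physical channel 8.
def _demo_gen_waveform_name():
    """Illustrative sketch only: codeword waveform naming."""
    assert gen_waveform_name(0, 3) == 'wave_ch1_cw003'
    assert gen_waveform_name(7, 12) == 'wave_ch8_cw012'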
def gen_partner_waveform_name(ch, cw):
"""
Return a standard waveform name for the partner waveform of a dual-channel
waveform. The physical channel indexing is 1-based where as the logical channel
indexing (i.e. the argument to this function) is 0-based. To clarify, the
'ch' argument to this function is 0-based, but the naming of the actual
waveforms as well as the signal outputs of the instruments are 1-based.
The function will map 'logical' channel 0 to physical channel 1, and so on.
"""
return gen_waveform_name(2*(ch//2) + ((ch + 1) % 2), cw)
def merge_waveforms(chan0=None, chan1=None, marker=None):
"""
Merges waveforms for channel 0, channel 1 and marker bits into a single
numpy array suitable for being written to the instrument. Channel 1 and marker
data is optional. Use named arguments to combine, e.g. channel 0 and marker data.
"""
chan0_uint = None
chan1_uint = None
marker_uint = None
# The 'array_format' variable is used internally in this function in order to
# control the order and number of uint16 words that we put together for each
# sample of the final array. The variable is essentially interpreted as a bit
# mask where each bit indicates which channels/marker values to include in
# the final array. Bit 0 for chan0 data, 1 for chan1 data and 2 for marker data.
array_format = 0
if chan0 is not None:
chan0_uint = np.array((np.power(2, 15)-1)*chan0, dtype=np.uint16)
array_format += 1
if chan1 is not None:
chan1_uint = np.array((np.power(2, 15)-1)*chan1, dtype=np.uint16)
array_format += 2
if marker is not None:
marker_uint = np.array(marker, dtype=np.uint16)
array_format += 4
if array_format == 1:
return chan0_uint
elif array_format == 2:
return chan1_uint
elif array_format == 3:
return np.vstack((chan0_uint, chan1_uint)).reshape((-2,), order='F')
elif array_format == 4:
return marker_uint
elif array_format == 5:
return np.vstack((chan0_uint, marker_uint)).reshape((-2,), order='F')
elif array_format == 6:
return np.vstack((chan1_uint, marker_uint)).reshape((-2,), order='F')
elif array_format == 7:
return np.vstack((chan0_uint, chan1_uint, marker_uint)).reshape((-2,), order='F')
else:
return []
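# Illustrative sketch for `merge_waveforms`: with channel-0 data and marker bits
# the uint16 samples are interleaved as (chan0, marker, chan0, marker, ...).
def _demo_merge_waveforms():
    """Illustrative sketch only: merge one analog channel with marker data."""
    wf = merge_waveforms(chan0=np.array([0.0, 1.0]), marker=np.array([1, 0]))
    assert wf.dtype == np.uint16 and wf.tolist() == [0, 1, 32767, 0]
    return wf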
def plot_timing_diagram(data, bits, line_length=30):
"""
Takes list of 32-bit integer values as read from the 'raw/dios/0/data' device nodes and creates
a timing diagram of the result. The timing diagram can be used for verifying that e.g. the
strobe signal (A.K.A the toggle signal) is periodic.
"""
def _plot_lines(ax, pos, *args, **kwargs):
if ax == 'x':
for p in pos:
plt.axvline(p, *args, **kwargs)
else:
for p in pos:
plt.axhline(p, *args, **kwargs)
def _plot_timing_diagram(data, bits):
plt.figure(figsize=(20, 0.5*len(bits)))
t = np.arange(len(data))
_plot_lines('y', 2*np.arange(len(bits)), color='.5', linewidth=2)
_plot_lines('x', t[0:-1:2], color='.5', linewidth=0.5)
for n, i in enumerate(reversed(bits)):
line = [((x >> i) & 1) for x in data]
plt.step(t, np.array(line) + 2*n, 'r', linewidth=2, where='post')
plt.text(-0.5, 2*n, str(i))
plt.xlim([t[0], t[-1]])
plt.ylim([0, 2*len(bits)+1])
plt.gca().axis('off')
plt.show()
while len(data) > 0:
if len(data) > line_length:
d = data[0:line_length]
data = data[line_length:]
else:
d = data
data = []
_plot_timing_diagram(d, bits)
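# Illustrative usage sketch (the data values are made up): visualise bits 0 and 1
# of a short DIO capture in which bit 0 toggles every sample and bit 1 stays high.
def _demo_plot_timing_diagram():
    """Illustrative sketch only: plot two bits of a four-sample capture."""
    plot_timing_diagram([0x3, 0x2, 0x3, 0x2], bits=[0, 1], line_length=30)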
def plot_codeword_diagram(ts, cws, range=None):
"""
Takes a list of timestamps (X) and codewords (Y) and produces a simple 'stem' plot of the two.
The plot is useful for visually checking that the codewords are detected at regular intervals.
Can also be used for visual verification of standard codeword patterns such as the staircase used
for calibration.
"""
plt.figure(figsize=(20, 10))
plt.stem((np.array(ts)-ts[0])*10.0/3, np.array(cws))
if range is not None:
plt.xlim(range[0], range[1])
xticks = np.arange(range[0], range[1], step=20)
while len(xticks) > 20:
xticks = xticks[::2]
plt.xticks(xticks)
plt.xlabel('Time (ns)')
plt.ylabel('Codeword (#)')
plt.grid()
plt.show()
def _gen_set_cmd(dev_set_func, node_path: str):
"""
    Generates a set function based on the dev_set_func method (e.g., seti)
    and the node_path (e.g., '/dev8003/sigouts/1/mode').
"""
def set_cmd(val):
return dev_set_func(node_path, val)
return set_cmd
def _gen_get_cmd(dev_get_func, node_path: str):
"""
    Generates a get function based on the dev_get_func method (e.g., geti)
    and the node_path (e.g., '/dev8003/sigouts/1/mode').
"""
def get_cmd():
return dev_get_func(node_path)
return get_cmd
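# Illustrative sketch: these closures are what the driver binds to qcodes
# parameters as set_cmd/get_cmd. The node path reuses the example from the
# docstrings above and `daq` is assumed to be a connected (or mock) DAQ server.
def _demo_gen_cmds(daq, node_path='/dev8003/sigouts/1/mode'):
    """Illustrative sketch only: build and use a set/get command pair."""
    set_mode = _gen_set_cmd(daq.setInt, node_path)
    get_mode = _gen_get_cmd(daq.getInt, node_path)
    set_mode(1)
    return get_mode()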
##########################################################################
# Exceptions
##########################################################################
class ziDAQError(Exception):
"""Exception raised when no DAQ has been connected."""
pass
class ziModuleError(Exception):
"""Exception raised when a module generates an error."""
pass
class ziValueError(Exception):
"""Exception raised when a wrong or empty value is returned."""
pass
class ziCompilationError(Exception):
"""Exception raised when an AWG program fails to compile."""
pass
class ziDeviceError(Exception):
"""Exception raised when a class is used with the wrong device type."""
pass
class ziOptionsError(Exception):
"""Exception raised when a device does not have the right options installed."""
pass
class ziVersionError(Exception):
"""Exception raised when a device does not have the right firmware versions."""
pass
class ziReadyError(Exception):
"""Exception raised when a device was started which is not ready."""
pass
class ziRuntimeError(Exception):
"""Exception raised when a device detects an error at runtime."""
pass
class ziConfigurationError(Exception):
"""Exception raised when a wrong configuration is detected."""
pass
##########################################################################
# Mock classes
##########################################################################
class MockDAQServer():
"""
This class implements a mock version of the DAQ object used for
communicating with the instruments. It contains dummy declarations of
the most important methods implemented by the server and used by
the instrument drivers.
Important: The Mock server creates some default 'nodes' (basically
just entries in a 'dict') based on the device name that is used when
connecting to a device. These nodes differ depending on the instrument
type, which is determined by the number in the device name: dev2XXX are
UHFQA instruments and dev8XXX are HDAWG8 instruments.
"""
def __init__(self, server, port, apilevel, verbose=False):
self.server = server
self.port = port
self.apilevel = apilevel
self.device = None
self.interface = None
self.nodes = {'/zi/devices/connected': {'type': 'String', 'value': ''}}
self.devtype = None
self.poll_nodes = []
self.verbose = verbose
def awgModule(self):
return MockAwgModule(self)
def setDebugLevel(self, debuglevel: int):
print('Setting debug level to {}'.format(debuglevel))
def connectDevice(self, device, interface):
if self.device is not None:
raise ziDAQError(
'Trying to connect to a device that is already connected!')
if self.interface is not None and self.interface != interface:
raise ziDAQError(
'Trying to change interface on an already connected device!')
self.device = device
self.interface = interface
if self.device.lower().startswith('dev2'):
self.devtype = 'UHFQA'
elif self.device.lower().startswith('dev8'):
self.devtype = 'HDAWG8'
# Add paths
filename = os.path.join(os.path.dirname(os.path.abspath(
__file__)), 'zi_parameter_files', 'node_doc_{}.json'.format(self.devtype))
if not os.path.isfile(filename):
raise ziRuntimeError(
'No parameter file available for devices of type ' + self.devtype)
# NB: defined in parent class
self._load_parameter_file(filename=filename)
# Update connected status
self.nodes['/zi/devices/connected']['value'] = self.device
# Set the LabOne revision
self.nodes['/zi/about/revision'] = {'type': 'Integer', 'value': 200802104}
self.nodes[f'/{self.device}/features/devtype'] = {'type': 'String', 'value': self.devtype}
self.nodes[f'/{self.device}/system/fwrevision'] = {'type': 'Integer', 'value': 99999}
self.nodes[f'/{self.device}/system/fpgarevision'] = {'type': 'Integer', 'value': 99999}
self.nodes[f'/{self.device}/system/slaverevision'] = {'type': 'Integer', 'value': 99999}
if self.devtype == 'UHFQA':
self.nodes[f'/{self.device}/features/options'] = {'type': 'String', 'value': 'QA\nAWG'}
for i in range(16):
self.nodes[f'/{self.device}/awgs/0/waveform/waves/{i}'] = {'type': 'ZIVectorData', 'value': np.array([])}
for i in range(10):
self.nodes[f'/{self.device}/qas/0/integration/weights/{i}/real'] = {'type': 'ZIVectorData', 'value': np.array([])}
self.nodes[f'/{self.device}/qas/0/integration/weights/{i}/imag'] = {'type': 'ZIVectorData', 'value': np.array([])}
self.nodes[f'/{self.device}/qas/0/result/data/{i}/wave'] = {'type': 'ZIVectorData', 'value': np.array([])}
self.nodes[f'/{self.device}/raw/dios/0/delay'] = {'type': 'Integer', 'value': 0}
self.nodes[f'/{self.device}/dios/0/extclk'] = {'type': 'Integer', 'value': 0}
self.nodes[f'/{self.device}/dios/0/drive'] = {'type': 'Integer', 'value': 0}
self.nodes[f'/{self.device}/dios/0/mode'] = {'type': 'Integer', 'value': 0}
elif self.devtype == 'HDAWG8':
self.nodes[f'/{self.device}/features/options'] = {'type': 'String', 'value': 'PC\nME'}
self.nodes[f'/{self.device}/raw/error/json/errors'] = {
'type': 'String', 'value': '{"sequence_nr" : 0, "new_errors" : 0, "first_timestamp" : 0, "timestamp" : 0, "timestamp_utc" : "2019-08-07 17 : 33 : 55", "messages" : []}'}
for i in range(32):
self.nodes['/' + self.device +
'/raw/dios/0/delays/' + str(i) + '/value'] = {'type': 'Integer', 'value': 0}
self.nodes[f'/{self.device}/raw/error/blinkseverity'] = {'type': 'Integer', 'value': 0}
self.nodes[f'/{self.device}/raw/error/blinkforever'] = {'type': 'Integer', 'value': 0}
self.nodes[f'/{self.device}/dios/0/extclk'] = {'type': 'Integer', 'value': 0}
for awg_nr in range(4):
for i in range(32):
self.nodes[f'/{self.device}/awgs/{awg_nr}/waveform/waves/{i}'] = {
'type': 'ZIVectorData', 'value': np.array([])}
self.nodes[f'/{self.device}/awgs/{awg_nr}/waveform/waves/{i}'] = {
'type': 'ZIVectorData', 'value': np.array([])}
self.nodes[f'/{self.device}/awgs/{awg_nr}/waveform/waves/{i}'] = {
'type': 'ZIVectorData', 'value': np.array([])}
self.nodes[f'/{self.device}/awgs/{awg_nr}/waveform/waves/{i}'] = {
'type': 'ZIVectorData', 'value': np.array([])}
for sigout_nr in range(8):
self.nodes[f'/{self.device}/sigouts/{sigout_nr}/precompensation/fir/coefficients'] = {
'type': 'ZIVectorData', 'value': np.array([])}
self.nodes[f'/{self.device}/dios/0/mode'] = {'type': 'Integer', 'value': 0}
self.nodes[f'/{self.device}/dios/0/extclk'] = {'type': 'Integer', 'value': 0}
self.nodes[f'/{self.device}/dios/0/drive'] = {'type': 'Integer', 'value': 0}
for dio_nr in range(32):
self.nodes[f'/{self.device}/raw/dios/0/delays/{dio_nr}/value'] = {'type': 'Integer', 'value': 0}
def listNodesJSON(self, path):
pass
def getString(self, path):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
if self.nodes[path]['type'] != 'String':
raise ziRuntimeError(
"Trying to node '" + path + "' as string, but the type is '" + self.nodes[path]['type'] + "'!")
return self.nodes[path]['value']
def getInt(self, path):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
if self.verbose:
print('getInt', path, int(self.nodes[path]['value']))
return int(self.nodes[path]['value'])
def getDouble(self, path):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
if self.verbose:
print('getDouble', path, float(self.nodes[path]['value']))
return float(self.nodes[path]['value'])
def setInt(self, path, value):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
if self.verbose:
print('setInt', path, value)
self.nodes[path]['value'] = value
def setDouble(self, path, value):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
if self.verbose:
print('setDouble', path, value)
self.nodes[path]['value'] = value
def setVector(self, path, value):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
if self.nodes[path]['type'] != 'ZIVectorData':
raise ziRuntimeError("Unable to set node '" + path + "' of type " +
self.nodes[path]['type'] + " using setVector!")
self.nodes[path]['value'] = value
def setComplex(self, path, value):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
if not self.nodes[path]['type'].startswith('Complex'):
raise ziRuntimeError("Unable to set node '" + path + "' of type " +
self.nodes[path]['type'] + " using setComplex!")
if self.verbose:
print('setComplex', path, value)
self.nodes[path]['value'] = value
def getComplex(self, path):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
if not self.nodes[path]['type'].startswith('Complex'):
raise ziRuntimeError("Unable to get node '" + path + "' of type " +
self.nodes[path]['type'] + " using getComplex!")
if self.verbose:
print('getComplex', path, self.nodes[path]['value'])
return self.nodes[path]['value']
def get(self, path, flat, flags):
if path not in self.nodes:
raise ziRuntimeError("Unknown node '" + path +
"' used with mocked server and device!")
return {path: [{'vector': self.nodes[path]['value']}]}
def getAsEvent(self, path):
self.poll_nodes.append(path)
def poll(self, poll_time, timeout, flags, flat):
poll_data = {}
for path in self.poll_nodes:
if self.verbose:
print('poll', path)
m = re.match(r'/(\w+)/qas/0/result/data/(\d+)/wave', path)
if m:
poll_data[path] = [{'vector': np.random.rand(
self.getInt('/' + m.group(1) + '/qas/0/result/length'))}]
continue
m = re.match(r'/(\w+)/qas/0/monitor/inputs/(\d+)/wave', path)
if m:
poll_data[path] = [{'vector': np.random.rand(
self.getInt('/' + m.group(1) + '/qas/0/monitor/length'))}]
continue
m = re.match(r'/(\w+)/awgs/(\d+)/ready', path)
if m:
poll_data[path] = {'value': [1]}
continue
poll_data[path] = {'value': [0]}
return poll_data
def subscribe(self, path):
if self.verbose:
print('subscribe', path)
self.poll_nodes.append(path)
def unsubscribe(self, path):
if self.verbose:
print('unsubscribe', path)
if path in self.poll_nodes:
self.poll_nodes.remove(path)
def sync(self):
"""The sync method does not need to do anything as there are no
device delays to deal with when using the mock server.
"""
pass
def _load_parameter_file(self, filename: str):
"""
Takes in a node_doc JSON file auto generates paths based on
the contents of this file.
"""
f = open(filename).read()
node_pars = json.loads(f)
for par in node_pars.values():
node = par['Node'].split('/')
# The parfile is valid for all devices of a certain type
# so the device name has to be split out.
parpath = '/' + self.device + '/' + '/'.join(node)
if par['Type'].startswith('Integer'):
self.nodes[parpath.lower()] = {'type': par['Type'], 'value': 0}
elif par['Type'].startswith('Double'):
self.nodes[parpath.lower()] = {
'type': par['Type'], 'value': 0.0}
elif par['Type'].startswith('Complex'):
self.nodes[parpath.lower()] = {
'type': par['Type'], 'value': 0 + 0j}
elif par['Type'].startswith('String'):
self.nodes[parpath.lower()] = {
'type': par['Type'], 'value': ''}
class MockAwgModule():
"""
This class implements a mock version of the awgModule object used for
compiling and uploading AWG programs. It doesn't actually compile anything, but
only maintains a counter of how often the compilation method has been executed.
For the future, the class could be updated to allow the user to select whether
the next compilation should be successful or not in order to enable more
flexibility in the unit tests of the actual drivers.
"""
def __init__(self, daq):
self._daq = daq
self._device = None
self._index = None
self._sourcestring = None
self._compilation_count = {}
if not os.path.isdir('awg/waves'):
os.makedirs('awg/waves')
def get_compilation_count(self, index):
if index not in self._compilation_count:
raise ziModuleError(
'Trying to access compilation count of invalid index ' + str(index) + '!')
return self._compilation_count[index]
def set(self, path, value):
if path == 'awgModule/device':
self._device = value
elif path == 'awgModule/index':
self._index = value
if self._index not in self._compilation_count:
self._compilation_count[self._index] = 0
elif path == 'awgModule/compiler/sourcestring':
# The compiled program is stored in _sourcestring
self._sourcestring = value
if self._index not in self._compilation_count:
raise ziModuleError(
'Trying to compile AWG program, but no AWG index has been configured!')
if self._device is None:
raise ziModuleError(
'Trying to compile AWG program, but no AWG device has been configured!')
self._compilation_count[self._index] += 1
self._daq.setInt('/' + self._device + '/' +
'awgs/' + str(self._index) + '/ready', 1)
def get(self, path):
if path == 'awgModule/device':
value = [self._device]
elif path == 'awgModule/index':
            value = [self._index]
elif path == 'awgModule/compiler/statusstring':
value = ['File successfully uploaded']
else:
value = ['']
for elem in reversed(path.split('/')[1:]):
rv = {elem: value}
value = rv
return rv
def execute(self):
pass
##########################################################################
# Class
##########################################################################
class ZI_base_instrument(Instrument):
"""
This is a base class for Zurich Instruments instrument drivers.
It includes functionality that is common to all instruments. It maintains
a list of available nodes as JSON files in the 'zi_parameter_files'
subfolder. The parameter files should be regenerated when newer versions
of the firmware are installed on the instrument.
The base class also manages waveforms for the instruments. The waveforms
are kept in a table, which is kept synchronized with CSV files in the
awg/waves folder belonging to LabOne. The base class will select whether
to compile and configure an instrument based on changes to the waveforms
and to the requested AWG program. Basically, if a waveform changes length
or if the AWG program changes, then the program will be compiled and
uploaded the next time the user executes the 'start' method. If a waveform
has changed, but the length is the same, then the waveform will simply
    be updated on the instrument using a fast waveform upload technique. Again,
this is triggered when the 'start' method is called.
"""
##########################################################################
# Constructor
##########################################################################
def __init__(self,
name: str,
device: str,
                 interface: str = '1GbE',
                 server: str = 'localhost',
                 port: int = 8004,
                 apilevel: int = 5,
                 num_codewords: int = 0,
                 logfile: str = None,
**kw) -> None:
"""
Input arguments:
name: (str) name of the instrument as seen by the user
device (str) the name of the device e.g., "dev8008"
interface (str) the name of the interface to use ('1GbE' or 'USB')
server (str) the host where the ziDataServer is running
port (int) the port to connect to for the ziDataServer (don't change)
apilevel (int) the API version level to use (don't change unless you know what you're doing)
num_codewords (int) the number of codeword-based waveforms to prepare
logfile (str) file name where all commands should be logged
"""
t0 = time.time()
super().__init__(name=name, **kw)
# Decide which server to use based on name
if server == 'emulator':
log.info('Connecting to mock DAQ server')
self.daq = MockDAQServer(server, port, apilevel)
else:
log.info('Connecting to DAQ server')
self.daq = zi.ziDAQServer(server, port, apilevel)
if not self.daq:
raise(ziDAQError())
self.daq.setDebugLevel(0)
        # Prefer setVector if the connected DAQ API provides it
self.use_setVector = "setVector" in dir(self.daq)
# Connect a device
if not self._is_device_connected(device):
log.info(f'Connecting to device {device}')
self.daq.connectDevice(device, interface)
self.devname = device
self.devtype = self.gets('features/devtype')
# We're now connected, so do some sanity checking
self._check_devtype()
self._check_versions()
self._check_options()
# Default waveform length used when initializing waveforms to zero
self._default_waveform_length = 32
# add qcodes parameters based on JSON parameter file
# FIXME: we might want to skip/remove/(add to _params_to_skip_update) entries like AWGS/*/ELF/DATA,
# AWGS/*/SEQUENCER/ASSEMBLY, AWGS/*/DIO/DATA
filename = os.path.join(os.path.dirname(os.path.abspath(
__file__)), 'zi_parameter_files', 'node_doc_{}.json'.format(self.devtype))
if not os.path.isfile(filename):
log.info(f"{self.devname}: Parameter file not found, creating '{filename}''")
self._create_parameter_file(filename=filename)
try:
# NB: defined in parent class
log.info(f'{self.devname}: Loading parameter file')
self._load_parameter_file(filename=filename)
except FileNotFoundError:
# Should never happen as we just created the file above
log.error(f"{self.devname}: parameter file for data parameters {filename} not found")
raise
# Create modules
self._awgModule = self.daq.awgModule()
self._awgModule.set('awgModule/device', device)
self._awgModule.execute()
# Will hold information about all configured waveforms
self._awg_waveforms = {}
# Asserted when AWG needs to be reconfigured
self._awg_needs_configuration = [False]*(self._num_channels()//2)
self._awg_program = [None]*(self._num_channels()//2)
# Create waveform parameters
self._num_codewords = 0
self._add_codeword_waveform_parameters(num_codewords)
# Create other neat parameters
self._add_extra_parameters()
# A list of all subscribed paths
self._subscribed_paths = []
# Structure for storing errors
self._errors = None
# Structure for storing errors that should be demoted to warnings
self._errors_to_ignore = []
# Make initial error check
self.check_errors()
# Optionally setup log file
if logfile is not None:
self._logfile = open(logfile, 'w')
else:
self._logfile = None
# Show some info
serial = self.get('features_serial')
options = self.get('features_options')
fw_revision = self.get('system_fwrevision')
fpga_revision = self.get('system_fpgarevision')
log.info('{}: serial={}, options={}, fw_revision={}, fpga_revision={}'
.format(self.devname, serial, options.replace('\n', '|'), fw_revision, fpga_revision))
self.connect_message(begin_time=t0)
##########################################################################
# Private methods: Abstract Base Class methods
##########################################################################
def _check_devtype(self):
"""
Checks that the driver is used with the correct device-type.
"""
raise NotImplementedError('Virtual method with no implementation!')
def _check_options(self):
"""
Checks that the correct options are installed on the instrument.
"""
raise NotImplementedError('Virtual method with no implementation!')
def _check_versions(self):
"""
Checks that sufficient versions of the firmware are available.
"""
raise NotImplementedError('Virtual method with no implementation!')
def _check_awg_nr(self, awg_nr):
"""
Checks that the given AWG index is valid for the device.
"""
raise NotImplementedError('Virtual method with no implementation!')
def _update_num_channels(self):
raise NotImplementedError('Virtual method with no implementation!')
def _update_awg_waveforms(self):
raise NotImplementedError('Virtual method with no implementation!')
def _num_channels(self):
raise NotImplementedError('Virtual method with no implementation!')
def _add_extra_parameters(self) -> None:
"""
Adds extra useful parameters to the instrument.
"""
log.info(f'{self.devname}: Adding extra parameters')
self.add_parameter(
'timeout',
unit='s',
initial_value=30,
parameter_class=ManualParameter,
vals=validators.Ints())
##########################################################################
# Private methods
##########################################################################
def _add_codeword_waveform_parameters(self, num_codewords) -> None:
"""
Adds parameters that are used for uploading codewords.
It also contains initial values for each codeword to ensure
that the "upload_codeword_program" works.
"""
docst = ('Specifies a waveform for a specific codeword. ' +
'The waveforms must be uploaded using ' +
'"upload_codeword_program". The channel number corresponds' +
' to the channel as indicated on the device (1 is lowest).')
self._params_to_skip_update = []
log.info(f'{self.devname}: Adding codeword waveform parameters')
for ch in range(self._num_channels()):
for cw in range(max(num_codewords, self._num_codewords)):
# NB: parameter naming identical to QWG
wf_name = gen_waveform_name(ch, cw)
if cw >= self._num_codewords and wf_name not in self.parameters:
# Add parameter
self.add_parameter(
wf_name,
label='Waveform channel {} codeword {:03}'.format(
ch+1, cw),
vals=validators.Arrays(), # min_value, max_value = unknown
set_cmd=self._gen_write_waveform(ch, cw),
get_cmd=self._gen_read_waveform(ch, cw),
docstring=docst)
self._params_to_skip_update.append(wf_name)
# Make sure the waveform data is up-to-date
self._gen_read_waveform(ch, cw)()
elif cw >= num_codewords:
# Delete parameter as it's no longer needed
if wf_name in self.parameters:
self.parameters.pop(wf_name)
self._awg_waveforms.pop(wf_name)
# Update the number of codewords
self._num_codewords = num_codewords
def _load_parameter_file(self, filename: str):
"""
        Takes in a node_doc JSON file and auto-generates parameters based on
the contents of this file.
"""
        with open(filename) as f:
            node_pars = json.load(f)
for par in node_pars.values():
node = par['Node'].split('/')
# The parfile is valid for all devices of a certain type
# so the device name has to be split out.
parname = '_'.join(node).lower()
parpath = '/' + self.devname + '/' + '/'.join(node)
# This block provides the mapping between the ZI node and QCoDes
# parameter.
par_kw = {}
par_kw['name'] = parname
if par['Unit'] != 'None':
par_kw['unit'] = par['Unit']
else:
par_kw['unit'] = 'arb. unit'
par_kw['docstring'] = par['Description']
if "Options" in par.keys():
# options can be done better, this is not sorted
par_kw['docstring'] += '\nOptions:\n' + str(par['Options'])
# Creates type dependent get/set methods
if par['Type'] == 'Integer (64 bit)':
par_kw['set_cmd'] = _gen_set_cmd(self.seti, parpath)
par_kw['get_cmd'] = _gen_get_cmd(self.geti, parpath)
# min/max not implemented yet for ZI auto docstrings #352
par_kw['vals'] = validators.Ints()
elif par['Type'] == 'Integer (enumerated)':
par_kw['set_cmd'] = _gen_set_cmd(self.seti, parpath)
par_kw['get_cmd'] = _gen_get_cmd(self.geti, parpath)
par_kw['vals'] = validators.Ints(min_value=0,
max_value=len(par["Options"]))
elif par['Type'] == 'Double':
par_kw['set_cmd'] = _gen_set_cmd(self.setd, parpath)
par_kw['get_cmd'] = _gen_get_cmd(self.getd, parpath)
# min/max not implemented yet for ZI auto docstrings #352
par_kw['vals'] = validators.Numbers()
elif par['Type'] == 'Complex Double':
par_kw['set_cmd'] = _gen_set_cmd(self.setc, parpath)
par_kw['get_cmd'] = _gen_get_cmd(self.getc, parpath)
# min/max not implemented yet for ZI auto docstrings #352
par_kw['vals'] = validators.Anything()
elif par['Type'] == 'ZIVectorData':
par_kw['set_cmd'] = _gen_set_cmd(self.setv, parpath)
par_kw['get_cmd'] = _gen_get_cmd(self.getv, parpath)
# min/max not implemented yet for ZI auto docstrings #352
par_kw['vals'] = validators.Arrays()
elif par['Type'] == 'String':
par_kw['set_cmd'] = _gen_set_cmd(self.sets, parpath)
par_kw['get_cmd'] = _gen_get_cmd(self.gets, parpath)
par_kw['vals'] = validators.Strings()
elif par['Type'] == 'CoreString':
par_kw['get_cmd'] = _gen_get_cmd(self.getd, parpath)
par_kw['set_cmd'] = None # Not implemented
par_kw['vals'] = validators.Strings()
elif par['Type'] == 'ZICntSample':
par_kw['get_cmd'] = None # Not implemented
par_kw['set_cmd'] = None # Not implemented
par_kw['vals'] = None # Not implemented
elif par['Type'] == 'ZITriggerSample':
par_kw['get_cmd'] = None # Not implemented
par_kw['set_cmd'] = None # Not implemented
par_kw['vals'] = None # Not implemented
elif par['Type'] == 'ZIDIOSample':
par_kw['get_cmd'] = None # Not implemented
par_kw['set_cmd'] = None # Not implemented
par_kw['vals'] = None # Not implemented
elif par['Type'] == 'ZIAuxInSample':
par_kw['get_cmd'] = None # Not implemented
par_kw['set_cmd'] = None # Not implemented
par_kw['vals'] = None # Not implemented
elif par['Type'] == 'ZIScopeWave':
par_kw['get_cmd'] = None # Not implemented
par_kw['set_cmd'] = None # Not implemented
par_kw['vals'] = None # Not implemented
else:
raise NotImplementedError(
"Parameter '{}' of type '{}' not supported".format(
parname, par['Type']))
# If not readable/writable the methods are removed after the type
# dependent loop to keep this more readable.
if 'Read' not in par['Properties']:
par_kw['get_cmd'] = None
if 'Write' not in par['Properties']:
par_kw['set_cmd'] = None
self.add_parameter(**par_kw)
def _create_parameter_file(self, filename: str):
"""
        This generates a JSON file containing the node_docs as extracted
from the ZI instrument API.
Replaces the use of the s_node_pars and d_node_pars files.
"""
# Get all interesting nodes
nodes = json.loads(self.daq.listNodesJSON('/' + self.devname))
modified_nodes = {}
# Do some name mangling
for name, node in nodes.items():
name = name.replace('/' + self.devname.upper() + '/', '')
node['Node'] = name
modified_nodes[name] = node
# Dump the nodes
with open(filename, "w") as json_file:
json.dump(modified_nodes, json_file, indent=4, sort_keys=True)
def _is_device_connected(self, device):
"""
Return true if the given device is already connected to the server.
"""
if device.lower() in [x.lower() for x in self.daq.getString('/zi/devices/connected').split(',')]:
return True
else:
return False
def _get_full_path(self, paths):
"""
Concatenates the device name with one or more paths to create a fully
qualified path for use in the server.
"""
if type(paths) is list:
            for n, p in enumerate(paths):
if p[0] != '/':
paths[n] = ('/' + self.devname + '/' + p).lower()
else:
paths[n] = paths[n].lower()
else:
if paths[0] != '/':
paths = ('/' + self.devname + '/' + paths).lower()
else:
paths = paths.lower()
return paths
def _get_awg_directory(self):
"""
Returns the AWG directory where waveforms should be stored.
"""
return os.path.join(self._awgModule.get('awgModule/directory')['directory'][0], 'awg')
def _initialize_waveform_to_zeros(self):
"""
Generates all zeros waveforms for all codewords.
"""
t0 = time.time()
wf = np.zeros(self._default_waveform_length)
waveform_params = [value for key, value in self.parameters.items()
if 'wave_ch' in key.lower()]
for par in waveform_params:
par(wf)
t1 = time.time()
log.debug(
'Set all waveforms to zeros in {:.1f} ms'.format(1.0e3*(t1-t0)))
def _gen_write_waveform(self, ch, cw):
def write_func(waveform):
log.debug(f"{self.devname}: Writing waveform (len {len(waveform)}) to ch{ch} cw{cw}")
# Determine which AWG this waveform belongs to
awg_nr = ch//2
# Name of this waveform
wf_name = gen_waveform_name(ch, cw)
# Check that we're allowed to modify this waveform
if self._awg_waveforms[wf_name]['readonly']:
raise ziConfigurationError(
'Trying to modify read-only waveform on '
'codeword {}, channel {}'.format(cw, ch))
# The length of HDAWG waveforms should be a multiple of 8 samples.
if (len(waveform) % 8) != 0:
log.debug(f"{self.devname}: waveform is not a multiple of 8 samples, appending zeros.")
extra_zeros = 8-(len(waveform) % 8)
waveform = np.concatenate([waveform, np.zeros(extra_zeros)])
# If the length has changed, we need to recompile the AWG program
if len(waveform) != len(self._awg_waveforms[wf_name]['waveform']):
log.debug(f"{self.devname}: Length of waveform has changed. Flagging awg as requiring recompilation.")
self._awg_needs_configuration[awg_nr] = True
# Update the associated CSV file
log.debug(f"{self.devname}: Updating csv waveform {wf_name}, for ch{ch}, cw{cw}")
self._write_csv_waveform(ch=ch, cw=cw, wf_name=wf_name,
waveform=waveform)
# And the entry in our table and mark it for update
self._awg_waveforms[wf_name]['waveform'] = waveform
log.debug(f"{self.devname}: Marking waveform as dirty.")
self._awg_waveforms[wf_name]['dirty'] = True
return write_func
def _write_csv_waveform(self, ch: int, cw: int, wf_name: str, waveform) -> None:
filename = os.path.join(
self._get_awg_directory(), 'waves',
self.devname + '_' + wf_name + '.csv')
np.savetxt(filename, waveform, delimiter=",")
def _gen_read_waveform(self, ch, cw):
def read_func():
# AWG
awg_nr = ch//2
# Name of this waveform
wf_name = gen_waveform_name(ch, cw)
log.debug(f"{self.devname}: Reading waveform {wf_name} for ch{ch} cw{cw}")
# Check if the waveform data is in our dictionary
if wf_name not in self._awg_waveforms:
log.debug(f"{self.devname}: Waveform not in self._awg_waveforms: reading from csv file.")
# Initialize elements
self._awg_waveforms[wf_name] = {
'waveform': None, 'dirty': False, 'readonly': False}
# Make sure everything gets recompiled
log.debug(f"{self.devname}: Flagging awg as requiring recompilation.")
self._awg_needs_configuration[awg_nr] = True
# It isn't, so try to read the data from CSV
waveform = self._read_csv_waveform(ch, cw, wf_name)
# Check whether we got something
if waveform is None:
log.debug(f"{self.devname}: Waveform CSV does not exist, initializing to zeros.")
# Nope, initialize to zeros
waveform = np.zeros(32)
self._awg_waveforms[wf_name]['waveform'] = waveform
# write the CSV file
self._write_csv_waveform(ch, cw, wf_name, waveform)
else:
# Got data, update dictionary
self._awg_waveforms[wf_name]['waveform'] = waveform
# Get the waveform data from our dictionary, which must now
# have the data
return self._awg_waveforms[wf_name]['waveform']
return read_func
def _read_csv_waveform(self, ch: int, cw: int, wf_name: str):
filename = os.path.join(
self._get_awg_directory(), 'waves',
self.devname + '_' + wf_name + '.csv')
try:
log.debug(f"{self.devname}: reading waveform from csv '{filename}'")
return np.genfromtxt(filename, delimiter=',')
except OSError as e:
            # if the waveform does not exist yet, don't raise an exception
log.warning(e)
return None
def _length_match_waveforms(self, awg_nr):
"""
Adjust the length of a codeword waveform such that each individual
waveform of the pair has the same length
"""
log.info('Length matching waveforms for dynamic waveform upload.')
wf_table = self._get_waveform_table(awg_nr)
matching_updated = False
iter_id = 0
# We iterate over the waveform table
while(matching_updated or iter_id == 0):
iter_id += 1
if iter_id > 10:
raise StopIteration
log.info('Length matching iteration {}.'.format(iter_id))
matching_updated = False
for wf_name, other_wf_name in wf_table:
len_wf = len(self._awg_waveforms[wf_name]['waveform'])
len_other_wf = len(self._awg_waveforms[other_wf_name]['waveform'])
# First one is shorter
if len_wf < len_other_wf:
log.info(f"{self.devname}: Modifying {wf_name} for length matching.")
# Temporarily unset the readonly flag to be allowed to append zeros
readonly = self._awg_waveforms[wf_name]['readonly']
self._awg_waveforms[wf_name]['readonly'] = False
self.set(wf_name, np.concatenate(
(self._awg_waveforms[wf_name]['waveform'], np.zeros(len_other_wf-len_wf))))
self._awg_waveforms[wf_name]['dirty'] = True
self._awg_waveforms[wf_name]['readonly'] = readonly
matching_updated = True
elif len_other_wf < len_wf:
log.info(f"{self.devname}: Modifying {other_wf_name} for length matching.")
readonly = self._awg_waveforms[other_wf_name]['readonly']
self._awg_waveforms[other_wf_name]['readonly'] = False
self.set(other_wf_name, np.concatenate(
(self._awg_waveforms[other_wf_name]['waveform'], np.zeros(len_wf-len_other_wf))))
self._awg_waveforms[other_wf_name]['dirty'] = True
self._awg_waveforms[other_wf_name]['readonly'] = readonly
matching_updated = True
def _clear_dirty_waveforms(self, awg_nr):
"""
        Clear the 'dirty' flag of both codeword waveforms in each pair
        belonging to the given AWG, typically after they have been uploaded.
"""
log.info(f"{self.devname}: Clearing dirty waveform tag for AWG {awg_nr}")
for cw in range(self._num_codewords):
wf_name = gen_waveform_name(2*awg_nr+0, cw)
self._awg_waveforms[wf_name]['dirty'] = False
other_wf_name = gen_waveform_name(2*awg_nr+1, cw)
self._awg_waveforms[other_wf_name]['dirty'] = False
def _clear_readonly_waveforms(self, awg_nr):
"""
Clear the read-only flag of all configured waveforms. Typically used when switching
configurations (i.e. programs).
"""
for cw in range(self._num_codewords):
wf_name = gen_waveform_name(2*awg_nr+0, cw)
self._awg_waveforms[wf_name]['readonly'] = False
other_wf_name = gen_waveform_name(2*awg_nr+1, cw)
self._awg_waveforms[other_wf_name]['readonly'] = False
def _set_readonly_waveform(self, ch: int, cw: int):
"""
Mark a waveform as being read-only. Typically used to limit which waveforms the user
is allowed to change based on the overall configuration of the instrument and the type
of AWG program being executed.
"""
# Sanity check
if cw >= self._num_codewords:
raise ziConfigurationError(
'Codeword {} is out of range of the configured number of codewords ({})!'.format(cw, self._num_codewords))
if ch >= self._num_channels():
raise ziConfigurationError(
'Channel {} is out of range of the configured number of channels ({})!'.format(ch, self._num_channels()))
# Name of this waveform
wf_name = gen_waveform_name(ch, cw)
# Check if the waveform data is in our dictionary
if wf_name not in self._awg_waveforms:
raise ziConfigurationError(
'Trying to mark waveform {} as read-only, but the waveform has not been configured yet!'.format(wf_name))
self._awg_waveforms[wf_name]['readonly'] = True
def _upload_updated_waveforms(self, awg_nr):
"""
Loop through all configured waveforms and use dynamic waveform uploading
to update changed waveforms on the instrument as needed.
"""
        # FIXME: _get_waveform_table should also be implemented for the UHF
log.info(f"{self.devname}: Using dynamic waveform update for AWG {awg_nr}.")
wf_table = self._get_waveform_table(awg_nr)
for dio_cw, (wf_name, other_wf_name) in enumerate(wf_table):
if self._awg_waveforms[wf_name]['dirty'] or self._awg_waveforms[other_wf_name]['dirty']:
# Combine the waveforms and upload
wf_data = merge_waveforms(self._awg_waveforms[wf_name]['waveform'],
self._awg_waveforms[other_wf_name]['waveform'])
# Write the new waveform
# print('DEBUG::upload_updated_waveforms awg_nr={}; dio_cw={}\n'.format(awg_nr,dio_cw))
# print('DEBUG::upload_updated_waveforms {}'.format(wf_data))
self.setv(
'awgs/{}/waveform/waves/{}'.format(awg_nr, dio_cw), wf_data)
def _codeword_table_preamble(self, awg_nr):
"""
Defines a snippet of code to use in the beginning of an AWG program in order to define the waveforms.
The generated code depends on the instrument type. For the HDAWG instruments, we use the setDIOWaveform
function. For the UHF-QA we simply define the raw waveforms.
"""
raise NotImplementedError('Virtual method with no implementation!')
def _configure_awg_from_variable(self, awg_nr):
"""
Configures an AWG with the program stored in the object in the self._awg_program[awg_nr] member.
"""
log.info(f"{self.devname}: Configuring AWG {awg_nr} with predefined codeword program")
if self._awg_program[awg_nr] is not None:
full_program = \
'// Start of automatically generated codeword table\n' + \
self._codeword_table_preamble(awg_nr) + \
'// End of automatically generated codeword table\n' + \
self._awg_program[awg_nr]
self.configure_awg_from_string(awg_nr, full_program)
else:
logging.warning(f"{self.devname}: No program configured for awg_nr {awg_nr}.")
def _write_cmd_to_logfile(self, cmd):
if self._logfile is not None:
now = datetime.now()
now_str = now.strftime("%d/%m/%Y %H:%M:%S")
self._logfile.write(f'#{now_str}\n')
self._logfile.write(f'{self.name}.{cmd}\n')
def _flush_logfile(self):
if self._logfile is not None:
self._logfile.flush()
##########################################################################
# Public methods: node helpers
##########################################################################
def setd(self, path, value) -> None:
self._write_cmd_to_logfile(f'daq.setDouble("{path}", {value})')
self.daq.setDouble(self._get_full_path(path), value)
def getd(self, path):
return self.daq.getDouble(self._get_full_path(path))
def seti(self, path, value) -> None:
        self._write_cmd_to_logfile(f'daq.setInt("{path}", {value})')
self.daq.setInt(self._get_full_path(path), value)
def geti(self, path):
return self.daq.getInt(self._get_full_path(path))
def sets(self, path, value) -> None:
self._write_cmd_to_logfile(f'daq.setString("{path}", {value})')
self.daq.setString(self._get_full_path(path), value)
def gets(self, path):
return self.daq.getString(self._get_full_path(path))
def setc(self, path, value) -> None:
self._write_cmd_to_logfile(f'daq.setComplex("{path}", {value})')
self.daq.setComplex(self._get_full_path(path), value)
def getc(self, path):
return self.daq.getComplex(self._get_full_path(path))
def setv(self, path, value) -> None:
# Handle absolute path
# print('DEBUG::setv {} {}'.format(path,value))
if self.use_setVector:
self._write_cmd_to_logfile(f'daq.setVector("{path}", np.array({np.array2string(value, separator=",")}))')
self.daq.setVector(self._get_full_path(path), value)
else:
self._write_cmd_to_logfile(f'daq.vectorWrite("{path}", np.array({np.array2string(value, separator=",")}))')
self.daq.vectorWrite(self._get_full_path(path), value)
def getv(self, path):
path = self._get_full_path(path)
value = self.daq.get(path, True, 0)
if path not in value:
raise ziValueError('No value returned for path ' + path)
else:
return value[path][0]['vector']
def getdeep(self, path, timeout=5.0):
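        # Request the value asynchronously and poll until a reply for this
        # path arrives or the timeout expires; returns None on timeout.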
path = self._get_full_path(path)
self.daq.getAsEvent(path)
while timeout > 0.0:
value = self.daq.poll(0.01, 500, 4, True)
if path in value:
return value[path]
else:
timeout -= 0.01
return None
def subs(self, path:str) -> None:
full_path = self._get_full_path(path)
if full_path not in self._subscribed_paths:
self._subscribed_paths.append(full_path)
self.daq.subscribe(full_path)
def unsubs(self, path:str=None) -> None:
if path is None:
for path in self._subscribed_paths:
self.daq.unsubscribe(path)
self._subscribed_paths.clear()
else:
full_path = self._get_full_path(path)
if full_path in self._subscribed_paths:
del self._subscribed_paths[self._subscribed_paths.index(full_path)]
self.daq.unsubscribe(full_path)
def poll(self, poll_time=0.1):
return self.daq.poll(poll_time, 500, 4, True)
def sync(self) -> None:
self.daq.sync()
##########################################################################
# Public methods
##########################################################################
def start(self):
log.info(f"{self.devname}: Starting '{self.name}'")
self.check_errors()
# Loop through each AWG and check whether to reconfigure it
for awg_nr in range(self._num_channels()//2):
self._length_match_waveforms(awg_nr)
# If the reconfiguration flag is set, upload new program
if self._awg_needs_configuration[awg_nr]:
log.debug(f"{self.devname}: Detected awg configuration tag for AWG {awg_nr}.")
self._configure_awg_from_variable(awg_nr)
self._awg_needs_configuration[awg_nr] = False
self._clear_dirty_waveforms(awg_nr)
else:
log.debug(f"{self.devname}: Did not detect awg configuration tag for AWG {awg_nr}.")
# Loop through all waveforms and update accordingly
self._upload_updated_waveforms(awg_nr)
self._clear_dirty_waveforms(awg_nr)
# Start all AWG's
for awg_nr in range(self._num_channels()//2):
# Skip AWG's without programs
if self._awg_program[awg_nr] is None:
# to configure all awgs use "upload_codeword_program" or specify
# another program
logging.warning(f"{self.devname}: Not starting awg_nr {awg_nr}.")
continue
# Check that the AWG is ready
if not self.get('awgs_{}_ready'.format(awg_nr)):
raise ziReadyError(
'Tried to start AWG {} that is not ready!'.format(awg_nr))
# Enable it
self.set('awgs_{}_enable'.format(awg_nr), 1)
log.info(f"{self.devname}: Started '{self.name}'")
def stop(self):
log.info('Stopping {}'.format(self.name))
# Stop all AWG's
for awg_nr in range(self._num_channels()//2):
self.set('awgs_{}_enable'.format(awg_nr), 0)
self.check_errors()
# FIXME: temporary solution for issue
def FIXMEclose(self) -> None:
try:
# Disconnect application server
self.daq.disconnect()
except AttributeError:
pass
super().close()
def check_errors(self, errors_to_ignore=None) -> None:
raise NotImplementedError('Virtual method with no implementation!')
def clear_errors(self) -> None:
raise NotImplementedError('Virtual method with no implementation!')
def demote_error(self, code: str):
"""
Demote a ZIRuntime error to a warning.
Arguments
code (str)
The error code of the exception to ignore.
The error code gets logged as an error before the exception
is raised. The code is a string like "DIOCWCASE".
"""
self._errors_to_ignore.append(code)
def reset_waveforms_zeros(self):
"""
Sets all waveforms to an array of 48 zeros.
"""
t0 = time.time()
wf = np.zeros(48)
waveform_params = [value for key, value in self.parameters.items()
if 'wave_ch' in key.lower()]
for par in waveform_params:
par(wf)
t1 = time.time()
log.info('Set all waveforms to zeros in {:.1f} ms'.format(1.0e3*(t1-t0)))
def configure_awg_from_string(self, awg_nr: int, program_string: str,
                                  timeout: float = 15):
"""
Uploads a program string to one of the AWGs in a UHF-QA or AWG-8.
This function is tested to work and give the correct error messages
when compilation fails.
"""
log.info(f'{self.devname}: Configuring AWG {awg_nr} from string.')
# Check that awg_nr is set in accordance with devtype
self._check_awg_nr(awg_nr)
t0 = time.time()
success_and_ready = False
# This check (and while loop) is added as a workaround for #9
while not success_and_ready:
log.info(f'{self.devname}: Configuring AWG {awg_nr}...')
self._awgModule.set('awgModule/index', awg_nr)
self._write_cmd_to_logfile(f"_awgModule.set('awgModule/index', {awg_nr})")
self._awgModule.set(
'awgModule/compiler/sourcestring', program_string)
self._write_cmd_to_logfile(f"_awgModule.set('awgModule/compiler/sourcestring', \'\'\'{program_string}\'\'\')")
            success_msg = 'File successfully uploaded'
# Success is set to False when either a timeout or a bad compilation
# message is encountered.
success = True
while len(self._awgModule.get('awgModule/compiler/sourcestring')
['compiler']['sourcestring'][0]) > 0:
time.sleep(0.01)
if (time.time()-t0 >= timeout):
success = False
raise TimeoutError(
'Timeout while waiting for compilation to finish!')
comp_msg = (self._awgModule.get(
'awgModule/compiler/statusstring')['compiler']
['statusstring'][0])
            if not comp_msg.endswith(success_msg):
success = False
if not success:
print("Compilation failed, printing program:")
for i, line in enumerate(program_string.splitlines()):
print(i+1, '\t', line)
print('\n')
raise ziCompilationError(comp_msg)
            # Give the device up to ten seconds to become ready
for i in range(10):
ready = self.getdeep(
'awgs/{}/ready'.format(awg_nr))['value'][0]
if ready != 1:
log.warning('AWG {} not ready'.format(awg_nr))
time.sleep(1)
else:
success_and_ready = True
break
t1 = time.time()
print(self._awgModule.get('awgModule/compiler/statusstring')
['compiler']['statusstring'][0] + ' in {:.2f}s'.format(t1-t0))
# Check status
if self.get('awgs_{}_waveform_memoryusage'.format(awg_nr)) > 1.0:
log.warning(f'{self.devname}: Waveform memory usage exceeds available internal memory!')
if self.get('awgs_{}_sequencer_memoryusage'.format(awg_nr)) > 1.0:
log.warning(f'{self.devname}: Sequencer memory usage exceeds available instruction memory!')
def plot_dio_snapshot(self, bits=range(32)):
raise NotImplementedError('Virtual method with no implementation!')
def plot_awg_codewords(self, awg_nr=0, range=None):
raise NotImplementedError('Virtual method with no implementation!')
def get_idn(self) -> dict:
idn_dict = {}
idn_dict['vendor'] = 'ZurichInstruments'
idn_dict['model'] = self.devtype
idn_dict['serial'] = self.devname
idn_dict['firmware'] = self.geti('system/fwrevision')
idn_dict['fpga_firmware'] = self.geti('system/fpgarevision')
return idn_dict
def load_default_settings(self):
raise NotImplementedError('Virtual method with no implementation!')
def assure_ext_clock(self) -> None:
raise NotImplementedError('Virtual method with no implementation!')
| mit |
JFriel/honours_project | networkx/build/lib/networkx/drawing/tests/test_pylab.py | 45 | 1137 | """
Unit tests for matplotlib drawing functions.
"""
import os
from nose import SkipTest
import networkx as nx
class TestPylab(object):
@classmethod
def setupClass(cls):
global plt
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import matplotlib.pyplot as plt
plt.rcParams['text.usetex'] = False
except ImportError:
raise SkipTest('matplotlib not available.')
except RuntimeError:
raise SkipTest('matplotlib not available.')
def setUp(self):
self.G=nx.barbell_graph(5,10)
def test_draw(self):
try:
N=self.G
nx.draw_spring(N)
plt.savefig("test.ps")
nx.draw_random(N)
plt.savefig("test.ps")
nx.draw_circular(N)
plt.savefig("test.ps")
nx.draw_spectral(N)
plt.savefig("test.ps")
nx.draw_spring(N.to_directed())
plt.savefig("test.ps")
finally:
try:
os.unlink('test.ps')
except OSError:
pass
| gpl-3.0 |
baklanovp/pystella | eve.py | 1 | 13415 | #!/usr/bin/env python3
import logging
#import numpy as np
import pystella.model.sn_eve as sneve
import pystella.rf.light_curve_plot as lcp
from pystella import phys
try:
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
except ImportError as ex:
import os
import sys
exc_type, exc_obj, exc_tb = sys.exc_info()
fn = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    logging.info('%s in %s at line %s: %s', exc_type, fn, exc_tb.tb_lineno, ex)
logging.info(' Probably, you should install module: {}'.format('matplotlib'))
print()
# print(ex)
plt = None
mlines = None
# matplotlib.use("Agg")
# import matplotlib
# matplotlib.rcParams['backend'] = "TkAgg"
# matplotlib.rcParams['backend'] = "Qt5Agg"
# matplotlib.rcParams['backend'] = "Qt4Agg"
__author__ = 'bakl'
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
markers = {u'x': u'x', u'd': u'thin_diamond',
u'+': u'plus', u'*': u'star', u'o': u'circle', u'v': u'triangle_down', u'<': u'triangle_left'}
markers_style = list(markers.keys())
lines_style = lcp.linestyles
def get_parser():
import argparse
parser = argparse.ArgumentParser(description='Process PreSN configuration.')
parser.add_argument('-b', '--box',
required=False,
type=str,
default=None,
dest='box',
                        help='Make boxcar average with parameters '
                             'Delta_mass:Number:[True, if info], e.g. -b 0.5:4. '
                             'Use key -e _ELEM [-e _Ni56] to exclude elements')
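    # e.g. "-b 0.5:4" smooths the abundance profiles with a 0.5 Msun boxcar
    # applied 4 times; "-b 0.5:4:True" also prints the boxcar details.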
parser.add_argument('-r', '--rho', nargs="?",
required=False,
const=True,
dest="rho",
metavar="<r OR m>",
help="Plot Rho-figure")
parser.add_argument('--is_dum', nargs="?",
required=False,
const=True,
dest="is_dum",
help="Set is_dum = TRUE to parse abn-file with dum columns")
parser.add_argument('-x',
required=False,
dest="x",
default='m',
metavar="<m OR r OR lgR OR rsun OR m OR z>",
help="Setup abscissa: radius or lg(R) OR mass OR zone")
parser.add_argument('-s', '--save',
required=False,
type=bool,
default=False,
dest="is_save_plot",
help="save plot to pdf-file, default: False")
# parser.add_argument('-c', '--chem',
# required=False,
# type=bool,
# default=True,
# dest="is_chem",
# help="Show chemical composition, default: True")
parser.add_argument('--structure', dest='is_structure', action='store_true',
help="Show the chemical composition and rho with R/M coordinates.")
parser.add_argument('--chem', dest='is_chem', action='store_true', help="Show chemical composition [default].")
parser.add_argument('--no-chem', dest='is_chem', action='store_false', help="Not show chemical composition")
parser.set_defaults(is_chem=True)
parser.add_argument('-i', '--input', action='append', nargs=1,
metavar='model name', help='Key -i can be used multiple times')
parser.add_argument('-p', '--path',
required=False,
type=str,
default='./',
dest="path",
help="Model directory")
parser.add_argument('-e', '--elements',
required=False,
type=str,
default='H:He:C:O:Si:Fe:Ni:Ni56',
dest="elements",
help="Elements directory. \n Available: {0}".format(':'.join(sneve.eve_elements)))
parser.add_argument('--reshape',
required=False,
type=str,
default=None,
dest="reshape",
help="Reshape parameters of envelope from nstart to nend to nz-zones."
"\n Format: --reshape NZON:AXIS:XMODE:START:END:KIND. You may use * to set default value."
"\n NZON: value of zones between START and END. "
"If < 0 Nzon is the same as Nzon of the initial model "
"\n AXIS: [M* OR R OR V] - reshape along mass or radius or velocity coordinate."
"\n XMODE: [lin OR rlog* OR resize] - linear OR reversed log10 OR add/remove points. "
"\n START: zone number to start reshaping. Default: 0 (first zone)"
"\n END: zone number to end reshaping. Default: None, (equal last zone)"
"\n KIND: [np OR interp1d(..kind)], kind is ('np=np.interp', 'linear', 'nearest', "
"'zero', 'slinear', 'quadratic, 'cubic', "
"'spline' = UnivariateSpline, 'gauss' = gaussian_filter1d). Default: np "
)
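    # e.g. "--reshape 300:M:rlog" rebins the envelope to 300 zones along the
    # mass coordinate with reversed-log spacing; remaining fields keep their
    # defaults (the values here are purely illustrative).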
# parser.add_argument('-w', '--write',
# action='store_const',
# const=True,
# dest="is_write",
# help="To write the data to hyd-, abn-files")
parser.add_argument('-w', '--write',
required=False,
type=str,
default=False,
dest="write_to",
help="To write the data to hyd-, abn-files")
return parser
def print_masses(presn):
m_el_tot = 0.
for ii, el in enumerate(presn.Elements):
m = presn.mass_tot_el(el) / phys.M_sun
m_el_tot += m
print(f' {el:3}: {m:.3e}')
print(f' M_full(Elements) = {m_el_tot:.3f}')
print(f' M_total = {presn.m_tot/phys.M_sun:.3f}')
# via density
print(f' M_tot(Density) = {presn.mass_tot_rho()/phys.M_sun:.3f}')
def main():
import os
import sys
from itertools import cycle
def get(arr, i, default):
if i < len(arr):
            if arr[i] != '*':
                return arr[i]
return default
parser = get_parser()
args, unknownargs = parser.parse_known_args()
eve_prev = None
markersize = 4
fig = None
if args.path:
pathDef = os.path.expanduser(args.path)
else:
pathDef = os.getcwd()
# if args.elements:
if '_' in args.elements:
elements = list(sneve.eve_elements)
excluded = args.elements.split(':')
for e in excluded:
if not e.startswith('_'):
                logger.error('In exclude mode every element must start with "_". Offending element: ' + e)
sys.exit(2)
e = e[1:]
if e not in sneve.eve_elements:
logger.error('No such element: ' + e)
sys.exit(2)
elements.remove(e)
else:
elements = args.elements.split(':')
for e in elements:
if e not in sneve.eve_elements:
logger.error('No such element: ' + e)
sys.exit(2)
# Set model names
names = []
if args.input:
for nm in args.input:
            names.append(nm[0])  # action='append' with nargs=1 yields one-element lists
else:
if len(unknownargs) > 0:
names.append(unknownargs[0])
if len(names) == 0:
# logger.error(" No data. Use key '-i' ")
parser.print_help()
sys.exit(2)
if len(names) > 1: # special case
markers_cycler = cycle(markers_style)
lines_cycler = cycle(lines_style)
else:
markers_cycler = cycle([None])
lines_cycler = cycle(['-'])
ax = None
ax2 = None
handles_nm = []
for nm in names:
print("Run eve-model %s" % nm)
path, name = os.path.split(nm)
if len(path) == 0:
path = pathDef
name = name.replace('.rho', '') # remove extension
# print("Run eve-model %s in %s" % (name, path))
try:
rho_file = os.path.join(path, name + '.rho')
eve = sneve.load_rho(rho_file)
except ValueError:
try:
# With header
eve = sneve.load_hyd_abn(name=name, path=path, is_dm=False, is_dum=args.is_dum)
except ValueError:
# No header
eve = sneve.load_hyd_abn(name=name, path=path, is_dm=False, is_dum=args.is_dum, skiprows=0)
if args.reshape is not None:
a = args.reshape.split(':')
nz, axis, xmode = get(a, 0, eve.nzon), get(a, 1, 'M'), get(a, 2, 'resize') # rlog
start, end = get(a, 3, 0), get(a, 4, None)
kind = get(a, 5, 'np')
start = int(start)
if end is not None:
end = int(end)
nz = int(nz)
print(f'Resize: before Nzon={eve.nzon}')
print(f'Resize parameters: nznew= {nz} axis={axis} xmode={xmode} '
f'start= {start} end= {end} kind= {kind}')
print("The element masses: before Resize")
print_masses(eve)
eve = eve.reshape(nz=nz, axis=axis, xmode=xmode, start=start, end=end, kind=kind)
eve.chem_norm()
# eve = eve_resize
print(f'Resize: after Nzon={eve.nzon}')
print("The element masses: after Resize")
print_masses(eve)
# Boxcar
if args.box is not None:
is_info = False
s = args.box.split(':')
dm, n = float(s[0]), int(s[1])
if len(s) == 3:
is_info = bool(s[2])
print(f'Running boxcar average: dm= {dm} Msun Repeats= {n}')
print("The element masses: Before boxcar")
print_masses(eve)
eve_box = eve.boxcar(box_dm=dm, n=n, el_included=elements, is_info=is_info)
print("The element masses: After boxcar")
print_masses(eve_box)
eve, eve_prev = eve_box, eve
if args.write_to:
fname = os.path.expanduser(args.write_to)
# fname = os.path.join(path, name)
# f = fname + '.eve.abn'
fname = fname.replace('.rho', '')
f = fname + '.abn'
if eve.write_abn(f, is_header=True):
print(" abn has been saved to {}".format(f))
else:
print("Error with abn saving to {}".format(f))
# f = fname + '.eve.hyd'
f = fname + '.hyd'
if eve.write_hyd(f):
print(" hyd has been saved to {}".format(f))
else:
print("Error with hyd saving to {}".format(f))
continue
marker = next(markers_cycler)
ls = next(lines_cycler)
if args.is_structure:
fig = eve.plot_structure(elements=elements, title=name, ylimChem=(1e-8, 1.))
else:
if args.is_chem:
# print "Plot eve-model %s" % name
ax = eve.plot_chem(elements=elements, ax=ax, x=args.x, ylim=(1e-8, 1.), marker=marker,
markersize=markersize)
if eve_prev is not None:
eve_prev.plot_chem(elements=elements, ax=ax, x=args.x, ylim=(1e-8, 1.), marker=marker,
markersize=max(1, markersize - 2), alpha=0.5)
# ax.set_title('{}: before boxcar'.format(eve_prev.Name))
if args.rho:
if args.is_chem:
if ax2 is None:
ax2 = ax.twinx()
ax2.set_ylabel(r'$\rho, [g/cm^3]$ ')
else:
ax2 = ax
ax2 = eve.plot_rho(x=args.x, ax=ax2, ls=ls, marker=marker)
if eve_prev is not None:
eve_prev.plot_rho(x=args.x, ax=ax2, ls=ls, markersize=max(1, markersize - 2), alpha=0.5)
else:
ls = 'None'
handle = mlines.Line2D([], [], color='black', marker=marker,
markersize=markersize, label=name, linestyle=ls)
handles_nm.append(handle)
if len(names) > 1:
if ax2 is None:
ax2 = ax.twinx()
ax2.legend(handles=handles_nm, loc=4, fancybox=False, frameon=False)
if not args.write_to:
if args.is_save_plot:
if args.rho:
fsave = os.path.join(os.path.expanduser('~/'), 'rho_%s.pdf' % names[0])
else:
fsave = os.path.join(os.path.expanduser('~/'), 'chem_%s.pdf' % names[0])
logger.info(" Save plot to %s " % fsave)
if fig is None:
fig = ax.get_figure()
fig.savefig(fsave, bbox_inches='tight')
else:
plt.show()
if __name__ == '__main__':
main()
| mit |
IshankGulati/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 25 | 16022 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_moons
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
def test_make_moons():
X, y = make_moons(3, shuffle=False)
for x, label in zip(X, y):
center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
dist_sqr = ((x - center) ** 2).sum()
assert_almost_equal(dist_sqr, 1.0,
err_msg="Point is not on expected unit circle")
| bsd-3-clause |
yask123/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
estimators are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
        # surfaces. These points are regularly spaced and do not have a black outline.
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
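# ---------------------------------------------------------------------------
# Editor's hedged sketch (not part of the original example): one way the
# 10-fold cross-validation scores quoted in the docstring could be reproduced
# on all four features.  The import location of ``cross_val_score`` depends on
# the scikit-learn version, and the exact numbers are only indicative.
try:
    from sklearn.model_selection import cross_val_score  # scikit-learn >= 0.18
except ImportError:
    from sklearn.cross_validation import cross_val_score  # older versions

named_models = [("DecisionTree", DecisionTreeClassifier(max_depth=None)),
                ("RandomForest", RandomForestClassifier(n_estimators=n_estimators)),
                ("ExtraTrees", ExtraTreesClassifier(n_estimators=n_estimators)),
                ("AdaBoost", AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
                                                n_estimators=n_estimators))]
for name, estimator in named_models:
    cv_scores = cross_val_score(estimator, iris.data, iris.target, cv=10)
    print("%s: mean CV score %.3f (std %.3f)"
          % (name, cv_scores.mean(), cv_scores.std()))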
| bsd-3-clause |
joernhees/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 42 | 27323 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although the second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
    # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ.
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
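def check_euclidean_expansion_sketch():
    # Editor's hedged sketch (not an original scikit-learn test; the ``check_``
    # prefix keeps it out of automatic test collection): euclidean_distances
    # can reuse precomputed squared norms because of the identity
    # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, verified here with plain NumPy.
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((20, 4))
    XX = (X ** 2).sum(axis=1)[:, np.newaxis]
    YY = (Y ** 2).sum(axis=1)[np.newaxis, :]
    D_manual = np.sqrt(np.maximum(XX - 2 * np.dot(X, Y.T) + YY, 0))
    assert_array_almost_equal(D_manual, euclidean_distances(X, Y))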
def test_cosine_distances():
# Check the pairwise Cosine distances computation
rng = np.random.RandomState(1337)
x = np.abs(rng.rand(910))
XA = np.vstack([x, x])
D = cosine_distances(XA)
assert_array_almost_equal(D, [[0., 0.], [0., 0.]])
# check that all elements are in [0, 2]
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0., 0.])
XB = np.vstack([x, -x])
D2 = cosine_distances(XB)
# check that all elements are in [0, 2]
assert_true(np.all(D2 >= 0.))
assert_true(np.all(D2 <= 2.))
# check that diagonal elements are equal to 0 and non diagonal to 2
assert_array_almost_equal(D2, [[0., 2.], [2., 0.]])
# check large random matrix
X = np.abs(rng.rand(1000, 5000))
D = cosine_distances(X)
# check that diagonal elements are equal to 0
assert_array_almost_equal(D[np.diag_indices_from(D)], [0.] * D.shape[0])
assert_true(np.all(D >= 0.))
assert_true(np.all(D <= 2.))
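def check_cosine_distance_identity_sketch():
    # Editor's hedged sketch (not an original scikit-learn test): the cosine
    # distance used above is simply 1 - cosine_similarity, which is why every
    # entry lies in [0, 2].
    rng = np.random.RandomState(0)
    X = rng.random_sample((4, 3))
    Y = rng.random_sample((6, 3))
    assert_array_almost_equal(cosine_distances(X, Y),
                              1. - cosine_similarity(X, Y))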
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
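def check_chi2_kernel_formula_sketch():
    # Editor's hedged sketch (not an original scikit-learn test): the
    # exponential chi-squared kernel checked above is
    # k(x, y) = exp(-gamma * sum_i (x_i - y_i)**2 / (x_i + y_i)),
    # recomputed here with NumPy broadcasting on strictly positive data.
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4)) + 1e-3
    Y = rng.random_sample((7, 4)) + 1e-3
    gamma = 0.5
    diff = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    summ = X[:, np.newaxis, :] + Y[np.newaxis, :, :]
    K_manual = np.exp(-gamma * (diff ** 2 / summ).sum(axis=-1))
    assert_array_almost_equal(K_manual, chi2_kernel(X, Y, gamma=gamma))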
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
    # the diagonal elements of an RBF kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when the data
        # has been previously normalized by its L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyway.
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
| bsd-3-clause |
spallavolu/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
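def check_svmlight_line_format_sketch():
    # Editor's hedged sketch (not an original scikit-learn test): each
    # svmlight/libsvm line reads "<label> <index>:<value> <index>:<value> ...",
    # so the two in-memory lines below load as a 2 x 3 sparse matrix holding
    # only the listed entries (indices are detected as zero-based here).
    f = BytesIO(b("1 0:2.5 2:-1\n-1 1:4\n"))
    X, y = load_svmlight_file(f)
    assert_array_equal(y, [1, -1])
    assert_array_almost_equal(X.toarray(), [[2.5, 0, -1], [0, 4, 0]])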
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
    # test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
    # in Python 3, integers are valid file-opening arguments (taken as Unix
    # file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
spragunr/echolocation | view.py | 1 | 1280 | import h5py
import sys
import matplotlib.pyplot as plt
from scipy import signal
import numpy as np
data = h5py.File(sys.argv[1], 'r')
i = 0
while True:
entered = input("which? (enter for next)")
if entered == "":
i += 1
else:
        i = int(entered)  # jump to the requested frame index
rows = 5
for row in range(rows):
plt.subplot(rows,5,1 + 5 * row)
rgb = plt.imshow(data['rgb'][i + row,...])
plt.subplot(rows,5,2 + 5 * row)
plt.plot(data['audio_aligned'][i + row,:,:])
plt.ylim([-2**15, 2**15])
plt.subplot(rows,5,3 + 5 * row)
f, t, Sxx = signal.spectrogram(data['audio_aligned'][i + row,:,0], 44100, nperseg=256,
                                       noverlap=255)
plt.pcolormesh(t, f, np.log(1 + Sxx))
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.subplot(rows,5,4 + 5 * row)
f, t, Sxx = signal.spectrogram(data['audio_aligned'][i + row,:,1], 44100, nperseg=256,
                                       noverlap=255)
plt.pcolormesh(t, f, np.log(1 + Sxx))
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.subplot(rows,5,5 + 5 * row)
plt.imshow(data['depth'][i + row,...])
plt.show()
| mit |
rjeli/scikit-image | doc/examples/segmentation/plot_marked_watershed.py | 9 | 1988 | """
===============================
Markers for watershed transform
===============================
The watershed is a classical algorithm used for **segmentation**, that
is, for separating different objects in an image.
Here a marker image is built from the region of low gradient inside the image.
In a gradient image, the areas of high values provide barriers that help to
segment the image.
Using markers on the lower values will ensure that the segmented objects are
found.
See Wikipedia_ for more details on the algorithm.
.. _Wikipedia: http://en.wikipedia.org/wiki/Watershed_(image_processing)
"""
from scipy import ndimage as ndi
import matplotlib.pyplot as plt
from skimage.morphology import watershed, disk
from skimage import data
from skimage.filters import rank
from skimage.util import img_as_ubyte
image = img_as_ubyte(data.camera())
# denoise image
denoised = rank.median(image, disk(2))
# find continuous region (low gradient -
# where less than 10 for this image) --> markers
# disk(5) is used here to get a more smooth image
markers = rank.gradient(denoised, disk(5)) < 10
markers = ndi.label(markers)[0]
# local gradient (disk(2) is used to keep edges thin)
gradient = rank.gradient(denoised, disk(2))
# process the watershed
labels = watershed(gradient, markers)
# display results
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8), sharex=True,
                         sharey=True,
                         subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title("Original")
ax[1].imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest')
ax[1].set_title("Local Gradient")
ax[2].imshow(markers, cmap=plt.cm.spectral, interpolation='nearest')
ax[2].set_title("Markers")
ax[3].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[3].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest', alpha=.7)
ax[3].set_title("Segmented")
for a in ax:
a.axis('off')
fig.tight_layout()
plt.show()
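# ---------------------------------------------------------------------------
# Editor's hedged sketch (not part of the original example): every watershed
# region inherits its label from one of the markers, so the segment labels are
# a subset of the marker labels.  The check below reuses the ``markers`` and
# ``labels`` arrays computed above.
import numpy as np

assert set(np.unique(labels)) <= set(np.unique(markers))
print("number of segments found:", len(np.unique(labels)))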
| bsd-3-clause |
icdishb/scikit-learn | sklearn/svm/tests/test_svm.py | 14 | 29378 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.datasets.samples_generator import make_classification
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
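def check_precomputed_kernel_usage_sketch():
    # Editor's hedged sketch (not an original scikit-learn test): with
    # kernel='precomputed', fit() expects the n_train x n_train Gram matrix
    # and predict() expects an n_test x n_train matrix of kernel values
    # against the training samples, in training order.  For a linear Gram
    # matrix this must match an ordinary linear SVC.
    X_train = np.array(X, dtype=np.float64)
    X_test = np.array(T, dtype=np.float64)
    clf_pre = svm.SVC(kernel='precomputed').fit(np.dot(X_train, X_train.T), Y)
    clf_lin = svm.SVC(kernel='linear').fit(X_train, Y)
    assert_array_equal(clf_pre.predict(np.dot(X_test, X_train.T)),
                       clf_lin.predict(X_test))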
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1).fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1)
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="auto"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('auto', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
        # check that the score is better when class_weight='auto' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='auto')
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_inheritance():
# check that SVC classes can do inheritance
class ChildSVC(svm.SVC):
def __init__(self, foo=0):
self.foo = foo
svm.SVC.__init__(self)
clf = ChildSVC()
clf.fit(iris.data, iris.target)
clf.predict(iris.data[-1])
clf.decision_function(iris.data[-1])
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0)
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
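# Standalone hedged sketch (added for illustration, not part of the original
# test suite) of the callable-kernel equivalence exercised above: a user-supplied
# linear kernel should behave like the built-in 'linear' kernel.
def _demo_callable_kernel_equivalence():
    import numpy as np
    from sklearn import datasets, svm
    iris_demo = datasets.load_iris()
    clf_callable = svm.SVC(kernel=lambda a, b: np.dot(a, b.T), random_state=0)
    clf_builtin = svm.SVC(kernel='linear', random_state=0)
    clf_callable.fit(iris_demo.data, iris_demo.target)
    clf_builtin.fit(iris_demo.data, iris_demo.target)
    # Fraction of identical predictions on the training data (expected 1.0,
    # matching the assertion in the test above).
    return np.mean(clf_callable.predict(iris_demo.data) ==
                   clf_builtin.predict(iris_demo.data))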
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
anirudhjayaraman/scikit-learn | examples/ensemble/plot_gradient_boosting_regression.py | 227 | 2520 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 1,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
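# Hedged addition (not part of the original example): the per-iteration test
# error can also be tracked with staged_predict and a public metric, avoiding
# the private ``clf.loss_`` attribute used above.
def _demo_staged_test_mse():
    staged_mse = np.zeros(params['n_estimators'], dtype=np.float64)
    for i, y_staged in enumerate(clf.staged_predict(X_test)):
        staged_mse[i] = mean_squared_error(y_test, y_staged)
    return staged_mse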
| bsd-3-clause |
vermouthmjl/scikit-learn | sklearn/model_selection/_search.py | 16 | 38824 | """
The :mod:`sklearn.model_selection._search` module includes utilities to fine-tune the
parameters of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin, ChangedBehaviorWarning
from ._split import check_cv
from ._validation import _fit_and_score
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..utils import check_random_state
from ..utils.fixes import sp_version
from ..utils.random import sample_without_replacement
from ..utils.validation import _num_samples, indexable
from ..utils.metaestimators import if_delegate_has_method
from ..metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
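# Brief usage sketch (illustrative, not part of the scikit-learn API surface):
# ParameterGrid supports len(), iteration and integer indexing; the indexing is
# what ParameterSampler relies on below for memory-efficient sampling.
def _demo_parameter_grid_indexing():
    grid = ParameterGrid({'kernel': ['linear', 'rbf'], 'C': [1, 10, 100]})
    assert len(grid) == 6  # 2 kernels x 3 values of C
    # grid[i] recovers the i-th combination without materialising the full list.
    return [grid[i] for i in range(len(grid))]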
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
a sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
if sp_version < (0, 16):
params[k] = v.rvs()
else:
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
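# Hedged sketch (not part of the public module) contrasting the two sampling
# regimes documented above: an all-list grid is sampled without replacement,
# while any scipy.stats distribution triggers sampling with replacement.
def _demo_parameter_sampler_regimes():
    from scipy.stats import expon
    list_only = ParameterSampler({'C': [1, 10, 100, 1000]}, n_iter=3,
                                 random_state=0)
    with_dist = ParameterSampler({'C': [1, 10], 'gamma': expon(scale=0.1)},
                                 n_iter=3, random_state=0)
    return list(list_only), list(with_dist)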
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
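# Minimal hedged usage sketch for fit_grid_point: evaluate a single parameter
# setting on one train/test split. The toy data, split and 'accuracy' scorer
# below are illustrative assumptions.
def _demo_fit_grid_point():
    from sklearn.svm import SVC
    rng = np.random.RandomState(0)
    X_demo, y_demo = rng.randn(20, 3), rng.randint(0, 2, 20)
    train, test = np.arange(15), np.arange(15, 20)
    estimator = SVC()
    scorer = check_scoring(estimator, scoring='accuracy')
    return fit_grid_point(X_demo, y_demo, estimator, {'C': 1.0},
                          train, test, scorer, verbose=0)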
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct, getting rid of the per-instance __dict__; in particular it
# does not copy the key strings onto each instance.
# Deriving a namedtuple subclass just to introduce the __repr__ method would
# normally reintroduce the __dict__ on the instance, so we tell the Python
# interpreter that this subclass uses static __slots__ instead of dynamic
# attributes. Since we don't need any additional slot in the subclass,
# we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, labels, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y, labels = indexable(X, y, labels)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
n_splits = cv.get_n_splits(X, y, labels)
if self.verbose > 0 and isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv.split(X, y, labels))
# out is a list of (score, n_test_samples, time, parameters) tuples
n_fits = len(out)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_splits):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_splits]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_splits)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
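# Note on the aggregation above (hedged restatement): with iid=True the mean
# validation score is weighted by the number of test samples per fold,
#     score = sum_k(score_k * n_test_k) / sum_k(n_test_k),
# whereas with iid=False it is the plain mean over the n_splits folds. The
# helper below only mirrors that rule for illustration; it is not used here.
def _demo_iid_score_aggregation(fold_scores, fold_sizes, iid=True):
    if iid:
        weighted = sum(s * n for s, n in zip(fold_scores, fold_sizes))
        return weighted / float(sum(fold_sizes))
    return sum(fold_scores) / float(len(fold_scores))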
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None, labels=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
return self._fit(X, y, labels, ParameterGrid(self.param_grid))
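# Hedged sketch (not part of the class) of inspecting the grid_scores_
# attribute documented above once a search has been fitted.
def _demo_inspect_grid_scores():
    from sklearn import datasets, svm
    iris_demo = datasets.load_iris()
    search = GridSearchCV(svm.SVC(), {'kernel': ('linear', 'rbf'), 'C': [1, 10]})
    search.fit(iris_demo.data, iris_demo.target)
    # Each entry holds the parameter dict, its mean CV score and per-fold scores.
    return [(s.parameters, s.mean_validation_score)
            for s in search.grid_scores_]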
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None, labels=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, labels, sampled_params)
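# Hedged usage sketch for the class above; the estimator and the distributions
# below are arbitrary illustrative choices, not a recommendation.
def _demo_randomized_search():
    from scipy.stats import expon
    from sklearn import datasets
    from sklearn.linear_model import SGDClassifier
    iris_demo = datasets.load_iris()
    search = RandomizedSearchCV(SGDClassifier(random_state=0),
                                {'alpha': expon(scale=1e-3),
                                 'penalty': ['l1', 'l2', 'elasticnet']},
                                n_iter=5, random_state=0)
    search.fit(iris_demo.data, iris_demo.target)
    return search.best_params_, search.best_score_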
| bsd-3-clause |
NCBI-Hackathons/DASH_cell_type | model.py | 2 | 1142 | """Use parameters trained in grid search cross validation,
fit a model and compare predictions on the test RNA-seq datasets."""
import sklearn
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.linear_model import SGDClassifier
from collections import Counter
train = pd.read_table('final.txt')
del train['batch']
y = train['tissue']
del train['tissue']
mel = pd.read_table('GSE62526_Normalized_expression_values.txt.norm')
cll = pd.read_table('GSE66117_CLL_FPKM_values.txt.norm')
del mel['gene']
del cll['gene']
pca = PCA(40)
pca.fit(train)
X = pca.transform(train)
melpc = pca.transform(mel)
cllpc = pca.transform(cll)
#params from cross validation on training set
sgd = SGDClassifier('log', penalty='elasticnet', alpha=0.008, l1_ratio=0.4)
sgd.fit(X, y)
melpred = sgd.predict(melpc)
cllpred = sgd.predict(cllpc)
# all 29 are melanoma cancer
print melpred
#print Counter(melpred)
# Counter({'cancer_melanoma': 12, 'normal_ovary': 8, 'normal_lung': 7, 'normal_blood': 2})
# last 5 are controls, we got 1, acc 44.4%
print cllpred
#print Counter(cllpred)
# Counter({'cancer_leukemia': 48, 'normal_blood': 4}), acc 88.5%
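# Hedged sketch (not in the original script): a generic way to score the
# predictions above against known labels. The true per-sample annotations used
# for the accuracies quoted in the comments are not part of this script, so any
# truth vector passed to this helper is a placeholder.
def prediction_accuracy(predictions, truth):
    """Fraction of predictions that match the corresponding true labels."""
    hits = sum(1 for p, t in zip(predictions, truth) if p == t)
    return float(hits) / len(predictions)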
| cc0-1.0 |
boada/planckClusters | sims/plot_mag_z_relation.py | 1 | 7866 | #!/usr/bin/env python
from scipy import interpolate
from glob import glob
import os
import cosmology
import numpy
import matplotlib.pyplot as plt
import matplotlib
from scipy.stats import linregress
Polygon = matplotlib.patches.Polygon
def mag_z(axes):
LBCG = 4.0
z = numpy.arange(0.01, 1.5, 0.01)
mstar = mi_star_evol(z)
mstar_sub = mstar - 2.5 * numpy.log10(0.4)
BCG = mstar - 2.5 * numpy.log10(LBCG)
axes.plot(z, mstar_sub, 'k-', linewidth=0.5, label='$0.4L_\star$ galaxy')
axes.plot(z, mstar, 'k-', linewidth=1.5, label='$L_\star$ galaxy')
axes.plot(z, BCG, 'k-', linewidth=2.5, label='$%dL_\star$ (BCG)' % LBCG)
axes.set_xlabel('Redshift')
axes.legend(loc='lower right', fancybox=True, shadow=True)
axes.set_xlim(0.05, 1.5)
axes.set_ylim(20, 26)
return
def calc_completeness_model(fields):
''' Calculates the completeness using a histogram. '''
from astropy.io import ascii
bins = numpy.arange(15, 30, 0.5)
centers = (bins[:-1] + bins[1:]) / 2
data_dir = '../data/proc2_small/'
completeness_hist = []
for f in fields:
cat = '{}/{}/{}i_cal.cat'.format(data_dir, f, f)
cat = ascii.read(cat)
cat = cat.to_pandas()
cat = cat.loc[cat.MAG_AUTO < 40]
cat = cat.loc[cat.CLASS_STAR < 0.8]
# histogram the observed magnitudes
n, bins_ = numpy.histogram(cat['MAG_AUTO'], bins=bins)
# work with log10 of the counts
logn = numpy.log10(n)
# find the peak
peak = numpy.argmax(logn)
# fit a straight line to the log counts over the 2.5 mag just brighter than the peak
model = linregress(centers[peak - 5:peak], logn[peak - 5:peak])
# convert the linear model in lin-log space to log in linear space
# and figure out where 80% completeness is
# see https://en.wikipedia.org/wiki/Semi-log_plot
y = n / (10**model.intercept * 10**(centers * model.slope))
x = centers
# plot(y, x) to see how the ratio curve goes.
func = interpolate.interp1d(x, y)
# the interpolation alone wasn't reliable when asked directly for the 80%
# level, so scan a fine magnitude grid and find where the ratio drops below 0.8
mags = numpy.arange(centers[0], centers[-1], 0.1)
magdiff = 0.8 - func(mags)
# find the last bin where the difference is negative
# this is the bin, with the highest magnitude, where we go from having
# more observed objects to more objects in the model.
mag_idx = numpy.where(magdiff < 0)[0][-1]
completeness_hist.append(mags[mag_idx])
print(f, '{:.3f}'.format(mags[mag_idx]))
return completeness_hist
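# Hedged restatement of the semi-log completeness model used above: galaxy
# number counts are close to a straight line in (magnitude, log10 N), i.e.
#     N_model(m) = 10**intercept * 10**(slope * m),
# and completeness at magnitude m is N_obs(m) / N_model(m); the 80% depth is
# the faintest magnitude where that ratio still exceeds 0.8. The helper below
# is illustrative only and is not called by the script.
def completeness_ratio(n_obs, centers, slope, intercept):
    """Observed-over-model count ratio at each magnitude bin centre."""
    n_model = 10**intercept * 10**(centers * slope)
    return n_obs / n_model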
def mag_lim_hist(axes):
Ngal_o = 100
m1 = 20.0
m2 = 25.0
dm = 0.2
Niter = 10
filter = 'i'
path = '../data/sims/Catalogs_Gal_small/'
# find all of the fields we have hunted
imgs = glob('../cluster_search/round2/PSZ*/**/*A.png', recursive=True)
fields = [i.split('/')[-2] for i in imgs]
mag = numpy.arange(m1, m2, dm)
completeness = []
for f in fields:
frac = numpy.zeros_like(mag)
Ngal = Ngal_o * Niter
for i, m in enumerate(mag):
cmd = "cat %s/%s_m%.2f_%s_%s.dat | wc" % (path, f,
mag[i],
filter, '*')
# Do simple cat + wc and redirect and split stdout
Nobs = os.popen(cmd).readline().split()[0]
frac[i] = float(Nobs) / Ngal
# figure out the 80% completeness
func = interpolate.interp1d(frac, mag)
try:
completeness.append(func(0.8))
except ValueError:
completeness.append(mag[numpy.argmax(frac)])
axes.hist(completeness, bins=mag, color='#348abd', orientation='horizontal')
#axes.hist(completeness_low, bins=mag, color='#348abd')
#axes.set_ylabel('$N_{fields}$')
axes.set_xlabel('$N_{fields}$')
return axes
def mag_lim_hist_model(axes):
m1 = 20.0
m2 = 25.0
dm = 0.2
mag = numpy.arange(m1, m2, dm)
# find all of the fields we have hunted
imgs = glob('../cluster_search/round2/PSZ*/**/*A.png', recursive=True)
fields = [i.split('/')[-2] for i in imgs]
completeness = calc_completeness_model(fields)
axes.hist(completeness, bins=mag, color='#348abd',
orientation='horizontal', histtype='stepfilled')
# flip the axis
axes.invert_xaxis()
axes.set_ylim(20, 26)
axes.set_xlabel('$N_{fields}$')
axes.set_ylabel('Limiting i Magnitude')
return axes, fields, completeness
# observed mi_star as a function of redshift
def mi_star_evol(z, h=0.7, cosmo=(0.3, 0.7, 0.7)):
# Blanton's number i.e. M* - 1.5 mags
BCSPIPE = '/home/boada/Projects/planckClusters/MOSAICpipe'
evolfile = "1_0gyr_hr_m62_salp.color"
evolfile = os.path.join(BCSPIPE, "LIB/evol", evolfile)
k, ev, c = KEfit(evolfile)
dlum = cosmology.dl(z, cosmology=cosmo)
# Blanton M*
Mi_star = -21.22 - 5 * numpy.log10(h) # + self.evf['i'](z)[0]
dlum = cosmology.dl(z, cosmology=cosmo)
DM = 25.0 + 5.0 * numpy.log10(dlum)
mx = Mi_star + DM + k['i'](z) + ev['i'](z) - ev['i'](0.1)
return mx
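# Hedged restatement of the relation implemented above: the observed L*
# magnitude is the absolute magnitude plus distance modulus, k-correction and
# passive evolution referenced to z = 0.1,
#     m_i*(z) = M_i* + DM(z) + k_i(z) + [e_i(z) - e_i(0.1)],
#     DM(z) = 25 + 5 log10(d_L(z) / Mpc).
def distance_modulus(dlum_mpc):
    """Distance modulus for a luminosity distance in Mpc (illustration only)."""
    return 25.0 + 5.0 * numpy.log10(dlum_mpc)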
##################################################################
# Read both kcorrection k(z) and evolution ev(z) from BC03 model
##################################################################
def KEfit(modelfile):
import scipy
import scipy.interpolate
import tableio
print("# Getting K(z) and Ev(z) corrections from file: %s\n" % modelfile)
e = {}
k = {}
c = {}
(z, c_gr, c_ri, c_iz, k_g, k_r, k_i, k_z, e_g, e_r, e_i,
e_z) = tableio.get_data(
modelfile, cols=(0, 3, 4, 5, 10, 11, 12, 13, 14, 15, 16, 17))
# K-only correction at each age SED,
k['g'] = scipy.interpolate.interp1d(z, k_g)
k['r'] = scipy.interpolate.interp1d(z, k_r)
k['i'] = scipy.interpolate.interp1d(z, k_i)
k['z'] = scipy.interpolate.interp1d(z, k_z)
# Evolution term alone
e['g'] = scipy.interpolate.interp1d(z, e_g)
e['r'] = scipy.interpolate.interp1d(z, e_r)
e['i'] = scipy.interpolate.interp1d(z, e_i)
e['z'] = scipy.interpolate.interp1d(z, e_z)
# Color redshift
c['gr'] = scipy.interpolate.interp1d(z, c_gr)
c['ri'] = scipy.interpolate.interp1d(z, c_ri)
c['iz'] = scipy.interpolate.interp1d(z, c_iz)
return k, e, c
def add_z_cl(ax, fields, completeness):
# fix the path to get the results
import sys
sys.path.append('../results/')
from get_results import loadClusters
# confirmed clusters
high_conf = ['PSZ1_G206.45+13.89',
'PSZ1_G224.82+13.62',
'PSZ2_G029.66-47.63',
'PSZ2_G043.44-41.27',
'PSZ2_G096.43-20.89',
'PSZ2_G120.76+44.14',
'PSZ2_G125.55+32.72',
'PSZ2_G137.24+53.93',
'PSZ2_G305.76+44.79',
'PSZ2_G107.83-45.45',
'PSZ2_G098.38+77.22',
'PSZ1_G084.62-15.86',
'PSZ2_G106.11+24.11',
'PSZ2_G173.76+22.92',
'PSZ2_G191.82-26.64']
# get the density for the confirmed fields.
depth = [completeness[fields.index(hc)] for hc in numpy.sort(high_conf)]
# the confirmed = True gets the 15 confirmed clusters
results = loadClusters(round=3, confirmed=True)
# sort the results
results.sort_values('Cluster', inplace=True)
ax.scatter(results['z_cl_boada'], depth, s=150, marker='*', color='#e24a33')
return ax
if __name__ == "__main__":
f = plt.figure(figsize=(7, 7 * (numpy.sqrt(5.) - 1.0) / 2.0))
ax = plt.subplot2grid((1, 4), (0, 0), colspan=2)
axs = plt.subplot2grid((1, 4), (0, 2), colspan=2)
ax, fields, completeness = mag_lim_hist_model(ax)
mag_z(axs)
add_z_cl(axs, fields, completeness)
plt.tight_layout()
plt.show()
| mit |
hsiaoyi0504/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly spaced and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
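# Hedged sketch (defined but not executed here) of how the 10-fold
# cross-validation scores quoted in the module docstring could be reproduced;
# exact values depend on the scikit-learn version and random seeds.
def _demo_docstring_cv_scores():
    try:  # import path depends on the scikit-learn version
        from sklearn.model_selection import cross_val_score
    except ImportError:
        from sklearn.cross_validation import cross_val_score
    scores = {}
    for model in models:
        name = str(type(model)).split(".")[-1][:-2]
        scores[name] = cross_val_score(clone(model), iris.data, iris.target,
                                       cv=10).mean()
    return scores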
| bsd-3-clause |
lcharleux/compmod | doc/example_code/models/ring_compression_compart.py | 1 | 5409 | from compmod.models import RingCompression
from abapy import materials
from abapy.misc import load
import matplotlib.pyplot as plt
import numpy as np
import pickle, copy
import platform
#PAREMETERS
compart = True
is_3D = True
unloading = False
export_fields = False
inner_radius, outer_radius = 45.96 , 50
Nt, Nr, Na = 80, 8, 10
#Ne = Nt * Nr
if is_3D == False :
Ne = Nt * Nr
elType = "CPS4"
else:
Ne = Nt * Nr * Na
elType = "C3D8"
disp = 45.
nFrames = 100
thickness = 15.
E = 64000. * np.ones(Ne) # Young's modulus
nu = .3 * np.ones(Ne) # Poisson's ratio
Ssat = 910. * np.ones(Ne)
n = 207. * np.ones(Ne)
sy_mean = 165.
ray_param = sy_mean/1.253314
sy = np.random.rayleigh(ray_param, Ne)
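# Note (added for clarity): 1.253314 is sqrt(pi / 2). A Rayleigh variate with
# scale sigma has mean sigma * sqrt(pi / 2), so dividing by this factor makes
# the sampled yield stresses average to sy_mean.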
labels = ['mat_{0}'.format(i+1) for i in xrange(len(sy))]
material = [materials.Bilinear(labels = labels[i], E = E[i], nu = nu[i], Ssat = Ssat[i], n=n[i], sy = sy[i]) for i in xrange(Ne)]
workdir = "workdir/"
label = "ringCompression_compart"
filename = 'force_vs_disp_ring1.txt'
node = platform.node()
if node == 'serv2-ms-symme':
cpus = 6
else:
cpus = 1
if node == 'lcharleux': abqlauncher = '/opt/Abaqus/6.9/Commands/abaqus' # Ludovic
if node == 'serv2-ms-symme': abqlauncher = '/opt/abaqus/Commands/abaqus' # Linux
if node == 'epua-pd47':
abqlauncher = 'C:/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe' # Local machine configuration
if node == 'SERV3-MS-SYMME':
abqlauncher = '"C:/Program Files (x86)/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe"' # Local machine configuration
if node == 'epua-pd45':
abqlauncher = 'C:\SIMULIA/Abaqus/Commands/abaqus'
def read_file(file_name):
'''
Read a two-column data file and convert it to numbers
'''
f = open(file_name, 'r') # Opening the file
lignes = f.readlines() # Reads all lines one by one and stores them in a list
f.close() # Closing the file
# lignes.pop(0) # Delete the line break for each line
force_exp, disp_exp = [],[]
for ligne in lignes:
data = ligne.split() # Lines are split
disp_exp.append(float(data[0]))
force_exp.append(float(data[1]))
return -np.array(disp_exp), -np.array(force_exp)
disp_exp, force_exp = read_file(filename)
#TASKS
run_sim = True
plot = True
#MODEL DEFINITION
m = RingCompression( material = material ,
inner_radius = inner_radius,
outer_radius = outer_radius,
disp = disp/2,
thickness = thickness/2,
nFrames = nFrames,
Nr = Nr,
Nt = Nt,
Na = Na,
unloading = unloading,
export_fields = export_fields,
workdir = workdir,
label = label,
elType = elType,
abqlauncher = abqlauncher,
cpus = cpus,
is_3D = is_3D,
compart = compart)
# SIMULATION
m.MakeMesh()
if run_sim:
m.MakeInp()
m.Run()
m.PostProc()
# SOME PLOTS
mesh = m.mesh
outputs = load(workdir + label + '.pckl')
if outputs['completed']:
# Fields
if export_fields == True :
def field_func(outputs, step):
"""
A function that defines the scalar field you want to plot
"""
return outputs['field']['S'][step].vonmises()
def plot_mesh(ax, mesh, outputs, step, field_func =None, zone = 'upper right', cbar = True, cbar_label = 'Z', cbar_orientation = 'horizontal', disp = True):
"""
A function that plots the deformed mesh with a given field on it.
"""
mesh2 = copy.deepcopy(mesh)
if disp:
U = outputs['field']['U'][step]
mesh2.nodes.apply_displacement(U)
X,Y,Z,tri = mesh2.dump2triplot()
xb,yb,zb = mesh2.get_border()
xe, ye, ze = mesh2.get_edges()
if zone == "upper right": kx, ky = 1., 1.
if zone == "upper left": kx, ky = -1., 1.
if zone == "lower right": kx, ky = 1., -1.
if zone == "lower left": kx, ky = -1., -1.
ax.plot(kx * xb, ky * yb,'k-', linewidth = 2.)
ax.plot(kx * xe, ky * ye,'k-', linewidth = .5)
if field_func != None:
field = field_func(outputs, step)
grad = ax.tricontourf(kx * X, ky * Y, tri, field.data)
if cbar :
bar = plt.colorbar(grad, orientation = cbar_orientation)
bar.set_label(cbar_label)
# Exp data
disp_exp, force_exp = read_file("test_expD2.txt")
fig = plt.figure("Fields")
plt.clf()
ax = fig.add_subplot(1, 1, 1)
ax.set_aspect('equal')
plt.grid()
plot_mesh(ax, mesh, outputs, 0, field_func, cbar_label = '$\sigma_{eq}$')
plot_mesh(ax, mesh, outputs, 0, field_func = None, cbar = False, disp = False)
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.savefig(workdir + label + '_fields.pdf')
# Load vs disp
force = -4. * outputs['history']['force']
disp = -2. * outputs['history']['disp']
fig = plt.figure('Load vs. disp')
plt.clf()
plt.plot(disp.data[0], force.data[0], 'ro-', label = 'Loading', linewidth = 2.)
if unloading == True : plt.plot(disp.data[1], force.data[1], 'bv-', label = 'Unloading', linewidth = 2.)
plt.plot(disp_exp, force_exp, 'k-', label = 'Exp', linewidth = 2.)
plt.legend(loc="upper left")
plt.grid()
plt.xlabel('Displacement, $U$')
plt.ylabel('Force, $F$')
plt.savefig(workdir + label + '_load-vs-disp.pdf')
else:
print 'Simulation not completed'
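# Dump the computed displacement / force pairs to a plain text file.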
g = open('tutu.txt', 'w')
for i in range(0, len(disp.data[0])):
line = g.write(repr(disp.data[0][i]) + '\t' + repr(force.data[0][i]))
line = g.write('\n')
g.close()
| gpl-2.0 |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/python/lib/python3.3/site-packages/numpy/lib/function_base.py | 1 | 115305 | __docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov',
'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning',
'bartlett', 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc',
'add_docstring', 'meshgrid', 'delete', 'insert', 'append', 'interp',
'add_newdoc_ufunc']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
from ._compiled_base import _insert, add_docstring
from ._compiled_base import digitize, bincount, interp as compiled_interp
from .arraysetops import setdiff1d
from .utils import deprecate
from ._compiled_base import add_newdoc_ufunc
import numpy as np
import collections
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try: iter(y)
except: return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi+0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins+1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
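# The input is processed in fixed-size blocks: each block is sorted and the
# cumulative counts (or cumulative weights) at the bin edges are accumulated
# via searchsorted; np.diff below turns these into per-bin counts.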
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero,], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
the bin density, i.e., the bin count divided by the bin hypervolume.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights for
the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal'\
' to the dimension of the sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError("Element at index %s in `bins` should be "
"a positive integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
else:
edges[i] = asarray(bins[i], float)
nbin[i] = len(edges[i])+1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError("""
Found bin edge of size <= 0. Did you specify `bins` with
non-monotonic sequence?""")
nbin = asarray(nbin)
# Handle empty input.
if N == 0:
return np.zeros(nbin-2), edges
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:,i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
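# xy is the flat (mixed-radix) index of each sample in the flattened histogram,
# with the dimensions taken in the order given by ni; the axes are swapped back
# into their original order after the counts are reshaped below.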
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i,j)
ni[i],ni[j] = ni[j],ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1,-1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays -- useful if your data contains
"missing" values
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
if weights is None :
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else :
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape :
if axis is None :
raise TypeError(
"Axis must be specified when shapes of a "\
"and weights differ.")
if wgt.ndim != 1 :
raise TypeError(
"1D weights expected when shapes of a and "\
"weights differ.")
if wgt.shape[0] != a.shape[axis] :
raise ValueError(
"Length of weights not compatible with "\
"specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
def asarray_chkfinite(a, dtype=None, order=None):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a, dtype=float)
array([1., 2.])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a, dtype=dtype, order=order)
if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all():
raise ValueError(
"array must not contain infs or NaNs")
return a
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.arange(6) - 2.5
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray)):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not isinstance(item, collections.Callable):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError(
"list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
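# Build an integer selector S: for each element it holds the 1-based position
# of the first True condition (0 if none is True), so that choose() below picks
# either the default (index 0) or the matching entry of choicelist.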
S = 0
pfac = 1
for k in range(1, n+1):
S += k * pfac * asarray(condlist[k-1])
if k < n:
pfac *= (1-asarray(condlist[k-1]))
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape)==1:
pfac = asarray(1)
for k in range(n2+1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S*ones(asarray(pfac).shape, type(S))
else:
S = S*ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
def copy(a, order='K'):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :meth:`ndarray.copy` are very
similar, but have different default values for their order=
arguments.)
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, order=order, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
gradient : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and first differences on endpoints
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
otype = 'd'
# Difference of datetime64 elements results in timedelta64
if otype == 'M' :
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace('datetime', 'timedelta')
elif otype == 'm' :
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
for axis in range(N):
# select out appropriate parts for this dimension
out = np.empty_like(f, dtype=otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
out[slice1] = (f[slice2] - f[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0])
out[slice1] = (f[slice2] - f[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2])
out[slice1] = (f[slice2] - f[slice3])
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
diff : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasingness is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
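# Wrap the consecutive differences into [-pi, pi), but map a result of exactly
# -pi back to +pi when the original jump was positive; the accumulated
# corrections are then added back onto p below.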
ddmod = mod(dd+pi, 2*pi)-pi
_nx.copyto(ddmod, pi, where=(ddmod==-pi) & (dd > 0))
ph_correct = ddmod - dd;
_nx.copyto(ph_correct, 0, where=abs(dd)<discont)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a,copy=True)
b.sort()
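# For real input, the sorted values are cast to a complex dtype wide enough to
# hold them ('F', 'G' or 'D'); complex input is returned as-is after sorting.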
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.: break
else: first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.: break
else: last = last - 1
return filt[first:last]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
items = list(set(x))
items.sort()
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
Returns
-------
extract : ndarray
Rank 1 array of values from `arr` where `condition` is True.
See Also
--------
take, put, copyto, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that
`place` uses the first N elements of `vals`, where N is the number of
True values in `mask`, while `copyto` uses the elements where `mask`
is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
Boolean mask array. Must have the same size as `a`.
vals : 1-D sequence
Values to put into `a`. Only the first N elements are used, where
N is the number of True values in `mask`. If `vals` is smaller
than N it will be repeated.
See Also
--------
copyto, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
if np.issubdtype(y.dtype, np.integer):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.copyto(y, fill, where=mask)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
def nansum(a, axis=None):
"""
Return the sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute
the sum of the flattened array.
Returns
-------
y : ndarray
An array with the same shape as a, with the specified axis removed.
If a is a 0-d array, or if axis is None, a scalar is returned with
the same dtype as `a`.
See Also
--------
numpy.sum : Sum across array including Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
If positive or negative infinity are present the result is positive or
negative infinity. But if both positive and negative infinity are present,
the result is Not A Number (NaN).
Arithmetic is modular when using integer types (all elements of `a` must
then be finite, i.e. not NaN or infinite, since those values only exist for
floating point types), and no error is raised on overflow.
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
When positive infinity and negative infinity are present
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, np.NINF])
nan
"""
return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
"""
Return the minimum of an array or minimum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired.
axis : int, optional
Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
Returns
-------
nanmin : ndarray
A new array or a scalar array with the result.
See Also
--------
numpy.amin : Minimum across array including any Not a Numbers.
numpy.nanmax : Maximum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmin.reduce(a, axis)
else:
return np.fmin.reduce(a.flat)
def nanargmin(a, axis=None):
"""
Return indices of the minimum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
"""
Return the maximum of an array or maximum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not
an array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
If `a` is a 0-d array, or if axis is None, an ndarray scalar is
returned, with the same dtype as `a`.
See Also
--------
numpy.amax : Maximum across array including any Not a Numbers.
numpy.nanmin : Minimum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmax.reduce(a, axis)
else:
return np.fmax.reduce(a.flat)
def nanargmax(a, axis=None):
"""
Return indices of the maximum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None, excluded=None, cache=False)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If `None`, the docstring will be the
``pyfunc.__doc__``.
excluded : set, optional
Set of strings or integers representing the positional or keyword
arguments for which the function will not be vectorized. These will be
passed directly to `pyfunc` unmodified.
.. versionadded:: 1.7.0
cache : bool, optional
If `True`, then cache the first function call that determines the number
of outputs if `otypes` is not provided.
.. versionadded:: 1.7.0
Returns
-------
vectorized : callable
Vectorized function.
Examples
--------
>>> def myfunc(a, b):
... "Return a-b if a>b, otherwise return a+b"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
such as the coefficients for a polynomial as in `polyval`:
>>> def mypolyval(p, x):
... _p = list(p)
... res = _p.pop(0)
... while _p:
... res = res*x + _p.pop(0)
... return res
>>> vpolyval = np.vectorize(mypolyval, excluded=['p'])
>>> vpolyval(p=[1, 2, 3], x=[0, 1])
array([3, 6])
Positional arguments may also be excluded by specifying their position:
>>> vpolyval.excluded.add(0)
>>> vpolyval([1, 2, 3], x=[0, 1])
array([3, 6])
Notes
-----
The `vectorize` function is provided primarily for convenience, not for
performance. The implementation is essentially a for loop.
If `otypes` is not specified, then a call to the function with the first
argument will be used to determine the number of outputs. The results of
this call will be cached if `cache` is `True` to prevent calling the
function twice. However, to implement the cache, the original function must
be wrapped which will slow down subsequent calls, so only do this if your
function is expensive.
The new keyword argument interface and `excluded` argument support further
degrades performance.
"""
def __init__(self, pyfunc, otypes='', doc=None, excluded=None, cache=False):
self.pyfunc = pyfunc
self.cache = cache
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError("Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if self.otypes and not self.excluded:
self._ufunc = None # Caching to improve default performance
def __call__(self, *args, **kwargs):
"""
Return arrays with the results of `pyfunc` broadcast (vectorized) over
`args` and `kwargs` not in `excluded`.
"""
excluded = self.excluded
if not kwargs and not excluded:
func = self.pyfunc
vargs = args
else:
# The wrapper accepts only positional arguments: we use `names` and
# `inds` to mutate `the_args` and `kwargs` to pass to the original
# function.
nargs = len(args)
names = [_n for _n in kwargs if _n not in excluded]
inds = [_i for _i in range(nargs) if _i not in excluded]
the_args = list(args)
def func(*vargs):
for _n, _i in enumerate(inds):
the_args[_i] = vargs[_n]
kwargs.update(list(zip(names, vargs[len(inds):])))
return self.pyfunc(*the_args, **kwargs)
vargs = [args[_i] for _i in inds]
vargs.extend([kwargs[_n] for _n in names])
return self._vectorize_call(func=func, args=vargs)
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
assert args
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple functions
# at least -- this wrapping can almost double the execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = ''.join([asarray(outputs[_k]).dtype.char
for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
def _vectorize_call(self, func, args):
"""Vectorized call to `func` over positional `args`."""
if not args:
_res = func()
else:
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
# Convert args to object arrays first
inputs = [array(_a, copy=False, subok=True, dtype=object)
for _a in args]
outputs = ufunc(*inputs)
if ufunc.nout == 1:
_res = array(outputs,
copy=False, subok=True, dtype=otypes[0])
else:
_res = tuple([array(_x, copy=False, subok=True, dtype=_t)
for _x, _t in zip(outputs, otypes)])
return _res
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
X = array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y), axis)
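# Center each variable on its mean; the covariance matrix is then the matrix
# of cross products of the centered data divided by (N - ddof).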
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
if c.size == 0:
# handle empty arrays
return c
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
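# Editorial sketch (not part of the original module): a minimal check, assuming only
# public NumPy, that corrcoef is cov normalized by the outer product of the per-variable
# standard deviations, i.e. P_ij = C_ij / sqrt(C_ii * C_jj) as described above.
def _corrcoef_sketch():
    import numpy as np
    x = np.array([[0., 1., 2.], [2., 1., 0.]])
    c = np.cov(x)
    d = np.sqrt(np.diag(c))
    assert np.allclose(np.corrcoef(x), c / np.outer(d, d))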
def blackman(M):
"""
Return the Blackman window.
    The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, with the maximum value normalized to one
(the value one appears only if the number of samples is odd), with
the first and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
discontinuities at the beginning and end of the sampled signal) or
tapering function. The fourier transform of the Bartlett is the product
of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy.fft import fft, fftshift
>>> window = np.bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning was named for Julius von Hann, an Austrian meteorologist. It is
also known as the Cosine Bell. Some authors prefer that it be called a
Hann window, to help avoid confusion with the very similar Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in range(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is partitioned
into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
expansions are employed in each interval. Relative error on the domain
[0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x<0)
x[ind] = -x[ind]
ind = (x<=8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
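# Editorial sketch (not part of the original module): spot-checks of np.i0, assuming only
# public NumPy. I_0(0) = 1, and the two Chebyshev expansions used above are expected to
# agree where the domain is split at |x| = 8.
def _i0_sketch():
    import numpy as np
    assert np.allclose(np.i0([0.0]), 1.0)
    left = np.i0([8.0 - 1e-9])   # evaluated with the [0, 8] expansion
    right = np.i0([8.0 + 1e-9])  # evaluated with the (8, inf) expansion
    assert np.allclose(left, right, rtol=1e-6)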
def kaiser(M,beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, with the maximum value normalized to one (the value
one appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise NaNs will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> np.kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> window = np.kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0,M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a
Lanczos resampling filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.arange(-20., 21.)/5.
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.arange(-200., 201.)/50.
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
y = pi* where(x == 0, 1.0e-20, x)
return sin(y)/y
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a,subok=True,copy=True)
b.sort(0)
return b
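# Editorial sketch (not part of the original module): the equivalence stated in the Notes
# section above, np.msort(a) == np.sort(a, axis=0), checked on a small array.
def _msort_sketch():
    import numpy as np
    a = np.array([[3, 1], [2, 4]])
    assert np.array_equal(np.msort(a), np.sort(a, axis=0))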
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if sorted.shape == ():
# make 0-D arrays work
return sorted.item()
if axis is None:
axis = 0
indexer = [slice(None)] * sorted.ndim
index = int(sorted.shape[axis]/2)
if sorted.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(sorted[indexer], axis=axis, out=out)
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted.
Default is False. Note that, if `overwrite_input` is True and the
input is not already an array, an error will be raised.
Returns
-------
pcntile : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the qth percentile of V is the qth ranked
value in a sorted copy of V. A weighted average of the two nearest
neighbors is used if the normalized ranking does not match q exactly.
The same as the median if ``q=50``, the same as the minimum if ``q=0``
and the same as the maximum if ``q=100``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
    >>> np.percentile(a, 50, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
3.5
"""
a = np.asarray(a)
if q == 0:
return a.min(axis=axis, out=out)
elif q == 100:
return a.max(axis=axis, out=out)
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
raise ValueError("percentile must be either in the range [0,100]")
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
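# Editorial sketch (not part of the original module): worked example of the interpolation
# above. For 6 sorted values, q = 40 gives index = 0.4 * (6 - 1) = 2.0 (integral, so the
# third value is returned as-is), while q = 50 gives index = 2.5, so the values at
# positions 2 and 3 are averaged with weights 0.5 and 0.5.
def _percentile_sketch():
    import numpy as np
    v = np.array([1., 2., 3., 4., 5., 6.])
    assert np.percentile(v, 40) == 3.0
    assert np.percentile(v, 50) == 3.5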
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
trapz : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will
be taken from `y` array, by default x-axis distances between points will be
1.0, alternatively they can be provided with `x` array or with `dx` scalar.
Return value will be equal to combined area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError: # Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
"""
try:
new = {}
exec('from %s import %s' % (place, obj), new)
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new[obj], val[0]), val[1].strip())
except:
pass
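# Editorial usage sketch (hedged): add_newdoc is typically called with a module path, an
# object name, and either a docstring or (attribute, docstring) pairs. The names below
# are purely hypothetical placeholders, not real NumPy objects.
#
# add_newdoc('mypackage.core', 'MyType', 'Class-level docstring added at import time.')
# add_newdoc('mypackage.core', 'MyType', ('method_a', 'Docstring for MyType.method_a.'))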
# Based on scitools meshgrid
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from two or more coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
copy : bool, optional
If False, a view into the original arrays are returned in
order to conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous arrays.
Furthermore, more than one element of a broadcast array may refer to
a single memory location. If you need to write to the arrays, make
copies first.
Returns
-------
X1, X2,..., XN : ndarray
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
Notes
-----
This function supports both indexing conventions through the indexing keyword
argument. Giving the string 'ij' returns a meshgrid with matrix indexing,
while 'xy' returns a meshgrid with Cartesian indexing. In the 2-D case
with inputs of length M and N, the outputs are of shape (N, M) for 'xy'
indexing and (M, N) for 'ij' indexing. In the 3-D case with inputs of
length M, N and P, outputs are of shape (N, M, P) for 'xy' indexing and (M,
N, P) for 'ij' indexing. The difference is illustrated by the following
code snippet::
xv, yv = meshgrid(x, y, sparse=False, indexing='ij')
for i in range(nx):
for j in range(ny):
# treat xv[i,j], yv[i,j]
xv, yv = meshgrid(x, y, sparse=False, indexing='xy')
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> nx, ny = (3, 2)
>>> x = np.linspace(0, 1, nx)
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = meshgrid(x, y)
>>> xv
array([[ 0. , 0.5, 1. ],
[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0., 0., 0.],
[ 1., 1., 1.]])
>>> xv, yv = meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
array([[ 0. , 0.5, 1. ]])
>>> yv
array([[ 0.],
[ 1.]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = meshgrid(x, y, sparse=True)
>>> z = np.sin(xx**2 + yy**2) / (xx**2 + yy**2)
>>> h = plt.contourf(x,y,z)
"""
if len(xi) < 2:
msg = 'meshgrid() takes 2 or more arguments (%d given)' % int(len(xi) > 0)
raise ValueError(msg)
args = np.atleast_1d(*xi)
ndim = len(args)
copy_ = kwargs.get('copy', True)
sparse = kwargs.get('sparse', False)
indexing = kwargs.get('indexing', 'xy')
if not indexing in ['xy', 'ij']:
raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [x.reshape(s0[:i] + (-1,) + s0[i + 1::]) for i, x in enumerate(args)]
shape = [x.size for x in output]
if indexing == 'xy':
# switch first and second axis
output[0].shape = (1, -1) + (1,)*(ndim - 2)
output[1].shape = (-1, 1) + (1,)*(ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
            ndim = arr.ndim
            axis = ndim - 1
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >=N):
raise ValueError(
"invalid entry")
        newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(range(start, stop, step))
if numtodel <= 0:
if wrap:
                return wrap(arr.copy())
else:
return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, integer)):
if (obj < 0): obj += N
if obj < 0 or obj > N:
raise ValueError(
"index (%d) out of range (0<=index<=%d) "\
"in dimension %d" % (obj, N, axis))
values = array(values, copy=False, ndmin=arr.ndim)
values = np.rollaxis(values, 0, axis+1)
obj = [obj] * values.shape[axis]
elif isinstance(obj, slice):
# turn it into a range object
obj = arange(*obj.indices(N),**{'dtype':intp})
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
| gpl-3.0 |
seaotterman/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 30 | 2039 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
def optimizer_exp_decay():
global_step = tf.contrib.framework.get_or_create_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
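# Editorial note (hedged): with the default staircase=False, tf.train.exponential_decay
# follows decayed_lr = learning_rate * decay_rate ** (global_step / decay_steps), so the
# schedule above starts at 0.1 and falls to roughly 0.1 * 0.001 = 1e-4 by step 100.
# A plain-Python sketch of that curve, for illustration only:
def _decayed_lr_sketch(step, lr=0.1, decay_rate=0.001, decay_steps=100):
  return lr * decay_rate ** (step / float(decay_steps))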
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
hidden_units=[10, 20, 10],
n_classes=3,
optimizer=optimizer_exp_decay)
classifier.fit(x_train, y_train, steps=800)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
ChinaQuants/tushare | tushare/datayes/IV.py | 10 | 3423 | # -*- coding:utf-8 -*-
"""
DataYes (Tonglian Data) implied-volatility interfaces
Created on 2015/10/12
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class IV():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def DerIv(self, beginDate='', endDate='', optID='', SecID='', field=''):
"""
        Raw implied volatility, including option price, cumulative trading volume, open interest, implied volatility, etc.
"""
code, result = self.client.getData(vs.DERIV%(beginDate, endDate, optID, SecID, field))
return _ret_data(code, result)
def DerIvHv(self, beginDate='', endDate='', SecID='', period='', field=''):
"""
        Historical volatility: close-to-close historical volatility over each time period.
"""
code, result = self.client.getData(vs.DERIVHV%(beginDate, endDate, SecID, period, field))
return _ret_data(code, result)
def DerIvIndex(self, beginDate='', endDate='', SecID='', period='', field=''):
"""
        Implied volatility index: the primary measure of the average volatility of at-the-money options expiring in 30 to 1080 days.
"""
code, result = self.client.getData(vs.DERIVINDEX%(beginDate, endDate, SecID, period, field))
return _ret_data(code, result)
def DerIvIvpDelta(self, beginDate='', endDate='', SecID='', delta='', period='', field=''):
"""
        Implied volatility surface (parametric smoothed curves), standardized by delta (0.1 to 0.9 in steps of 0.05) and time to expiry (1 month to 3 years).
"""
code, result = self.client.getData(vs.DERIVIVPDELTA%(beginDate, endDate, SecID, delta, period, field))
return _ret_data(code, result)
def DerIvParam(self, beginDate='', endDate='', SecID='', expDate='', field=''):
"""
        Parameterized implied volatility surface: the surface obtained by smoothing a quadratic volatility curve at each expiry (a, b, c curve coefficients).
"""
code, result = self.client.getData(vs.DERIVPARAM%(beginDate, endDate, SecID, expDate, field))
return _ret_data(code, result)
def DerIvRawDelta(self, beginDate='', endDate='', SecID='', delta='', period='', field=''):
"""
        Implied volatility surface (from raw implied volatility), standardized by delta (0.1 to 0.9 in steps of 0.05) and time to expiry (1 month to 3 years).
"""
code, result = self.client.getData(vs.DERIVRAWDELTA%(beginDate, endDate, SecID, delta, period, field))
return _ret_data(code, result)
def DerIvSurface(self, beginDate='', endDate='', SecID='', contractType='', field=''):
"""
        Implied volatility surface (by moneyness), standardized by moneyness. Strike range from -60% to +60% in 5% steps; expiries from 1 month to 3 years.
"""
code, result = self.client.getData(vs.DERIVSURFACE%(beginDate, endDate, SecID, contractType, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
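# Editorial usage sketch (hedged; not part of the original module). Assumes a valid
# DataYes token has been registered beforehand (see tushare.util.upass) and that the
# service is reachable; the security ID below is illustrative only.
#
# iv = IV()
# df = iv.DerIv(beginDate='20150101', endDate='20150130', SecID='510050.XSHG')
# if df is not None:
#     print(df.head())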
| bsd-3-clause |
michalkurka/h2o-3 | h2o-py/h2o/model/extensions/varimp.py | 2 | 3362 | from h2o.utils.ext_dependencies import get_matplotlib_pyplot
from h2o.utils.typechecks import assert_is_type
class VariableImportance:
def _varimp_plot(self, num_of_features=None, server=False):
"""
Plot the variable importance for a trained model.
:param num_of_features: the number of features shown in the plot (default is 10 or all if less than 10).
:param server: if true set server settings to matplotlib and show the graph
:returns: None.
"""
assert_is_type(num_of_features, None, int)
assert_is_type(server, bool)
plt = get_matplotlib_pyplot(server)
if plt is None:
return
# get the variable importances as a list of tuples, do not use pandas dataframe
importances = self.varimp(use_pandas=False)
# features labels correspond to the first value of each tuple in the importances list
feature_labels = [tup[0] for tup in importances]
        # scaled importances correspond to the third value of each tuple in the importances list
scaled_importances = [tup[2] for tup in importances]
# specify bar centers on the y axis, but flip the order so largest bar appears at top
pos = range(len(feature_labels))[::-1]
# specify the bar lengths
val = scaled_importances
# default to 10 or less features if num_of_features is not specified
if num_of_features is None:
num_of_features = min(len(val), 10)
fig, ax = plt.subplots(1, 1, figsize=(14, 10))
# create separate plot for the case where num_of_features == 1
if num_of_features == 1:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
ax.margins(None, 0.5)
else:
plt.barh(pos[0:num_of_features], val[0:num_of_features], align="center",
height=0.8, color="#1F77B4", edgecolor="none")
# Hide the right and top spines, color others grey
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_color("#7B7B7B")
ax.spines["left"].set_color("#7B7B7B")
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position("left")
ax.xaxis.set_ticks_position("bottom")
plt.yticks(pos[0:num_of_features], feature_labels[0:num_of_features])
plt.ylim([min(pos[0:num_of_features])- 1, max(pos[0:num_of_features])+1])
# ax.margins(y=0.5)
# check which algorithm was used to select right plot title
plt.title("Variable Importance: H2O %s" % self._model_json["algo_full_name"], fontsize=20)
if not server:
plt.show()
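# Editorial usage sketch (hedged; not part of the original module). _varimp_plot is mixed
# into trained supervised H2O models that report variable importances (e.g. GBM/DRF);
# the public entry point is assumed to be the model's varimp_plot() wrapper.
#
# model = H2OGradientBoostingEstimator()
# model.train(x=predictors, y=response, training_frame=train)
# model.varimp_plot(num_of_features=5)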
| apache-2.0 |
mramire8/active | experiment/experiment_utils.py | 1 | 15927 | __author__ = 'maru'
import ast
from collections import defaultdict
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from learner.adaptive_lr import LogisticRegressionAdaptive, LogisticRegressionAdaptiveV2
import matplotlib.pyplot as plt
from strategy import base_models
def extrapolate_trials(trials, cost_25=8.2, step_size=10):
cost_delta = cost_25 * step_size # Cost of 25 words based on user study
extrapolated = defaultdict(lambda: [])
for data in trials:
# print("j:%s" % j)
trial_data = np.array(data)
# print trial_data
i = 0
current_c = np.ceil(trial_data[0, 0] / cost_delta) * cost_delta
# print "starting at %s ending at %s" % (current_c, trial_data.shape[0])
while i < trial_data.shape[0] - 1: # while reaching end of rows
a = trial_data[i]
a1 = trial_data[i + 1]
# print("P1:{0}\t{2}\tP2{1}".format(a,a1,current_c))
if a[0] <= current_c <= a1[0]:
m = (a1[1] - a[1]) / (a1[0] - a[0]) * (current_c - a[0])
z = m + a[1]
extrapolated[current_c].append(z)
# np.append(extrapolated, [current_c,z])
# print("{0},z:{1}".format(current_c,z))
current_c += cost_delta
if a1[0] < current_c:
i += 1
return extrapolated
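# Editorial sketch (hedged, with made-up trial data): each trial is a sequence of
# (cost, accuracy) points; the loop above linearly interpolates every trial at multiples
# of cost_25 * step_size so trials recorded on different cost grids can be averaged.
def _extrapolate_trials_sketch():
    trials = [[(0.0, 0.5), (200.0, 0.8)]]      # one trial, two (cost, accuracy) points
    curves = extrapolate_trials(trials, cost_25=8.2, step_size=10)
    # keys are multiples of 82.0; e.g. curves[82.0][0] is 0.5 + 0.3 * 82 / 200
    return curves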
def parse_parameters(str_parameters):
parts = str_parameters.split(",")
params = [float(xi) for xi in parts]
return params
def parse_parameters_mat(str_parameters):
params = ast.literal_eval(str_parameters)
return params
def set_expert_model():
pass
def set_classifier(cl_name, **kwargs):
clf = None
if cl_name in "mnb":
alpha = 1
if 'parameter' in kwargs:
alpha = kwargs['parameter']
clf = MultinomialNB(alpha=alpha)
elif cl_name == "lr":
c = 1
if 'parameter' in kwargs:
c = kwargs['parameter']
clf = LogisticRegression(penalty="l1", C=c)
elif cl_name == "lrl2":
c = 1
if 'parameter' in kwargs:
c = kwargs['parameter']
clf = LogisticRegression(penalty="l2", C=c)
elif cl_name == "lradapt":
c = 1
if 'parameter' in kwargs:
c = kwargs['parameter']
clf = LogisticRegressionAdaptive(penalty="l1", C=c)
elif cl_name == "lradaptv2":
c = 1
if 'parameter' in kwargs:
c = kwargs['parameter']
clf = LogisticRegressionAdaptiveV2(penalty="l1", C=c)
else:
raise ValueError("We need a classifier name for the student [lr|mnb]")
return clf
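# Editorial usage sketch: the name picks the learner family and `parameter` sets its
# strength (alpha for "mnb", C for the logistic-regression variants), per the code above.
def _set_classifier_sketch():
    clf = set_classifier("lrl2", parameter=0.1)   # L2-penalized LogisticRegression, C=0.1
    return clf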
def format_spent(spent):
string = ""
for s in spent:
string = string + "{0:.2f}, ".format(s)
return string
def set_cost_model(cost_function, parameters):
if "uniform" in cost_function:
# uniform cost model
cost_model = base_models.BaseCostModel()
elif "log" in cost_function:
# linear cost model f(d) = x0*ln(|d|) + x1
cost_model = base_models.LogCostModel(parameters=parameters)
elif "linear" in cost_function:
# linear cost model f(d) = x0*|d| + x1
cost_model = base_models.FunctionCostModel(parameters=parameters)
elif "direct" in cost_function:
# linear cost model f(d) = x0*|d| + x1
cost_model = base_models.LookUpCostModel(parameters=parameters)
else:
raise Exception("We need a defined cost function options [uniform|log|linear|direct]")
return cost_model
def print_results(x_axis, accuracies, aucs, ts=None):
# print the cost x-axis
print
print "Number of x points %s" % len(x_axis.keys())
axis_x = sorted(x_axis.keys())
counts = [len(x_axis[xi]) for xi in axis_x]
axis_y = [np.mean(x_axis[xi]) for xi in axis_x]
axis_z = [np.std(x_axis[xi]) for xi in axis_x]
print "Id\tCost_Mean\tCost_Std"
for a, b, c, d in zip(axis_x, axis_y, axis_z, counts):
print "%d\t%0.3f\t%0.3f\t%d" % (a, b, c, d)
# print the accuracies
x = sorted(accuracies.keys())
y = [np.mean(accuracies[xi]) for xi in x]
z = [np.std(accuracies[xi]) for xi in x]
w = [np.size(accuracies[xi]) for xi in x]
print
print "Cost\tAccu_Mean\tAccu_Std"
for a, b, c, d in zip(axis_y, y, z, w):
print "%0.3f\t%0.3f\t%0.3f\t%d" % (a, b, c, d)
x = sorted(aucs.keys())
y = [np.mean(aucs[xi]) for xi in x]
z = [np.std(aucs[xi]) for xi in x]
print
print "Cost\tAUC_Mean\tAUC_Std"
for a, b, c in zip(axis_y, y, z):
print "%0.3f\t%0.3f\t%0.3f" % (a, b, c)
if ts is not None:
x = sorted(ts.keys())
y = [np.mean(ts[xi]) for xi in x]
z = [np.std(ts[xi]) for xi in x]
print
print "Cost\tTS_Mean\tTS_Std"
for a, b, c in zip(axis_y, y, z):
print "%0.3f\t%0.3f\t%0.3f" % (a, b, c)
def plot_performance(x, y, title, xaxis, yaxis):
plt.clf()
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.legend()
plt.plot(x, y, '--bo')
# plt.hold(True)
# x = np.array([min(x), max(x)])
# y = intercept + slope * x
# plt.plot(x, y, 'r-')
plt.savefig('{1}-{0}.pdf'.format(yaxis, title))
# plt.show()
def oracle_accuracy(oracle, file_name="out", cm=None, num_trials=5):
# print the cost x-axis
print
print "Oracle Accuracy over %s" % len(oracle.keys())
# print the accuracies
x = sorted(oracle.keys())
y = [np.mean(oracle[xi]) for xi in x]
z = [np.std(oracle[xi]) for xi in x]
w = [np.size(oracle[xi]) for xi in x]
step = x[1]- x[0]
print
print "Cost\tAccu_Mean\tAccu_Std"
for a, b, c, d in zip(x, y, z, w):
print "%0.3f\t%0.3f\t%0.3f\t%d" % (a, 1.*b/step, c, d)
# plot_performance(x, y, "Oracle Accuracy Performance " + file_name, "Cost", "Oracle Accuracy")
print_file(x, y, z, "{}-accuracy.txt".format("oracle-"+file_name))
print "\nCost\tAccu_t1\tAccu_t2\tAccu_t3\tAccu_t4\tAccu_t5"
trials = [oracle[xi] for xi in x]
for xi, t in zip(x,trials):
# print "%0.3f\t%0.3f\t%0.3f\t%0.3f\t%0.3f\t%0.3f" % (xi, *t)
line = [xi]
line.extend(t)
print "\t".join(["{0:.3f}".format(i) for i in line])
# plot_performance(x, y, "Oracle Accuracy Performance " + file_name, "Cost", "Oracle Accuracy")
print_file2(x, trials, "{}-accuracy.txt".format("oracle-bytrial"+file_name))
if cm is not None:
print "\nORACLE CONFUSION MATRIX AVERAGE"
print "\nCost\tT0\tF1\tF0\tT1\t"
trials = [cm[xi] for xi in x]
all_t = []
for xi, t in zip(x, trials):
# print "%0.3f\t%0.3f\t%0.3f\t%0.3f\t%0.3f\t%0.3f" % (xi,t[0],t[1],t[2],t[3],t[4])
sum = np.array(t[0], dtype=np.float)
for ti in t[1:]:
sum = sum + np.array(ti)
all_t.append("%0.1f \t" % xi + "\t".join(["{0[0]} {0[1]} {1[0]} {1[1]}".format(v[0],v[1]) for v in t])) ## all trials
ave = sum/num_trials
# print ave
print "%0.3f\t%s" % (xi,"{0[0][0]}\t{0[0][1]}\t{0[1][0]}\t{0[1][1]}".format(ave))
#["{0[0]} {0[1]} {1[0]} {1[1]}".format(*v) for v in t]
print_file_cm(x, cm,"{}-cm-accuracy.txt".format("oracle-"+file_name), num_trials=num_trials)
print "\nAll trials of confusion matrix"
print "\n".join(all_t)
def print_extrapolated_results(accuracies, aucs, file_name="out"):
# print the cost x-axis
print
print "Number of x points %s" % len(accuracies.keys())
# print the accuracies
x = sorted(accuracies.keys())
y = [np.mean(accuracies[xi]) for xi in x]
z = [np.std(accuracies[xi]) for xi in x]
w = [np.size(accuracies[xi]) for xi in x]
print
print "Cost\tAccu_Mean\tAccu_Std"
for a, b, c, d in zip(x, y, z, w):
print "%0.3f\t%0.3f\t%0.3f\t%d" % (a, b, c, d)
# plot_performance(x, y, "Accuracy Performance " + file_name, "Cost", "Accuracy")
print_file(x, y, z, "{}-accuracy.txt".format(file_name))
x = sorted(aucs.keys())
y = [np.mean(aucs[xi]) for xi in x]
z = [np.std(aucs[xi]) for xi in x]
print
print "Cost\tAUC_Mean\tAUC_Std"
for a, b, c in zip(x, y, z):
print "%0.3f\t%0.3f\t%0.3f" % (a, b, c)
# plot_performance(x, y, "AUC Performance " + file_name, "Cost", "AUC")
print_file(x, y, z, "{}-auc.txt".format(file_name))
def print_file(x, y, z, file_name):
f = open(file_name, "w")
f.write("COST\tMEAN\tSTDEV\n")
for a, b, c in zip(x, y, z):
f.write("{0:.3f}\t{1:.3f}\t{2:.3f}\n".format(a, b, c))
f.close()
def print_file2(x, trial, file_name):
f = open(file_name, "w")
f.write("Cost\tAccu_t1\tAccu_t2\tAccu_t3\tAccu_t4\tAccu_t5\n")
for a, t in zip(x, trial):
# print a, t
tline = "\t".join(["{0:.3f}".format(i) for i in t])
# f.write("{0:.3f}\t{1:.3f}\t{2:.3f}\t{3:.3f}\t{4:.3f}\t{5:.3f}\n".format(a,*t))
f.write("{0:.3f}\t{1}\n".format(a,tline))
f.close()
def print_file_cm(x, cm, file_name, num_trials=5.0):
f = open(file_name, "w")
f.write("Cost\tT0\tF1\tF0\tT1\n")
trials = [cm[xi] for xi in x]
for xi, t in zip(x, trials):
sum = np.array(t[0], dtype=np.float)
for ti in t[1:]:
sum = sum + np.array(ti)
ave = sum/num_trials
# print ave
f.write("%0.3f\t%s\n" % (xi,"{0[0][0]}\t{0[0][1]}\t{0[1][0]}\t{0[1][1]}".format(ave)))
f.close()
def format_list(list):
string = ""
for r in list:
for c in r:
string = string + "{0}\t".format(c)
string = string + ", "
return string
def print_features(coef, names):
""" Print sorted list of non-zero features/weights. """
### coef = clf.coef_[0]
### names = vec.get_feature_names()
print "*" * 50
print("Number of Features: %s" % len(names))
print "\n".join('%s\t%.2f' % (names[j], coef[j]) for j in np.argsort(coef)[::-1] if coef[j] != 0)
print "*" * 50
def split_data_sentences(data, sent_detector, vct, limit=0):
sent_train = []
labels = []
tokenizer = vct.build_tokenizer()
print ("Spliting into sentences... Limit:", limit)
## Convert the documents into sentences: train
for t, sentences in zip(data.target, sent_detector.batch_tokenize(data.data)):
if limit is None:
sents = [s for s in sentences if len(tokenizer(s)) > 1]
elif limit > 0:
sents = [s for s in sentences if len(s.strip()) > limit]
elif limit == 0:
sents = [s for s in sentences]
        sent_train.extend(sents)  # add the sentences separately as individual documents
labels.extend([t] * len(sents)) # Give the label of the document to all its sentences
return labels, sent_train #, dump
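# A minimal usage sketch (hypothetical objects, none of which are defined in this file:
# `news` is a scikit-learn 20-newsgroups-style bunch with .data and .target,
# `sent_detector` an NLTK punkt tokenizer, and `vct` a fitted CountVectorizer):
#
#   sent_labels, sentences = split_data_sentences(news, sent_detector, vct, limit=2)
#   assert len(sent_labels) == len(sentences)  # every sentence inherits its document's label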
def split_data_first1sentences(data, sent_detector, vct, limit=0):
'''
This function is for testing purposes only, do not use in a model
:param data:
:param sent_detector:
:param vct:
:param limit:
:return:
'''
sent_train = []
labels = []
dumped_labels = []
tokenizer = vct.build_tokenizer()
dump = []
f1 = []
print ("Spliting into sentences... Limit:", limit)
## Convert the documents into sentences: train
for t, sentences in zip(data.target, sent_detector.batch_tokenize(data.data)):
# sents = [s for s in sentences if len(s) > 1]
f1.extend([sentences[0]])
if limit is None:
sents = [s for s in sentences if len(tokenizer(s)) > 1]
elif limit > 0:
sents = [s for s in sentences if len(s.strip()) > limit]
elif limit == 0:
sents = [s for s in sentences]
dump2 = [s for s in sentences if len(s.strip()) <= limit]
dump.extend(dump2)
dumped_labels.extend([t] * len(dump2))
        sent_train.extend(sents)  # add the sentences separately as individual documents
labels.extend([t] * len(sents)) # Give the label of the document to all its sentences
# dump.extend(dp)
# print "Removing %s sentences" % len(dump)
# print "\n".join(dump)
return labels, sent_train, dump, dumped_labels, f1
def split_into_sentences(data, sent_detector, vct):
sent_train = []
tokenizer = vct.build_tokenizer()
# print ("Spliting into sentences...")
## Convert the documents into sentences: train
for sentences in sent_detector.batch_tokenize(data):
sents = [s for s in sentences if len(tokenizer(s)) > 1]
        sent_train.extend(sents)  # add the sentences separately as individual documents
return sent_train
import re
def clean_html(data):
sent_train = []
print ("Cleaning text ... ")
for text in data:
doc = text.replace("<br>", ". ")
# doc = doc.replace("\r\n", ". ")
doc = doc.replace("<br />", ". ")
doc = re.sub(r"\.", ". ", doc)
# doc = re.sub(r"x*\.x*", ". ", doc)
sent_train.extend([doc])
return sent_train
def get_student(clf, cost_model, sent_clf, sent_token, vct, args):
from strategy import structured
cheating = args.cheating
if args.student in "rnd_sr":
student = structured.AALRandomThenSR(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_max)
student.fn_utility = student.utility_rnd
elif args.student in "rnd_srre":
student = structured.AALRandomThenSR(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_max)
student.fn_utility = student.utility_rnd
elif args.student in "rnd_srcs":
student = structured.AALRandomThenSR(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_max)
student.fn_utility = student.utility_rnd
student.class_sensitive_utility()
elif args.student in "rnd_srmv":
student = structured.AALRandomThenSR(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_max)
student.fn_utility = student.utility_rnd
student.majority_vote_utility()
elif args.student in "rnd_first1":
student = structured.AALRandomThenSR(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_fk)
student.fn_utility = student.utility_rnd
elif args.student in "rnd_rnd":
student = structured.AALRandomThenSR(model=clf, accuracy_model=None, budget=args.budget,
seed=args.seed, vcn=vct, subpool=250, cost_model=cost_model)
student.set_sent_score(student.score_rnd)
student.fn_utility = student.utility_rnd
else:
raise ValueError("Oops! We do not know that anytime strategy. Try again.")
student.set_score_model(clf) # student classifier
student.set_sentence_model(sent_clf) # cheating part, use and expert in sentences
student.set_cheating(cheating)
student.limit = args.limit
if args.calibrate:
student.set_sent_score(student.score_p0)
student.calibratescores = True
student.set_calibration_threshold(parse_parameters_mat(args.calithreshold))
student.sent_detector = sent_token
return student
| apache-2.0 |
CSLDepend/raven2_sim | plot2.py | 1 | 5623 | '''/* Runs Raven 2 simulator by calling packet generator, Raven control software, and visualization code
* Copyright (C) 2015 University of Illinois Board of Trustees, DEPEND Research Group, Creators: Homa Alemzadeh and Daniel Chen
*
* This file is part of Raven 2 Surgical Simulator.
* Plots the results of the latest run vs. the golden run
*
* Raven 2 Surgical Simulator is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Raven 2 Surgical Simulator is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Raven 2 Control. If not, see <http://www.gnu.org/licenses/>.
*/'''
import csv
import time
import os
import subprocess
import sys
import matplotlib.pyplot as plt
import math
import numpy as np
from parse_plot import *
from sys import argv
print "\nPlotting the results.."
# Get raven_home directory
env = os.environ.copy()
splits = env['ROS_PACKAGE_PATH'].split(':')
raven_home = splits[0]
# Parse the arguments
try:
    script, mode = argv
    mode = int(mode)
except:
print "Error: missing parameters"
print 'python plot2.py 0|1'
sys.exit(2)
# Open Log files
csvfile1 = open(raven_home+'/robot_run.csv')
reader1 = csv.reader(x.replace('\0', '') for x in csvfile1)
csvfile2 = open(raven_home+'/golden_run/latest_run.csv')
reader2 = csv.reader(x.replace('\0', '') for x in csvfile2)
# Parse the robot run
orig_mpos, orig_mvel, orig_dac, orig_jpos, orig_pos, orig_err, orig_packets, orig_t = parse_latest_run(reader1)
# Parse the golden simulator run
gold_mpos, gold_mvel, gold_dac, gold_jpos, gold_pos, gold_err, gold_packets, gold_t = parse_latest_run(reader2)
#orig_mpos, orig_mvel, orig_dac, orig_jpos, orig_pos = parse_input_data(in_file)
# Parse the latest run of simulator
csvfile3 = open(raven_home+'/latest_run.csv')
reader3 = csv.reader(x.replace('\0', '') for x in csvfile3)
mpos, mvel, dac, jpos, pos, err, packet_nums, t = parse_latest_run(reader3)
# Close files
csvfile1.close()
csvfile2.close()
csvfile3.close()
plot_mpos(gold_mpos, orig_mpos, mpos, gold_mvel, orig_mvel, mvel, gold_t, orig_t, t).savefig(raven_home+'/figures/mpos_mvel.png')
plot_dacs(gold_dac, orig_dac, dac, gold_t, orig_t, t).savefig(raven_home+'/figures/dac.png')
plot_jpos(gold_jpos, orig_jpos, jpos, gold_t, orig_t, t).savefig(raven_home+'/figures/jpos.png')
plot_pos(gold_pos, orig_pos, pos, gold_t, orig_t, t).savefig(raven_home+'/figures/pos.png')
# Log the results
indices = [0,1,2,4,5,6,7]
posi = ['X','Y','Z']
if mode == 0:
output_file = raven_home+'/fault_free_log.csv'
if mode == 1:
output_file = raven_home+'/error_log.csv'
# Write the headers for new file
if not(os.path.isfile(output_file)):
csvfile4 = open(output_file,'w')
writer4 = csv.writer(csvfile4,delimiter=',')
if mode == 0:
output_line = 'Num_Packets'+','
if mode == 1:
output_line = 'Variable, Start, Duration, Value, Num_Packets, Errors, '
for i in range(0,len(mpos)):
output_line = output_line + 'err_mpos' + str(indices[i]) + ','
output_line = output_line + 'err_mvel' + str(indices[i]) + ','
output_line = output_line + 'err_jpos' + str(indices[i]) + ','
for i in range(0,len(pos)):
if (i == len(pos)-1):
output_line = output_line + 'err_pos' + str(posi[i])
else:
output_line = output_line + 'err_pos' + str(posi[i]) + ','
writer4.writerow(output_line.split(','))
csvfile4.close()
# Write the rows
csvfile4 = open(output_file, 'a')
writer4 = csv.writer(csvfile4,delimiter=',')
# For faulty run, write Injection parameters
if mode == 1:
csvfile5 = open('./mfi2_params.csv','r')
inj_param_reader = csv.reader(csvfile5)
for line in inj_param_reader:
#print line
if (int(line[0]) == self.curr_inj):
param_line = line[1:]
break
csvfile5.close()
print param_line
# Write Len of Trajectory
output_line = str(len(mpos[0])) + ','
# For faulty run, write error messages and see if a jump happened
if mode == 1:
# Error messages
gold_msgs = [s for s in gold_err if s]
err_msgs = [s for s in err if s]
# If there are any errors or different errors, print them all
if err_msgs or not(err_msgs == gold_msgs):
for e in set(err_msgs):
            output_line = output_line + '#Packet ' + str(packet_nums[err.index(e)]) + ': ' + e
#
output_line = output_line + ','
# Trajectory errors
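# Each mean error below is the mean absolute deviation of the faulty run from the golden
# run, averaged over the overlapping part of the two trajectories (traj_len packets).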
mpos_error = [];
mvel_error = [];
jpos_error = [];
pos_error = [];
traj_len = min(len(mpos[0]),len(gold_mpos[0]))
for i in range(0,len(mpos)):
mpos_error.append(float(sum(abs(np.array(mpos[i][1:traj_len])-np.array(gold_mpos[i][1:traj_len]))))/traj_len)
mvel_error.append(float(sum(abs(np.array(mvel[i][1:traj_len])-np.array(gold_mvel[i][1:traj_len]))))/traj_len)
jpos_error.append(float(sum(abs(np.array(jpos[i][1:traj_len])-np.array(gold_jpos[i][1:traj_len]))))/traj_len)
output_line = output_line + str(mpos_error[i]) + ', '+ str(mvel_error[i]) +', '+ str(jpos_error[i])+','
for i in range(0,len(pos)):
pos_error.append(float(sum(abs(np.array(pos[i][1:traj_len])-np.array(gold_pos[i][1:traj_len]))))/traj_len)
if (i == len(pos)-1):
output_line = output_line + str(pos_error[i])
else:
output_line = output_line + str(pos_error[i])+','
writer4.writerow(output_line.split(','))
csvfile4.close()
| lgpl-3.0 |
waddell/urbansim | urbansim/maps/dframe_explorer.py | 5 | 4192 | from bottle import route, response, run, hook, static_file
from urbansim.utils import yamlio
import simplejson
import numpy as np
import pandas as pd
import os
from jinja2 import Environment
@hook('after_request')
def enable_cors():
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Headers'] = \
'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
DFRAMES = {}
CONFIG = None
def get_schema():
global DFRAMES
return {name: list(DFRAMES[name].columns) for name in DFRAMES}
@route('/map_query/<table>/<filter>/<groupby>/<field:path>/<agg>', method="GET")
def map_query(table, filter, groupby, field, agg):
global DFRAMES
filter = ".query('%s')" % filter if filter != "empty" else ""
df = DFRAMES[table]
if field not in df.columns:
print "Col not found, trying eval:", field
df["eval"] = df.eval(field)
field = "eval"
cmd = "df%s.groupby('%s')['%s'].%s" % \
(filter, groupby, field, agg)
print cmd
results = eval(cmd)
results[results == np.inf] = np.nan
results = yamlio.series_to_yaml_safe(results.dropna())
results = {int(k): results[k] for k in results}
return results
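# For illustration (hypothetical table and column names, not part of this module): a request
# such as /map_query/households/income > 50000/zone_id/persons/sum builds and evaluates a
# pandas expression equivalent to
#   df.query('income > 50000').groupby('zone_id')['persons'].sum()
# i.e. an optional filter, a groupby on the join column, and an aggregation, returned as a
# {zone_id: value} dict for the map.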
@route('/map_query/<table>/<filter>/<groupby>/<field>/<agg>', method="OPTIONS")
def ans_options(table=None, filter=None, groupby=None, field=None, agg=None):
return {}
@route('/')
def index():
global CONFIG
dir = os.path.dirname(__file__)
index = open(os.path.join(dir, 'dframe_explorer.html')).read()
return Environment().from_string(index).render(CONFIG)
@route('/data/<filename>')
def data_static(filename):
return static_file(filename, root='./data')
def start(views,
center=[37.7792, -122.2191],
zoom=11,
shape_json='data/zones.json',
geom_name='ZONE_ID', # from JSON file
join_name='zone_id', # from data frames
precision=8,
port=8765,
host='localhost',
testing=False):
"""
Start the web service which serves the Pandas queries and generates the
HTML for the map. You will need to open a web browser and navigate to
http://localhost:8765 (or the specified port)
Parameters
----------
views : Python dictionary
This is the data that will be displayed in the maps. Keys are strings
(table names) and values are dataframes. Each data frame should have a
column with the name specified as join_name below
center : a Python list with two floats
The initial latitude and longitude of the center of the map
zoom : int
The initial zoom level of the map
shape_json : str
The path to the geojson file which contains that shapes that will be
displayed
geom_name : str
The field name from the JSON file which contains the id of the geometry
join_name : str
The column name from the dataframes passed as views (must be in each
view) which joins to geom_name in the shapes
precision : int
The precision of values to show in the legend on the map
port : int
The port for the web service to respond on
host : str
The hostname to run the web service from
testing : bool
Whether to print extra debug information
Returns
-------
Does not return - takes over control of the thread and responds to
queries from a web browser
"""
global DFRAMES, CONFIG
DFRAMES = {str(k): views[k] for k in views}
root = "http://{}:{}/".format(host, port)
config = {
'center': str(center),
'zoom': zoom,
'shape_json': shape_json,
'geom_name': geom_name,
'join_name': join_name,
'precision': precision,
'root': root
}
for k in views:
if join_name not in views[k].columns:
raise Exception("Join name must be present on all dataframes - "
"'%s' not present on '%s'" % (join_name, k))
config['schema'] = simplejson.dumps(get_schema())
CONFIG = config
if testing:
return
run(host=host, port=port, debug=True)
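# A minimal usage sketch (illustrative names only; assumes a geojson file whose features
# carry a ZONE_ID property matching the 'zone_id' column of the dataframes passed in):
#
#   import pandas as pd
#   zones = pd.DataFrame({'zone_id': [1, 2], 'jobs': [100, 250]})
#   start({'zones': zones}, shape_json='data/zones.json',
#         geom_name='ZONE_ID', join_name='zone_id')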
| bsd-3-clause |
Ganeshgajakosh/ml_lab_ecsc_306 | labwork/lab7/sci-learn/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| apache-2.0 |
sniemi/EuclidVisibleInstrument | analysis/spotForwardModelBlurred.py | 1 | 25495 | """
CCD Spot Measurements
=====================
Analyse laboratory CCD PSF measurements by forward modelling to the data.
The methods used here seem to work reasonably well when the spot has been well centred. If however, the
spot is e.g. 0.3 pixels off then estimating the amplitude of the Airy disc becomes rather difficult.
Unfortunately this affects the following CCD PSF estimates as well and can lead to models that are
rather far from the truth. Also, when the width of the CCD PSF kernel becomes narrow, say 0.2 pixels,
it is very difficult to recover. This most likely results from inadequate sampling. In this case it might
be more appropriate to use "cross"-type kernel.
Because the amplitude can be very tricky to estimate, the version 1.4 (and later) implement a meta-parameter
called peak, which is the peak counts in the image. This is then converted to the amplitude by using the centroid
estimate. Because the centroids are fitted for each image, the amplitude can also vary in the joint fits. This
seems to improve the joint fitting constraints. Note, however, that this does couple the radius of the Airy disc
as well, because the amplitude estimate uses the x, y, and radius information as well.
One question to address is how the smoothing of the Airy disc is done. So far I have assumed that the Gaussian that
represents defocus should be centred at the same location as the Airy disc. However, if the displacement of the
Airy disc is large, then the defocus term will move the Airy disc in the direction of the displacement and make
it more asymmetric. Another option is to use scipy.ndimage.filters.gaussian_filter which simply applies Gaussian
smoothing to the input image. Based on the testing carried out this does not seem to make a huge difference. The
latter (smoothing) will lead to more or less the same CCD PSF estimates, albeit with slightly higher residuals.
We therefore adopt a Gaussian kernel that is centred with the Airy disc.
:requires: PyFITS
:requires: NumPy
:requires: SciPy
:requires: astropy
:requires: matplotlib
:requires: VISsim-Python
:requires: emcee
:requires: sklearn
:version: 0.1
:author: Sami-Matias Niemi
:contact: [email protected]
"""
import matplotlib
#matplotlib.use('pdf')
#matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 17
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.1)
matplotlib.rcParams['legend.fontsize'] = 11
matplotlib.rcParams['legend.handlelength'] = 3
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
matplotlib.rcParams['image.interpolation'] = 'none'
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pyfits as pf
import numpy as np
import emcee
import scipy
import scipy.ndimage.measurements as m
from scipy import signal
from scipy import ndimage
from scipy.special import j1, jn_zeros
from support import files as fileIO
from astropy.modeling import models, fitting
import triangle
import glob as g
import os, datetime
from multiprocessing import Pool
__author__ = 'Sami-Matias Niemi'
__vesion__ = 0.1
#fixed parameters
cores = 8
def forwardModel(file, out='Data', wavelength=None, gain=3.1, size=10, burn=500, spotx=2888, spoty=3514, run=700,
simulation=False, truths=None):
"""
Forward models the spot data found from the input file. Can be used with simulated and real data.
Notes:
- emcee is run three times as it is important to have a good starting point for the final run.
"""
print '\n\n\n'
print '_'*120
print 'Processing:', file
#get data and convert to electrons
o = pf.getdata(file)*gain
if simulation:
data = o
else:
#roughly the correct location - to avoid identifying e.g. cosmic rays
data = o[spoty-(size*3):spoty+(size*3)+1, spotx-(size*3):spotx+(size*3)+1].copy()
#maximum position within the cutout
y, x = m.maximum_position(data)
#spot and the peak pixel within the spot, this is also the CCD kernel position
spot = data[y-size:y+size+1, x-size:x+size+1].copy()
CCDy, CCDx = m.maximum_position(spot)
print 'CCD Kernel Position (within the postage stamp):', CCDx, CCDy
#bias estimate
if simulation:
bias = 9000.
rn = 4.5
else:
bias = np.median(o[spoty-size: spoty+size, spotx-220:spotx-20]) #works for read o
rn = np.std(o[spoty-size: spoty+size, spotx-220:spotx-20])
print 'Readnoise (e):', rn
if rn < 2. or rn > 6.:
print 'NOTE: suspicious readout noise estimate...'
print 'ADC offset (e):', bias
#remove bias
spot -= bias
#save to file
fileIO.writeFITS(spot, out+'small.fits', int=False)
    #make a copy to generate error array
data = spot.copy().flatten()
    #assume that uncertainties scale as sqrt of the values + readnoise
#sigma = np.sqrt(data/gain + rn**2)
tmp = data.copy()
tmp[tmp + rn**2 < 0.] = 0. #set highly negative values to zero
var = tmp.copy() + rn**2
#Gary B. said that actually this should be from the model or is biased,
#so I only pass the readout noise part now
#fit a simple model
print 'Least Squares Fitting...'
gaus = models.Gaussian2D(spot.max(), size, size, x_stddev=0.5, y_stddev=0.5)
gaus.theta.fixed = True #fix angle
p_init = gaus
fit_p = fitting.LevMarLSQFitter()
stopy, stopx = spot.shape
X, Y = np.meshgrid(np.arange(0, stopx, 1), np.arange(0, stopy, 1))
p = fit_p(p_init, X, Y, spot)
print p
model = p(X, Y)
fileIO.writeFITS(model, out+'BasicModel.fits', int=False)
fileIO.writeFITS(model - spot, out+'BasicModelResidual.fits', int=False)
#goodness of fit
gof = (1./(np.size(data) - 5.)) * np.sum((model.flatten() - data)**2 / var)
print 'GoF:', gof
print 'Done\n\n'
#maximum value
max = np.max(spot)
peakrange = (0.9*max, 1.7*max)
sum = np.sum(spot)
print 'Maximum Value:', max
print 'Sum of the values:', sum
print 'Peak Range:', peakrange
#MCMC based fitting
print 'Bayesian Model Fitting...'
nwalkers = 1000
# Initialize the sampler with the chosen specs.
#Create the coordinates x and y
x = np.arange(0, spot.shape[1])
y = np.arange(0, spot.shape[0])
#Put the coordinates in a mesh
xx, yy = np.meshgrid(x, y)
#Flatten the arrays
xx = xx.flatten()
yy = yy.flatten()
print 'Fitting full model...'
ndim = 7
#Choose an initial set of positions for the walkers - fairly large area not to bias the results
p0 = np.zeros((nwalkers, ndim))
#peak, center_x, center_y, radius, focus, width_x, width_y = theta
p0[:, 0] = np.random.normal(max, max/100., size=nwalkers) # peak value
p0[:, 1] = np.random.normal(p.x_mean.value, 0.1, size=nwalkers) # x
p0[:, 2] = np.random.normal(p.y_mean.value, 0.1, size=nwalkers) # y
print 'Using initial guess [radius, focus, width_x, width_y]:', [1.5, 0.6, 0.02, 0.03]
p0[:, 3] = np.random.normal(1.5, 0.01, size=nwalkers) # radius
p0[:, 4] = np.random.normal(0.6, 0.01, size=nwalkers) # focus
p0[:, 5] = np.random.normal(0.02, 0.0001, size=nwalkers) # width_x
p0[:, 6] = np.random.normal(0.03, 0.0001, size=nwalkers) # width_y
#initiate sampler
pool = Pool(cores) #A hack Dan gave me to not have ghost processes running as with threads keyword
#sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[xx, yy, data, var, peakrange, spot.shape],
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior,
args=[xx, yy, data, rn**2, peakrange, spot.shape],
pool=pool)
# Run a burn-in and set new starting position
print "Burning-in..."
pos, prob, state = sampler.run_mcmc(p0, burn)
maxprob_index = np.argmax(prob)
params_fit = pos[maxprob_index]
print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)
print 'Estimate:', params_fit
sampler.reset()
print "Running MCMC..."
pos, prob, state = sampler.run_mcmc(pos, run, rstate0=state)
print "Mean acceptance fraction:", np.mean(sampler.acceptance_fraction)
#Get the index with the highest probability
maxprob_index = np.argmax(prob)
#Get the best parameters and their respective errors and print best fits
params_fit = pos[maxprob_index]
errors_fit = [sampler.flatchain[:,i].std() for i in xrange(ndim)]
_printResults(params_fit, errors_fit)
#Best fit model
peak, center_x, center_y, radius, focus, width_x, width_y = params_fit
amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius, x_0=CCDx, y_0=CCDy)
airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape)
f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape)
foc = signal.convolve2d(adata, focusdata, mode='same')
CCDdata = np.array([[0.0, width_y, 0.0],
[width_x, (1.-width_y-width_y-width_x-width_x), width_x],
[0.0, width_y, 0.0]])
fileIO.writeFITS(CCDdata, 'kernel.fits', int=False)
model = signal.convolve2d(foc, CCDdata, mode='same')
#save model
fileIO.writeFITS(model, out+'model.fits', int=False)
#residuals
fileIO.writeFITS(model - spot, out+'residual.fits', int=False)
fileIO.writeFITS(((model - spot)**2 / var.reshape(spot.shape)), out+'residualSQ.fits', int=False)
# a simple goodness of fit
gof = (1./(np.size(data) - ndim)) * np.sum((model.flatten() - data)**2 / var)
maxdiff = np.max(np.abs(model - spot))
print 'GoF:', gof, ' Maximum difference:', maxdiff
if maxdiff > 2e3 or gof > 4.:
print '\nFIT UNLIKELY TO BE GOOD...\n'
print 'Amplitude estimate:', amplitude
#plot
samples = sampler.chain.reshape((-1, ndim))
extents = None
if simulation:
extents = [(0.91*truth, 1.09*truth) for truth in truths]
extents[1] = (truths[1]*0.995, truths[1]*1.005)
extents[2] = (truths[2]*0.995, truths[2]*1.005)
extents[3] = (0.395, 0.425)
extents[4] = (0.503, 0.517)
truths[0] = _peakFromTruth(truths)
print truths
fig = triangle.corner(samples,
labels=['peak', 'x', 'y', 'radius', 'focus', 'width_x', 'width_y'],
truths=truths)#, extents=extents)
fig.savefig(out+'Triangle.png')
plt.close()
pool.close()
def log_posterior(theta, x, y, z, var, peakrange, size):
"""
Posterior probability: combines the prior and likelihood.
"""
lp = log_prior(theta, peakrange)
if not np.isfinite(lp):
return -np.inf
return lp + log_likelihood(theta, x, y, z, var, size)
def log_prior(theta, peakrange):
"""
Priors, limit the values to a range but otherwise flat.
"""
peak, center_x, center_y, radius, focus, width_x, width_y = theta
if 7. < center_x < 14. and 7. < center_y < 14. and 0. < width_x < 0.25 and 0. < width_y < 0.3 and \
peakrange[0] < peak < peakrange[1] and 0.4 < radius < 2. and 0.3 < focus < 2.:
return 0.
else:
return -np.inf
def log_likelihood(theta, x, y, data, var, size):
"""
Logarithm of the likelihood function.
"""
#unpack the parameters
peak, center_x, center_y, radius, focus, width_x, width_y = theta
#1)Generate a model Airy disc
amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius, x_0=int(size[0]/2.-0.5), y_0=int(size[1]/2.-0.5))
airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
adata = airy.eval(x, y, amplitude, center_x, center_y, radius).reshape(size)
#2)Apply Focus
f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
focusdata = f.eval(x, y, 1., center_x, center_y, focus, focus, 0.).reshape(size)
model = signal.convolve2d(adata, focusdata, mode='same')
#3)Apply CCD diffusion, approximated with a Gaussian
CCDdata = np.array([[0.0, width_y, 0.0],
[width_x, (1.-width_y-width_y-width_x-width_x), width_x],
[0.0, width_y, 0.0]])
model = signal.convolve2d(model, CCDdata, mode='same').flatten()
#true for Gaussian errors
#lnL = - 0.5 * np.sum((data - model)**2 / var)
#Gary B. said that this should be from the model not data so recompute var (now contains rn**2)
var += model.copy()
lnL = - (np.size(var)*np.sum(np.log(var))) - (0.5 * np.sum((data - model)**2 / var))
return lnL
def _printResults(best_params, errors):
"""
Print basic results.
"""
print("=" * 60)
print('Fitting with MCMC:')
pars = ['peak', 'center_x', 'center_y', 'radius', 'focus', 'width_x', 'width_y']
print('*'*20 + ' Fitted parameters ' + '*'*20)
for name, value, sig in zip(pars, best_params, errors):
print("{:s} = {:e} +- {:e}" .format(name, value, sig))
print("=" * 60)
def _printFWHM(sigma_x, sigma_y, sigma_xerr, sigma_yerr, req=10.8):
"""
Print results and compare to the requirement at 800nm.
"""
print("=" * 60)
print 'FWHM (requirement %.1f microns):' % req
print round(np.sqrt(_FWHMGauss(sigma_x)*_FWHMGauss(sigma_y)), 2), ' +/- ', \
round(np.sqrt(_FWHMGauss(sigma_xerr)*_FWHMGauss(sigma_yerr)), 3) , ' microns'
print 'x:', round(_FWHMGauss(sigma_x), 2), ' +/- ', round(_FWHMGauss(sigma_xerr), 3), ' microns'
print 'y:', round(_FWHMGauss(sigma_y), 2), ' +/- ', round(_FWHMGauss(sigma_yerr), 3), ' microns'
print("=" * 60)
def _FWHMGauss(sigma, pixel=12):
"""
Returns the FWHM of a Gaussian with a given sigma.
The returned values is in microns (pixel = 12microns).
"""
return sigma*2*np.sqrt(2*np.log(2))*pixel
def _ellipticityFromGaussian(sigmax, sigmay):
"""
Ellipticity
"""
return np.abs((sigmax**2 - sigmay**2) / (sigmax**2 + sigmay**2))
def _ellipticityerr(sigmax, sigmay, sigmaxerr, sigmayerr):
"""
Error on ellipticity.
"""
e = _ellipticityFromGaussian(sigmax, sigmay)
err = e * np.sqrt((sigmaxerr/e)**2 + (sigmayerr/e)**2)
return err
def _R2FromGaussian(sigmax, sigmay, pixel=0.1):
"""
R2.
"""
return (sigmax*pixel)**2 + (sigmay*pixel)**2
def _R2err(sigmax, sigmay, sigmaxerr ,sigmayerr):
"""
Error on R2.
"""
err = np.sqrt((2*_R2FromGaussian(sigmax, sigmay))**2*sigmaxerr**2 +
(2*_R2FromGaussian(sigmax, sigmay))**2*sigmayerr**2)
return err
def _plotDifferenceIndividualVsJoined(individuals, joined, title='800nm', sigma=3,
requirementFWHM=10.8, requirementE=0.156, requirementR2=0.002,
truthx=None, truthy=None, FWHMlims=(7.6, 10.3)):
"""
Simple plot
"""
ind = []
for file in g.glob(individuals):
print file
ind.append(fileIO.cPicleRead(file))
join = fileIO.cPicleRead(joined)
xtmp = np.arange(len(ind)) + 1
#plot FWHM
fig = plt.figure()
ax1 = fig.add_subplot(311)
ax2 = fig.add_subplot(312)
ax3 = fig.add_subplot(313)
fig.subplots_adjust(hspace=0, top=0.93, bottom=0.17, left=0.12, right=0.98)
ax1.set_title(title)
wxind = np.asarray([_FWHMGauss(data['wx']) for data in ind])
wyind = np.asarray([_FWHMGauss(data['wy']) for data in ind])
wxerr = np.asarray([sigma*_FWHMGauss(data['wxerr']) for data in ind])
wyerr = np.asarray([sigma*_FWHMGauss(data['wyerr']) for data in ind])
ax1.errorbar(xtmp, wxind, yerr=wxerr, fmt='o')
ax1.errorbar(xtmp[-1]+1, _FWHMGauss(join['wx']), yerr=sigma*_FWHMGauss(join['wxerr']), fmt='s', c='r')
ax2.errorbar(xtmp, wyind, yerr=wyerr, fmt='o')
ax2.errorbar(xtmp[-1]+1, _FWHMGauss(join['wy']), yerr=sigma*_FWHMGauss(join['wyerr']), fmt='s', c='r')
geommean = np.sqrt(wxind*wyind)
err = np.sqrt(wxerr*wyerr)
ax3.errorbar(xtmp, geommean, yerr=err, fmt='o')
ax3.errorbar(xtmp[-1]+1, np.sqrt(_FWHMGauss(join['wx'])*_FWHMGauss(join['wy'])),
yerr=sigma*np.sqrt(_FWHMGauss(join['wxerr'])*_FWHMGauss(join['wyerr'])), fmt='s', c='r')
#simulations
if truthx is not None:
ax1.axhline(y=_FWHMGauss(truthx), label='Input', c='g')
if truthy is not None:
ax2.axhline(y=_FWHMGauss(truthy), label='Input', c='g')
ax3.axhline(y=np.sqrt(_FWHMGauss(truthx)*_FWHMGauss(truthy)), label='Input', c='g')
#requirements
if requirementFWHM is not None:
ax1.axhline(y=requirementFWHM, label='Requirement (800nm)', c='r', ls='--')
ax2.axhline(y=requirementFWHM, label='Requirement (800nm)', c='r', ls='--')
ax3.axhline(y=requirementFWHM, label='Requirement (800nm)', c='r', ls='-')
plt.sca(ax1)
plt.xticks(visible=False)
plt.sca(ax2)
plt.xticks(visible=False)
plt.sca(ax3)
ltmp = np.hstack((xtmp, xtmp[-1]+1))
plt.xticks(ltmp, ['Individual %i' % x for x in ltmp[:-1]] + ['Joint',], rotation=45)
#ax1.set_ylim(7.1, 10.2)
ax1.set_ylim(*FWHMlims)
ax2.set_ylim(*FWHMlims)
#ax2.set_ylim(8.6, 10.7)
ax3.set_ylim(*FWHMlims)
ax1.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax2.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax3.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax1.set_ylabel(r'FWHM$_{X} \, [\mu$m$]$')
ax2.set_ylabel(r'FWHM$_{Y} \, [\mu$m$]$')
#ax3.set_ylabel(r'FWHM$=\sqrt{FWHM_{X}FWHM_{Y}} \quad [\mu$m$]$')
ax3.set_ylabel(r'FWHM$ \, [\mu$m$]$')
ax1.legend(shadow=True, fancybox=True)
plt.savefig('IndividualVsJoinedFWHM%s.pdf' % title)
plt.close()
#plot R2 and ellipticity
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
fig.subplots_adjust(hspace=0, top=0.93, bottom=0.17, left=0.12, right=0.98)
ax1.set_title(title)
R2x = [_R2FromGaussian(data['wx'], data['wy'])*1e3 for data in ind]
errR2 = [sigma*1.e3*_R2err(data['wx'], data['wy'], data['wxerr'], data['wyerr']) for data in ind]
ax1.errorbar(xtmp, R2x, yerr=errR2, fmt='o')
ax1.errorbar(xtmp[-1]+1, _R2FromGaussian(join['wx'], join['wy'])*1e3,
yerr=sigma*1.e3*_R2err(join['wx'], join['wy'], join['wxerr'], join['wyerr']), fmt='s')
ell = [_ellipticityFromGaussian(data['wx'], data['wy']) for data in ind]
ellerr = [sigma*_ellipticityerr(data['wx'], data['wy'], data['wxerr'], data['wyerr']) for data in ind]
ax2.errorbar(xtmp, ell, yerr=ellerr, fmt='o')
ax2.errorbar(xtmp[-1]+1, _ellipticityFromGaussian(join['wx'], join['wy']),
yerr=sigma*_ellipticityerr(join['wx'], join['wy'], join['wxerr'], join['wyerr']), fmt='s')
if requirementE is not None:
ax2.axhline(y=requirementE, label='Requirement (800nm)', c='r')
if requirementR2 is not None:
ax1.axhline(y=requirementR2*1e3, label='Requirement (800nm)', c='r')
#simulations
if truthx and truthy is not None:
ax2.axhline(y=_ellipticityFromGaussian(truthx, truthy), label='Input', c='g')
ax1.axhline(y= _R2FromGaussian(truthx, truthy)*1e3, label='Input', c='g')
plt.sca(ax1)
plt.xticks(visible=False)
plt.sca(ax2)
ltmp = np.hstack((xtmp, xtmp[-1]+1))
plt.xticks(ltmp, ['Individual%i' % x for x in ltmp[:-1]] + ['Joint',], rotation=45)
ax1.set_ylim(0.0011*1e3, 0.003*1e3)
ax2.set_ylim(0., 0.23)
ax1.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax2.set_xlim(xtmp.min()*0.9, (xtmp.max() + 1)*1.05)
ax1.set_ylabel(r'$R^{2}$ [mas$^{2}$]')
ax2.set_ylabel('ellipticity')
ax1.legend(shadow=True, fancybox=True)
plt.savefig('IndividualVsJoinedR2e%s.pdf' % title)
plt.close()
def _plotModelResiduals(id='simulated800nmJoint1', folder='results/', out='Residual.pdf', individual=False):
"""
Generate a plot with data, model, and residuals.
"""
#data
if individual:
data = pf.getdata(folder+id+'small.fits')
data[data < 1] = 1.
data = np.log10(data)
else:
data = pf.getdata(folder+id+'datafit.fits')
data[data < 1] = 1.
data = np.log10(data)
#model
    model = pf.getdata(folder+id+'model.fits')
model[model < 1] = 1.
model = np.log10(model)
#residual
residual = pf.getdata(folder+id+'residual.fits')
#squared residual
residualSQ = pf.getdata(folder+id+'residualSQ.fits')
max = np.max((data.max(), model.max()))
#figure
fig = plt.figure(figsize=(12, 12))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
ax = [ax1, ax2, ax3, ax4]
fig.subplots_adjust(hspace=0.05, wspace=0.3, top=0.95, bottom=0.02, left=0.02, right=0.9)
ax1.set_title('Data')
ax2.set_title('Model')
ax3.set_title('Residual')
ax4.set_title('$L^{2}$ Residual')
im1 = ax1.imshow(data, interpolation='none', vmax=max, origin='lower', vmin=0.1)
im2 = ax2.imshow(model, interpolation='none', vmax=max, origin='lower', vmin=0.1)
im3 = ax3.imshow(residual, interpolation='none', origin='lower', vmin=-100, vmax=100)
im4 = ax4.imshow(residualSQ, interpolation='none', origin='lower', vmin=0., vmax=10)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.05)
divider = make_axes_locatable(ax4)
cax4 = divider.append_axes("right", size="5%", pad=0.05)
cbar1 = plt.colorbar(im1, cax=cax1)
cbar1.set_label(r'$\log_{10}(D_{i, j} \quad [e^{-}]$)')
cbar2 = plt.colorbar(im2, cax=cax2)
cbar2.set_label(r'$\log_{10}(M_{i, j} \quad [e^{-}]$)')
cbar3 = plt.colorbar(im3, cax=cax3)
cbar3.set_label(r'$M_{i, j} - D_{i, j} \quad [e^{-}]$')
cbar4 = plt.colorbar(im4, cax=cax4)
cbar4.set_label(r'$\frac{(M_{i, j} - D_{i, j})^{2}}{\sigma_{CCD}^{2}}$')
for tmp in ax:
plt.sca(tmp)
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.savefig(out)
plt.close()
def plotAllResiduals():
"""
Plot residuals of all model fits.
"""
#Joint fits
files = g.glob('results/J*.fits')
individuals = [file for file in files if 'datafit' in file]
for file in individuals:
id = file.replace('results/', '').replace('datafit.fits', '')
print 'processing:', id
_plotModelResiduals(id=id, folder='results/', out='results/%sResidual.pdf' % id)
#Individual fits
files = g.glob('results/I*.fits')
individuals = [file for file in files if 'model' in file]
for file in individuals:
id = file.replace('results/', '').replace('model.fits', '')
print 'processing:', id
_plotModelResiduals(id=id, folder='results/', out='results/%sResidual.pdf' % id, individual=True)
def _amplitudeFromPeak(peak, x, y, radius, x_0=10, y_0=10):
"""
This function can be used to estimate an Airy disc amplitude from the peak pixel, centroid and radius.
"""
rz = jn_zeros(1, 1)[0] / np.pi
r = np.sqrt((x - x_0) ** 2 + (y - y_0) ** 2) / (radius / rz)
if r == 0.:
return peak
rt = np.pi * r
z = (2.0 * j1(rt) / rt)**2
amp = peak / z
return amp
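# Consistency note (a sketch, not used elsewhere in this module): the function above
# inverts the Airy profile I(r) = A * (2*J1(pi*r')/(pi*r'))**2, where r' is the centroid
# offset expressed in units of radius/rz, so at zero offset the peak equals the amplitude:
#
#   assert np.isclose(_amplitudeFromPeak(1000., 10., 10., 1.5), 1000.)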
def _peakFromTruth(theta, size=21):
"""
Derive the peak value from the parameters used for simulations.
"""
amplitude, center_x, center_y, radius, focus, width_x, width_y = theta
x = np.arange(0, size)
y = np.arange(0, size)
x, y = np.meshgrid(x, y)
airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
adata = airy.eval(x, y, amplitude, center_x, center_y, radius)
return adata.max()
def _simpleExample(CCDx=10, CCDy=10):
spot = np.zeros((21, 21))
#Create the coordinates x and y
x = np.arange(0, spot.shape[1])
y = np.arange(0, spot.shape[0])
#Put the coordinates in a mesh
xx, yy = np.meshgrid(x, y)
peak, center_x, center_y, radius, focus, width_x, width_y = (200000, 10.1, 9.95, 0.5, 0.5, 0.03, 0.06)
amplitude = _amplitudeFromPeak(peak, center_x, center_y, radius, x_0=CCDx, y_0=CCDy)
airy = models.AiryDisk2D(amplitude, center_x, center_y, radius)
adata = airy.eval(xx, yy, amplitude, center_x, center_y, radius).reshape(spot.shape)
f = models.Gaussian2D(1., center_x, center_y, focus, focus, 0.)
focusdata = f.eval(xx, yy, 1., center_x, center_y, focus, focus, 0.).reshape(spot.shape)
foc = signal.convolve2d(adata, focusdata, mode='same')
fileIO.writeFITS(foc, 'TESTfocus.fits', int=False)
CCDdata = np.array([[0.0, width_y, 0.0],
[width_x, (1.-width_y-width_y-width_x-width_x), width_x],
[0.0, width_y, 0.0]])
model = signal.convolve2d(foc, CCDdata, mode='same')
#save model
fileIO.writeFITS(model, 'TESTkernel.fits', int=False)
def analyseOutofFocus():
"""
"""
forwardModel('data/13_24_53sEuclid.fits', wavelength='l800', out='blurred800',
spotx=2983, spoty=3760, size=10, burn=10000, run=20000)
if __name__ == '__main__':
analyseOutofFocus()
#_simpleExample() | bsd-2-clause |
snario/geopandas | tests/test_geoseries.py | 8 | 6170 | from __future__ import absolute_import
import os
import shutil
import tempfile
import numpy as np
from numpy.testing import assert_array_equal
from pandas import Series
from shapely.geometry import (Polygon, Point, LineString,
MultiPoint, MultiLineString, MultiPolygon)
from shapely.geometry.base import BaseGeometry
from geopandas import GeoSeries
from .util import unittest, geom_equals, geom_almost_equals
class TestSeries(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.t1 = Polygon([(0, 0), (1, 0), (1, 1)])
self.t2 = Polygon([(0, 0), (1, 1), (0, 1)])
self.sq = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
self.g1 = GeoSeries([self.t1, self.sq])
self.g2 = GeoSeries([self.sq, self.t1])
self.g3 = GeoSeries([self.t1, self.t2])
self.g3.crs = {'init': 'epsg:4326', 'no_defs': True}
self.g4 = GeoSeries([self.t2, self.t1])
self.na = GeoSeries([self.t1, self.t2, Polygon()])
self.na_none = GeoSeries([self.t1, self.t2, None])
self.a1 = self.g1.copy()
self.a1.index = ['A', 'B']
self.a2 = self.g2.copy()
self.a2.index = ['B', 'C']
self.esb = Point(-73.9847, 40.7484)
self.sol = Point(-74.0446, 40.6893)
self.landmarks = GeoSeries([self.esb, self.sol],
crs={'init': 'epsg:4326', 'no_defs': True})
self.l1 = LineString([(0, 0), (0, 1), (1, 1)])
self.l2 = LineString([(0, 0), (1, 0), (1, 1), (0, 1)])
self.g5 = GeoSeries([self.l1, self.l2])
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_single_geom_constructor(self):
p = Point(1,2)
line = LineString([(2, 3), (4, 5), (5, 6)])
poly = Polygon([(0, 0), (1, 0), (1, 1)],
[[(.1, .1), (.9, .1), (.9, .9)]])
mp = MultiPoint([(1, 2), (3, 4), (5, 6)])
mline = MultiLineString([[(1, 2), (3, 4), (5, 6)], [(7, 8), (9, 10)]])
poly2 = Polygon([(1, 1), (1, -1), (-1, -1), (-1, 1)],
[[(.5, .5), (.5, -.5), (-.5, -.5), (-.5, .5)]])
mpoly = MultiPolygon([poly, poly2])
geoms = [p, line, poly, mp, mline, mpoly]
index = ['a', 'b', 'c', 'd']
for g in geoms:
gs = GeoSeries(g)
self.assert_(len(gs) == 1)
self.assert_(gs.iloc[0] is g)
gs = GeoSeries(g, index=index)
self.assert_(len(gs) == len(index))
for x in gs:
self.assert_(x is g)
def test_copy(self):
gc = self.g3.copy()
self.assertTrue(type(gc) is GeoSeries)
self.assertEqual(self.g3.name, gc.name)
self.assertEqual(self.g3.crs, gc.crs)
def test_in(self):
self.assertTrue(self.t1 in self.g1)
self.assertTrue(self.sq in self.g1)
self.assertTrue(self.t1 in self.a1)
self.assertTrue(self.t2 in self.g3)
self.assertTrue(self.sq not in self.g3)
self.assertTrue(5 not in self.g3)
def test_geom_equals(self):
self.assertTrue(np.alltrue(self.g1.geom_equals(self.g1)))
assert_array_equal(self.g1.geom_equals(self.sq), [False, True])
def test_geom_equals_align(self):
a = self.a1.geom_equals(self.a2)
self.assertFalse(a['A'])
self.assertTrue(a['B'])
self.assertFalse(a['C'])
def test_align(self):
a1, a2 = self.a1.align(self.a2)
self.assertTrue(a2['A'].is_empty)
self.assertTrue(a1['B'].equals(a2['B']))
self.assertTrue(a1['C'].is_empty)
def test_geom_almost_equals(self):
# TODO: test decimal parameter
self.assertTrue(np.alltrue(self.g1.geom_almost_equals(self.g1)))
assert_array_equal(self.g1.geom_almost_equals(self.sq), [False, True])
def test_geom_equals_exact(self):
# TODO: test tolerance parameter
self.assertTrue(np.alltrue(self.g1.geom_equals_exact(self.g1, 0.001)))
assert_array_equal(self.g1.geom_equals_exact(self.sq, 0.001), [False, True])
def test_to_file(self):
""" Test to_file and from_file """
tempfilename = os.path.join(self.tempdir, 'test.shp')
self.g3.to_file(tempfilename)
# Read layer back in?
s = GeoSeries.from_file(tempfilename)
self.assertTrue(all(self.g3.geom_equals(s)))
# TODO: compare crs
def test_representative_point(self):
self.assertTrue(np.alltrue(self.g1.contains(self.g1.representative_point())))
self.assertTrue(np.alltrue(self.g2.contains(self.g2.representative_point())))
self.assertTrue(np.alltrue(self.g3.contains(self.g3.representative_point())))
self.assertTrue(np.alltrue(self.g4.contains(self.g4.representative_point())))
def test_transform(self):
utm18n = self.landmarks.to_crs(epsg=26918)
lonlat = utm18n.to_crs(epsg=4326)
self.assertTrue(np.alltrue(self.landmarks.geom_almost_equals(lonlat)))
with self.assertRaises(ValueError):
self.g1.to_crs(epsg=4326)
with self.assertRaises(TypeError):
self.landmarks.to_crs(crs=None, epsg=None)
def test_fillna(self):
na = self.na_none.fillna(Point())
self.assertTrue(isinstance(na[2], BaseGeometry))
self.assertTrue(na[2].is_empty)
self.assertTrue(geom_equals(self.na_none[:2], na[:2]))
# XXX: method works inconsistently for different pandas versions
#self.na_none.fillna(method='backfill')
def test_coord_slice(self):
""" Test CoordinateSlicer """
# need some better test cases
self.assertTrue(geom_equals(self.g3, self.g3.cx[:, :]))
self.assertTrue(geom_equals(self.g3[[True, False]], self.g3.cx[0.9:, :0.1]))
self.assertTrue(geom_equals(self.g3[[False, True]], self.g3.cx[0:0.1, 0.9:1.0]))
def test_geoseries_geointerface(self):
self.assertEqual(self.g1.__geo_interface__['type'], 'FeatureCollection')
self.assertEqual(len(self.g1.__geo_interface__['features']),
self.g1.shape[0])
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
gidden/salamanca | tests/test_calibrate_ineq.py | 1 | 5111 | import pytest
from pytest import approx
import numpy as np
import pandas as pd
import pyomo.environ as pyo
from pandas.testing import assert_series_equal
from salamanca import ineq
from salamanca.models.calibrate_ineq import Model, Model1, Model2, Model3, Model4, Model4b
has_ipopt = pyo.SolverFactory('ipopt').available() == True
ipopt_reason = 'IPopt Solver not available'
def data():
natdata = pd.Series({
'n': 20,
'i': 105,
'gini': 0.4,
})
subdata = pd.DataFrame({
'n': [5, 10],
'i': [10, 5],
'gini': [0.5, 0.3],
}, index=['foo', 'bar'])
return natdata, subdata
def test_model_data_pop():
# note all subdata order is swapped in model_data due to sorting by gini
natdata, subdata = data()
model = Model(natdata, subdata)
# pop
obs = model.model_data['N']
exp = natdata['n']
assert obs == approx(exp)
obs = model.model_data['n']
exp = subdata['n'] * natdata['n'] / subdata['n'].sum()
assert obs == approx(exp[::-1])
def test_model_data_inc():
# note all subdata order is swapped in model_data due to sorting by gini
natdata, subdata = data()
model = Model(natdata, subdata)
# inc
obs = model.model_data['G']
exp = natdata['n'] * natdata['i']
assert obs == approx(exp)
obs = model.model_data['g']
expn = subdata['n'] * natdata['n'] / subdata['n'].sum()
exp = (subdata['i'] * expn) * (natdata['i'] * natdata['n']) \
/ (subdata['i'] * expn).sum()
assert obs == approx(exp[::-1])
def test_model_data_ineq():
# note all subdata order is swapped in model_data due to sorting by gini
natdata, subdata = data()
model = Model(natdata, subdata)
# ineq
obs = model.model_data['t']
exp = ineq.gini_to_theil(subdata['gini'].values, empirical=False)
assert obs == approx(exp[::-1])
def test_model_data_idx():
# note all subdata order is swapped in model_data due to sorting by gini
natdata, subdata = data()
model = Model(natdata, subdata)
# indicies
obs = model.orig_idx.values
exp = np.array(['foo', 'bar'])
assert (obs == exp).all()
obs = model.sorted_idx.values
exp = np.array(['bar', 'foo'])
assert (obs == exp).all()
obs = model.model_idx.values
exp = np.array([0, 1])
assert (obs == exp).all()
def test_model_data_error():
natdata, subdata = data()
ndf = natdata.copy().drop('n')
with pytest.raises(ValueError):
Model(ndf, subdata)
sdf = natdata.copy().drop('n')
with pytest.raises(ValueError):
Model(natdata, sdf)
@pytest.mark.skipif(not has_ipopt, reason=ipopt_reason)
def test_Model1_full():
natdata, subdata = data()
model = Model1(natdata, subdata)
model.construct()
model.solve()
# this is the theil result
# equivalent ginis are: 0.19798731, 0.45663392
obs = model.solution
# solution is ordered small to large
exp = np.array([0.062872, 0.369337])
assert obs.values == approx(exp, abs=1e-5)
df = model.result()
obs = sorted(df.columns)
exp = ['gini', 'gini_orig', 'i', 'i_orig', 'n', 'n_orig']
assert obs == exp
obs = df['gini_orig']
exp = subdata['gini']
assert_series_equal(obs, exp, check_names=False)
obs = df['i_orig']
exp = subdata['i']
assert_series_equal(obs, exp, check_names=False)
obs = df['n_orig']
exp = subdata['n']
assert_series_equal(obs, exp, check_names=False)
@pytest.mark.skipif(not has_ipopt, reason=ipopt_reason)
def test_Model1_result():
natdata, subdata = data()
model = Model1(natdata, subdata)
model.construct()
model.solve()
# ginis in original order
obs = model.result()['gini'].values
exp = [0.45663392, 0.19798731]
assert obs == approx(exp)
@pytest.mark.skipif(not has_ipopt, reason=ipopt_reason)
def test_Model2_result():
natdata, subdata = data()
model = Model2(natdata, subdata)
model.construct()
model.solve()
# ginis in original order
obs = model.result()['gini'].values
exp = [0.45663392, 0.19798731]
assert obs == approx(exp)
@pytest.mark.skipif(not has_ipopt, reason=ipopt_reason)
def test_Model3_result():
natdata, subdata = data()
model = Model3(natdata, subdata)
model.construct()
model.solve()
# ginis in original order
obs = model.result()['gini'].values
exp = [0.43521867, 0.24902784]
assert obs == approx(exp)
@pytest.mark.skipif(not has_ipopt, reason=ipopt_reason)
def test_Model4_result():
natdata, subdata = data()
model = Model4(natdata, subdata)
model.construct()
model.solve()
# ginis in original order
obs = model.result()['gini'].values
exp = [0.41473959, 0.28608873]
assert obs == approx(exp)
@pytest.mark.skipif(not has_ipopt, reason=ipopt_reason)
def test_Model4b_result():
natdata, subdata = data()
model = Model4b(natdata, subdata)
model.construct()
model.solve()
# ginis in original order
obs = model.result()['gini'].values
exp = [0.48849954, 0.0277764]
assert obs == approx(exp)
| apache-2.0 |
yl565/statsmodels | statsmodels/examples/ex_kernel_regression_dgp.py | 34 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
seed = np.random.randint(999999)
seed = 430973
print(seed)
np.random.seed(seed)
funcs = [dgp.UnivariateFanGijbels1(),
dgp.UnivariateFanGijbels2(),
dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
dgp.UnivariateFunc1()
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
model = KernelReg(endog=[f.y], exog=[f.x], reg_type='ll',
var_type='c', bw='cv_ls')
mean, mfx = model.fit()
ax = fig.add_subplot(2, 2, i+1)
f.plot(ax=ax)
ax.plot(f.x, mean, color='r', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
fig.suptitle('Kernel Regression')
fig.show()
| bsd-3-clause |
danforthcenter/plantcv | plantcv/plantcv/threshold/threshold_methods.py | 2 | 29885 | # Threshold functions
import os
import cv2
import math
import numpy as np
from matplotlib import pyplot as plt
from plantcv.plantcv import print_image
from plantcv.plantcv import plot_image
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
from skimage.feature import greycomatrix, greycoprops
from scipy.ndimage import generic_filter
from plantcv.plantcv._debug import _debug
# Binary threshold
def binary(gray_img, threshold, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the threshold value.
Inputs:
gray_img = Grayscale image data
threshold = Threshold value (0-255)
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param threshold: int
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, threshold, max_value, threshold_method, "_binary_threshold_")
return bin_img
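# Example (a sketch, not part of the documented PlantCV API): thresholding a synthetic
# gradient image with the function defined above; 128 and 255 are arbitrary choices.
#
#   import numpy as np
#   gray = np.tile(np.arange(256, dtype=np.uint8), (10, 1))
#   mask = binary(gray_img=gray, threshold=128, max_value=255, object_type="light")
#   # mask is 0 where gray <= 128 and 255 elsewhere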
# Gaussian adaptive threshold
def gaussian(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the Gaussian adaptive threshold method.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
bin_img = _call_adaptive_threshold(gray_img, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, threshold_method,
"_gaussian_threshold_")
return bin_img
# Mean adaptive threshold
def mean(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image based on the mean adaptive threshold method.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
bin_img = _call_adaptive_threshold(gray_img, max_value, cv2.ADAPTIVE_THRESH_MEAN_C, threshold_method,
"_mean_threshold_")
return bin_img
# Otsu autothreshold
def otsu(gray_img, max_value, object_type="light"):
"""Creates a binary image from a grayscale image using Otsu's thresholding.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:return bin_img: numpy.ndarray
"""
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY + cv2.THRESH_OTSU
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, 0, max_value, threshold_method, "_otsu_threshold_")
return bin_img
# Triangle autothreshold
def triangle(gray_img, max_value, object_type="light", xstep=1):
"""Creates a binary image from a grayscale image using Zack et al.'s (1977) thresholding.
Inputs:
gray_img = Grayscale image data
max_value = value to apply above threshold (usually 255 = white)
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
    xstep = value to move along x-axis to determine the points from which to calculate distance
            (recommended to start at 1 and change if needed)
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param max_value: int
:param object_type: str
:param xstep: int
:return bin_img: numpy.ndarray
"""
# Calculate automatic threshold value based on triangle algorithm
hist = cv2.calcHist([gray_img], [0], None, [256], [0, 255])
# Make histogram one array
newhist = []
for item in hist:
newhist.extend(item)
# Detect peaks
show = False
if params.debug == "plot":
show = True
ind = _detect_peaks(newhist, mph=None, mpd=1, show=show)
# Find point corresponding to highest peak
# Find intensity value (y) of highest peak
max_peak_int = max(list(newhist[i] for i in ind))
# Find value (x) of highest peak
max_peak = [i for i, x in enumerate(newhist) if x == max(newhist)]
# Combine x,y
max_peak_xy = [max_peak[0], max_peak_int]
# Find final point at end of long tail
end_x = len(newhist) - 1
end_y = newhist[end_x]
end_xy = [end_x, end_y]
# Define the known points
points = [max_peak_xy, end_xy]
x_coords, y_coords = zip(*points)
# Get threshold value
peaks = []
dists = []
for i in range(x_coords[0], x_coords[1], xstep):
distance = (((x_coords[1] - x_coords[0]) * (y_coords[0] - hist[i])) -
((x_coords[0] - i) * (y_coords[1] - y_coords[0]))) / math.sqrt(
(float(x_coords[1]) - float(x_coords[0])) *
(float(x_coords[1]) - float(x_coords[0])) +
((float(y_coords[1]) - float(y_coords[0])) *
(float(y_coords[1]) - float(y_coords[0]))))
peaks.append(i)
dists.append(distance)
autothresh = [peaks[x] for x in [i for i, x in enumerate(list(dists)) if x == max(list(dists))]]
autothreshval = autothresh[0]
# Set the threshold method
threshold_method = ""
if object_type.upper() == "LIGHT":
threshold_method = cv2.THRESH_BINARY + cv2.THRESH_OTSU
elif object_type.upper() == "DARK":
threshold_method = cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU
else:
fatal_error('Object type ' + str(object_type) + ' is not "light" or "dark"!')
params.device += 1
# Threshold the image
bin_img = _call_threshold(gray_img, autothreshval, max_value, threshold_method, "_triangle_threshold_")
# Additional figures created by this method, if debug is on
if params.debug is not None:
if params.debug == 'print':
_, ax = plt.subplots()
ax.plot(hist)
ax.set(title='Threshold value = {t}'.format(t=autothreshval))
ax.axis([0, 256, 0, max(hist)])
ax.grid(True)
fig_name_hist = os.path.join(params.debug_outdir,
str(params.device) + '_triangle_thresh_hist_' + str(autothreshval) + ".png")
# write the figure to current directory
plt.savefig(fig_name_hist, dpi=params.dpi)
# close pyplot plotting window
plt.clf()
elif params.debug == 'plot':
print('Threshold value = {t}'.format(t=autothreshval))
_, ax = plt.subplots()
ax.plot(hist)
ax.axis([0, 256, 0, max(hist)])
ax.grid(True)
plt.show()
return bin_img
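# --- Illustrative sketch of the triangle-threshold geometry used above ---
# A standalone numpy version of the peak-to-tail distance computation: the
# threshold is the intensity whose histogram point lies farthest from the line
# joining the highest peak to the end of the tail. Not called by this module;
# the function and variable names are my own.
def _triangle_distance_sketch(hist_counts):
    import numpy as np
    counts = np.asarray(hist_counts, dtype=float)
    peak_x = int(np.argmax(counts))           # x of the highest peak
    end_x = len(counts) - 1                   # x at the end of the long tail
    p0 = np.array([peak_x, counts[peak_x]])   # peak point
    p1 = np.array([end_x, counts[end_x]])     # tail point
    xs = np.arange(peak_x, end_x)
    dx, dy = p1[0] - p0[0], p1[1] - p0[1]
    # Perpendicular distance from each histogram point to the peak-tail line
    dists = np.abs(dx * (counts[xs] - p0[1]) - dy * (xs - p0[0])) / np.hypot(dx, dy)
    return int(xs[np.argmax(dists)])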
def texture(gray_img, ksize, threshold, offset=3, texture_method='dissimilarity', borders='nearest',
max_value=255):
"""Creates a binary image from a grayscale image using skimage texture calculation for thresholding.
This function is quite slow.
Inputs:
gray_img = Grayscale image data
ksize = Kernel size for texture measure calculation
threshold = Threshold value (0-255)
offset = Distance offsets
texture_method = Feature of a grey level co-occurrence matrix, either
'contrast', 'dissimilarity', 'homogeneity', 'ASM', 'energy',
                     or 'correlation'. For equations of different features see
scikit-image.
borders = How the array borders are handled, either 'reflect',
'constant', 'nearest', 'mirror', or 'wrap'
max_value = Value to apply above threshold (usually 255 = white)
Returns:
bin_img = Thresholded, binary image
:param gray_img: numpy.ndarray
:param ksize: int
:param threshold: int
:param offset: int
:param texture_method: str
:param borders: str
:param max_value: int
:return bin_img: numpy.ndarray
"""
# Function that calculates the texture of a kernel
def calc_texture(inputs):
inputs = np.reshape(a=inputs, newshape=[ksize, ksize])
inputs = inputs.astype(np.uint8)
# Greycomatrix takes image, distance offset, angles (in radians), symmetric, and normed
# http://scikit-image.org/docs/dev/api/skimage.feature.html#skimage.feature.greycomatrix
glcm = greycomatrix(inputs, [offset], [0], 256, symmetric=True, normed=True)
diss = greycoprops(glcm, texture_method)[0, 0]
return diss
# Make an array the same size as the original image
output = np.zeros(gray_img.shape, dtype=gray_img.dtype)
# Apply the texture function over the whole image
generic_filter(gray_img, calc_texture, size=ksize, output=output, mode=borders)
# Threshold so higher texture measurements stand out
bin_img = binary(gray_img=output, threshold=threshold, max_value=max_value, object_type='light')
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + "_texture_mask.png"))
return bin_img
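# --- Usage sketch (illustrative only) ---
# Per-window GLCM feature computation equivalent to calc_texture above, shown
# for a single 5x5 patch. It relies on the greycomatrix/greycoprops names
# imported at the top of this module (the pre-0.19 scikit-image spelling); the
# random patch stands in for a real image window.
def _example_glcm_patch():
    import numpy as np
    patch = np.random.randint(0, 256, size=(5, 5), dtype=np.uint8)
    glcm = greycomatrix(patch, [3], [0], 256, symmetric=True, normed=True)
    return greycoprops(glcm, 'dissimilarity')[0, 0]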
def custom_range(img, lower_thresh, upper_thresh, channel='gray'):
"""Creates a thresholded image and mask from an RGB image and threshold values.
Inputs:
img = RGB or grayscale image data
lower_thresh = List of lower threshold values (0-255)
upper_thresh = List of upper threshold values (0-255)
channel = Color-space channels of interest (RGB, HSV, LAB, or gray)
Returns:
mask = Mask, binary image
masked_img = Masked image, keeping the part of image of interest
:param img: numpy.ndarray
:param lower_thresh: list
:param upper_thresh: list
:param channel: str
:return mask: numpy.ndarray
:return masked_img: numpy.ndarray
"""
if channel.upper() == 'HSV':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the HSV colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Convert the RGB image to HSV colorspace
hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Separate channels
hue = hsv_img[:, :, 0]
sat = hsv_img[:, :, 1]
value = hsv_img[:, :, 2]
# Make a mask for each channel
h_mask = cv2.inRange(hue, lower_thresh[0], upper_thresh[0])
s_mask = cv2.inRange(sat, lower_thresh[1], upper_thresh[1])
v_mask = cv2.inRange(value, lower_thresh[2], upper_thresh[2])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=h_mask)
result = cv2.bitwise_and(result, result, mask=s_mask)
masked_img = cv2.bitwise_and(result, result, mask=v_mask)
# Combine masks
mask = cv2.bitwise_and(s_mask, h_mask)
mask = cv2.bitwise_and(mask, v_mask)
elif channel.upper() == 'RGB':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the RGB colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Separate channels (pcv.readimage reads RGB images in as BGR)
blue = img[:, :, 0]
green = img[:, :, 1]
red = img[:, :, 2]
# Make a mask for each channel
b_mask = cv2.inRange(blue, lower_thresh[2], upper_thresh[2])
g_mask = cv2.inRange(green, lower_thresh[1], upper_thresh[1])
r_mask = cv2.inRange(red, lower_thresh[0], upper_thresh[0])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=b_mask)
result = cv2.bitwise_and(result, result, mask=g_mask)
masked_img = cv2.bitwise_and(result, result, mask=r_mask)
# Combine masks
mask = cv2.bitwise_and(b_mask, g_mask)
mask = cv2.bitwise_and(mask, r_mask)
elif channel.upper() == 'LAB':
# Check threshold inputs
if not (len(lower_thresh) == 3 and len(upper_thresh) == 3):
fatal_error("If using the LAB colorspace, 3 thresholds are needed for both lower_thresh and " +
"upper_thresh. If thresholding isn't needed for a channel, set lower_thresh=0 and " +
"upper_thresh=255")
# Convert the RGB image to LAB colorspace
lab_img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
# Separate channels (pcv.readimage reads RGB images in as BGR)
lightness = lab_img[:, :, 0]
green_magenta = lab_img[:, :, 1]
blue_yellow = lab_img[:, :, 2]
# Make a mask for each channel
l_mask = cv2.inRange(lightness, lower_thresh[0], upper_thresh[0])
gm_mask = cv2.inRange(green_magenta, lower_thresh[1], upper_thresh[1])
by_mask = cv2.inRange(blue_yellow, lower_thresh[2], upper_thresh[2])
# Apply the masks to the image
result = cv2.bitwise_and(img, img, mask=l_mask)
result = cv2.bitwise_and(result, result, mask=gm_mask)
masked_img = cv2.bitwise_and(result, result, mask=by_mask)
# Combine masks
mask = cv2.bitwise_and(l_mask, gm_mask)
mask = cv2.bitwise_and(mask, by_mask)
elif channel.upper() == 'GRAY' or channel.upper() == 'GREY':
# Check threshold input
if not (len(lower_thresh) == 1 and len(upper_thresh) == 1):
fatal_error("If useing a grayscale colorspace, 1 threshold is needed for both the " +
"lower_thresh and upper_thresh.")
if len(np.shape(img)) == 3:
# Convert RGB image to grayscale colorspace
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray_img = img
# Make a mask
mask = cv2.inRange(gray_img, lower_thresh[0], upper_thresh[0])
# Apply the masks to the image
masked_img = cv2.bitwise_and(img, img, mask=mask)
else:
fatal_error(str(channel) + " is not a valid colorspace. Channel must be either 'RGB', 'HSV', or 'gray'.")
# Print or plot the binary image if debug is on
_debug(visual=masked_img, filename=os.path.join(params.debug_outdir,
str(params.device) + channel + 'custom_thresh.png'))
_debug(visual=mask, filename=os.path.join(params.debug_outdir,
str(params.device) + channel + 'custom_thresh_mask.png'))
return mask, masked_img
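# --- Usage sketch (illustrative only; the HSV bounds and path are arbitrary) ---
# Selecting roughly green pixels with the HSV branch of custom_range above.
def _example_custom_range_usage():
    import cv2
    rgb = cv2.imread("plant.png")  # OpenCV reads the file as BGR
    mask, masked = custom_range(img=rgb,
                                lower_thresh=[35, 50, 50],
                                upper_thresh=[85, 255, 255],
                                channel='HSV')
    return mask, masked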
# Internal method for calling the OpenCV threshold function to reduce code duplication
def _call_threshold(gray_img, threshold, max_value, threshold_method, method_name):
# Threshold the image
ret, bin_img = cv2.threshold(gray_img, threshold, max_value, threshold_method)
if bin_img.dtype != 'uint16':
bin_img = np.uint8(bin_img)
# Print or plot the binary image if debug is on
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir,
str(params.device) + method_name + str(threshold) + '.png'))
return bin_img
# Internal method for calling the OpenCV adaptiveThreshold function to reduce code duplication
def _call_adaptive_threshold(gray_img, max_value, adaptive_method, threshold_method, method_name):
# Threshold the image
bin_img = cv2.adaptiveThreshold(gray_img, max_value, adaptive_method, threshold_method, 11, 2)
# Print or plot the binary image if debug is on
_debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + method_name + '.png'))
return bin_img
# Internal method for detecting peaks for the triangle autothreshold method
def _detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False, show=False, ax=None):
"""Marcos Duarte, https://github.com/demotu/BMC; version 1.0.4; license MIT
Detect peaks in data based on their amplitude and other features.
Parameters
----------
x : 1D array_like
data.
mph : {None, number}, optional (default = None)
detect peaks that are greater than minimum peak height.
mpd : positive integer, optional (default = 1)
detect peaks that are at least separated by minimum peak distance (in
number of data).
threshold : positive number, optional (default = 0)
detect peaks (valleys) that are greater (smaller) than `threshold`
in relation to their immediate neighbors.
edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
for a flat peak, keep only the rising edge ('rising'), only the
falling edge ('falling'), both edges ('both'), or don't detect a
flat peak (None).
kpsh : bool, optional (default = False)
keep peaks with same height even if they are closer than `mpd`.
valley : bool, optional (default = False)
if True (1), detect valleys (local minima) instead of peaks.
show : bool, optional (default = False)
if True (1), plot data in matplotlib figure.
ax : a matplotlib.axes.Axes instance, optional (default = None).
Returns
-------
ind : 1D array_like
indices of the peaks in `x`.
Notes
-----
The detection of valleys instead of peaks is performed internally by simply
negating the data: `ind_valleys = detect_peaks(-x)`
The function can handle NaN's
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
Examples
--------
from detect_peaks import detect_peaks
x = np.random.randn(100)
x[60:81] = np.nan
# detect all peaks and plot data
ind = detect_peaks(x, show=True)
print(ind)
x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
# set minimum peak height = 0 and minimum peak distance = 20
detect_peaks(x, mph=0, mpd=20, show=True)
x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0]
# set minimum peak distance = 2
detect_peaks(x, mpd=2, show=True)
x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5
# detection of valleys instead of peaks
detect_peaks(x, mph=0, mpd=20, valley=True, show=True)
x = [0, 1, 1, 0, 1, 1, 0]
# detect both edges
detect_peaks(x, edge='both', show=True)
x = [-2, 1, -2, 2, 1, 1, 3, 0]
# set threshold = 2
detect_peaks(x, threshold = 2, show=True)
"""
x = np.atleast_1d(x).astype('float64')
# It is always the case that x.size=256 since 256 hardcoded in line 186 ->
# cv2.calcHist([gray_img], [0], None, [256], [0, 255])
# if x.size < 3:
# return np.array([], dtype=int)
# # Where this function is used it is hardcoded to use the default valley=False so this will never be used
# if valley:
# x = -x
# find indices of all peaks
dx = x[1:] - x[:-1]
# handle NaN's
indnan = np.where(np.isnan(x))[0]
# x will never contain NaN since calcHist will never return NaN
# if indnan.size:
# x[indnan] = np.inf
# dx[np.where(np.isnan(dx))[0]] = np.inf
ine, ire, ife = np.array([[], [], []], dtype=int)
# # Where this function is used it is hardcoded to use the default edge='rising' so we will never have
# # edge=None, thus this will never be used
# if not edge:
# ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
if edge.lower() in ['rising', 'both']:
ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
# # Where this function is used it is hardcoded to use the default edge='rising' so this will never be used
# if edge.lower() in ['falling', 'both']:
# ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
ind = np.unique(np.hstack((ine, ire, ife)))
# x will never contain NaN since calcHist will never return NaN
# if ind.size and indnan.size:
# # NaN's and values close to NaN's cannot be peaks
# ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan - 1, indnan + 1))), invert=True)]
# first and last values of x cannot be peaks
# if ind.size and ind[0] == 0:
# ind = ind[1:]
# if ind.size and ind[-1] == x.size - 1:
# ind = ind[:-1]
# We think the above code will never be reached given some of the hardcoded properties used
# # Where this function is used has hardcoded mph=None so this will never be used
# # remove peaks < minimum peak height
# if ind.size and mph is not None:
# ind = ind[x[ind] >= mph]
# remove peaks - neighbors < threshold
# # Where this function is used threshold is hardcoded to the default threshold=0 so this will never be used
# if ind.size and threshold > 0:
# dx = np.min(np.vstack([x[ind] - x[ind - 1], x[ind] - x[ind + 1]]), axis=0)
# ind = np.delete(ind, np.where(dx < threshold)[0])
# # Where this function is used has hardcoded mpd=1 so this will never be used
# # detect small peaks closer than minimum peak distance
# if ind.size and mpd > 1:
# ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height
# idel = np.zeros(ind.size, dtype=bool)
# for i in range(ind.size):
# if not idel[i]:
# # keep peaks with the same height if kpsh is True
# idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
# & (x[ind[i]] > x[ind] if kpsh else True)
# idel[i] = 0 # Keep current peak
# # remove the small peaks and sort back the indices by their occurrence
# ind = np.sort(ind[~idel])
if show:
# x will never contain NaN since calcHist will never return NaN
# if indnan.size:
# x[indnan] = np.nan
# # Where this function is used it is hardcoded to use the default valley=False so this will never be used
# if valley:
# x = -x
_plot(x, mph, mpd, threshold, edge, valley, ax, ind)
return ind
# Internal plotting function for the triangle autothreshold method
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
"""Plot results of the detect_peaks function, see its help."""
if ax is None:
_, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(x, 'b', lw=1)
if ind.size:
label = 'valley' if valley else 'peak'
label = label + 's' if ind.size > 1 else label
ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
label='%d %s' % (ind.size, label))
ax.legend(loc='best', framealpha=.5, numpoints=1)
ax.set_xlim(-.02 * x.size, x.size * 1.02 - 1)
ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
yrange = ymax - ymin if ymax > ymin else 1
ax.set_ylim(ymin - 0.1 * yrange, ymax + 0.1 * yrange)
ax.set_xlabel('Data #', fontsize=14)
ax.set_ylabel('Amplitude', fontsize=14)
mode = 'Valley detection' if valley else 'Peak detection'
ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
% (mode, str(mph), mpd, str(threshold), edge))
# plt.grid()
plt.show()
def saturation(rgb_img, threshold=255, channel="any"):
"""Return a mask filtering out saturated pixels.
Inputs:
rgb_img = RGB image
threshold = value for threshold, above which is considered saturated
channel = how many channels must be saturated for the pixel to be masked out ("any", "all")
Returns:
masked_img = A binary image with the saturated regions blacked out.
:param rgb_img: np.ndarray
:param threshold: int
:param channel: str
:return masked_img: np.ndarray
"""
# Mask red, green, and blue saturation separately
b, g, r = cv2.split(rgb_img)
b_saturated = cv2.inRange(b, threshold, 255)
g_saturated = cv2.inRange(g, threshold, 255)
r_saturated = cv2.inRange(r, threshold, 255)
# Combine channel masks
if channel.lower() == "any":
# Consider a pixel saturated if any channel is saturated
saturated = cv2.bitwise_or(b_saturated, g_saturated)
saturated = cv2.bitwise_or(saturated, r_saturated)
elif channel.lower() == "all":
# Consider a pixel saturated only if all channels are saturated
saturated = cv2.bitwise_and(b_saturated, g_saturated)
saturated = cv2.bitwise_and(saturated, r_saturated)
else:
fatal_error(str(channel) + " is not a valid option. Channel must be either 'any', or 'all'.")
# Invert "saturated" before returning, so saturated = black
bin_img = cv2.bitwise_not(saturated)
    _debug(visual=bin_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_saturation_threshold.png'))
return bin_img
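# --- Usage sketch (illustrative only) ---
# Masking out blown-out highlights before downstream thresholding; the
# threshold of 250 and the file path are assumptions made for the example.
def _example_saturation_usage():
    import cv2
    rgb = cv2.imread("plant.png")
    return saturation(rgb_img=rgb, threshold=250, channel="any")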
def mask_bad(float_img, bad_type='native'):
""" Create a mask with desired "bad" pixels of the input floaat image marked.
Inputs:
float_img = image represented by an nd-array (data type: float). Most probably, it is the result of some
calculation based on the original image. So the datatype is float, and it is possible to have some
"bad" values, i.e. nan and/or inf
bad_type = definition of "bad" type, can be 'nan', 'inf' or 'native'
Returns:
mask = A mask indicating the locations of "bad" pixels
:param float_img: numpy.ndarray
:param bad_type: str
:return mask: numpy.ndarray
"""
size_img = np.shape(float_img)
if len(size_img) != 2:
fatal_error('Input image is not a single channel image!')
mask = np.zeros(size_img, dtype='uint8')
idx_nan, idy_nan = np.where(np.isnan(float_img) == 1)
idx_inf, idy_inf = np.where(np.isinf(float_img) == 1)
    # neither nan nor inf exists in the image; print a message and leave the mask all zero
    if len(idx_nan) == 0 and len(idx_inf) == 0:
        print('Neither nan nor inf appears in the current image.')
# at least one of the "bad" exists
# desired bad to mark is "native"
elif bad_type.lower() == 'native':
# mask[np.isnan(gray_img)] = 255
# mask[np.isinf(gray_img)] = 255
mask[idx_nan, idy_nan] = 255
mask[idx_inf, idy_inf] = 255
elif bad_type.lower() == 'nan' and len(idx_nan) >= 1:
mask[idx_nan, idy_nan] = 255
elif bad_type.lower() == 'inf' and len(idx_inf) >= 1:
mask[idx_inf, idy_inf] = 255
# "bad" exists but not the user desired bad type, return the all-zero mask
else:
mask = mask
print('{} does not appear in the current image.'.format(bad_type.lower()))
_debug(visual=mask, filename=os.path.join(params.debug_outdir, str(params.device) + "_bad_mask.png"))
return mask
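# --- Usage sketch (illustrative only) ---
# A small float image with one nan and one inf; with bad_type='native' the
# returned mask marks both locations with 255.
def _example_mask_bad_usage():
    import numpy as np
    img = np.ones((4, 4), dtype=float)
    img[0, 0] = np.nan
    img[1, 1] = np.inf
    return mask_bad(img, bad_type='native')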
| mit |
hetland/xray | xray/core/utils.py | 2 | 11556 | """Internal utilities; not for external use
"""
import contextlib
import datetime
import functools
import itertools
import re
import traceback
import warnings
from collections import Mapping, MutableMapping
import numpy as np
import pandas as pd
from . import ops
from .pycompat import iteritems, OrderedDict, PY3
def alias_warning(old_name, new_name, stacklevel=3): # pragma: no cover
warnings.warn('%s has been deprecated and renamed to %s'
% (old_name, new_name),
FutureWarning, stacklevel=stacklevel)
def function_alias(obj, old_name): # pragma: no cover
@functools.wraps(obj)
def wrapper(*args, **kwargs):
alias_warning(old_name, obj.__name__)
return obj(*args, **kwargs)
return wrapper
def class_alias(obj, old_name): # pragma: no cover
class Wrapper(obj):
def __new__(cls, *args, **kwargs):
alias_warning(old_name, obj.__name__)
return super(Wrapper, cls).__new__(cls, *args, **kwargs)
Wrapper.__name__ = obj.__name__
return Wrapper
def safe_cast_to_index(array):
"""Given an array, safely cast it to a pandas.Index.
If it is already a pandas.Index, return it unchanged.
Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,
this function will not attempt to do automatic type conversion but will
always return an index with dtype=object.
"""
if isinstance(array, pd.Index):
index = array
elif hasattr(array, 'to_index'):
index = array.to_index()
else:
kwargs = {}
if hasattr(array, 'dtype') and array.dtype.kind == 'O':
kwargs['dtype'] = object
index = pd.Index(np.asarray(array), **kwargs)
return index
def maybe_wrap_array(original, new_array):
"""Wrap a transformed array with __array_wrap__ is it can be done safely.
This lets us treat arbitrary functions that take and return ndarray objects
like ufuncs, as long as they return an array with the same shape.
"""
# in case func lost array's metadata
if isinstance(new_array, np.ndarray) and new_array.shape == original.shape:
return original.__array_wrap__(new_array)
else:
return new_array
def equivalent(first, second):
"""Compare two objects for equivalence (identity or equality), using
array_equiv if either object is an ndarray
"""
if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):
return ops.array_equiv(first, second)
else:
return first is second or first == second
def peek_at(iterable):
"""Returns the first value from iterable, as well as a new iterable with
the same content as the original iterable
"""
gen = iter(iterable)
peek = next(gen)
return peek, itertools.chain([peek], gen)
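# A minimal sketch (not part of the public API) of how peek_at is typically
# used: inspect the first element without losing it from the stream.
def _example_peek_at():
    first, rest = peek_at(x * x for x in range(5))
    # first == 0 and `rest` still yields 0, 1, 4, 9, 16
    return first, list(rest)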
def update_safety_check(first_dict, second_dict, compat=equivalent):
"""Check the safety of updating one dictionary with another.
Raises ValueError if dictionaries have non-compatible values for any key,
where compatibility is determined by identity (they are the same item) or
the `compat` function.
Parameters
----------
first_dict, second_dict : dict-like
All items in the second dictionary are checked against for conflicts
against items in the first dictionary.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k, v in iteritems(second_dict):
if k in first_dict and not compat(v, first_dict[k]):
raise ValueError('unsafe to merge dictionaries without '
'overriding values; conflicting key %r' % k)
def remove_incompatible_items(first_dict, second_dict, compat=equivalent):
"""Remove incompatible items from the first dictionary in-place.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k in list(first_dict):
if (k not in second_dict
or (k in second_dict and
not compat(first_dict[k], second_dict[k]))):
del first_dict[k]
def is_dict_like(value):
return hasattr(value, '__getitem__') and hasattr(value, 'keys')
def is_full_slice(value):
return isinstance(value, slice) and value == slice(None)
def combine_pos_and_kw_args(pos_kwargs, kw_kwargs, func_name):
if pos_kwargs is not None:
if not is_dict_like(pos_kwargs):
raise ValueError('the first argument to .%s must be a dictionary'
% func_name)
if kw_kwargs:
raise ValueError('cannot specify both keyword and positional '
'arguments to .%s' % func_name)
return pos_kwargs
else:
return kw_kwargs
_SCALAR_TYPES = (datetime.datetime, datetime.date, datetime.timedelta)
def is_scalar(value):
"""np.isscalar only works on primitive numeric types and (bizarrely)
excludes 0-d ndarrays; this version does more comprehensive checks
"""
if hasattr(value, 'ndim'):
return value.ndim == 0
return (np.isscalar(value) or
isinstance(value, _SCALAR_TYPES) or
value is None)
def dict_equiv(first, second, compat=equivalent):
"""Test equivalence of two dict-like objects. If any of the values are
numpy arrays, compare them correctly.
Parameters
----------
first, second : dict-like
Dictionaries to compare for equality
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
equals : bool
True if the dictionaries are equal
"""
for k in first:
if k not in second or not compat(first[k], second[k]):
return False
for k in second:
if k not in first:
return False
return True
def ordered_dict_intersection(first_dict, second_dict, compat=equivalent):
"""Return the intersection of two dictionaries as a new OrderedDict.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
intersection : OrderedDict
Intersection of the contents.
"""
new_dict = OrderedDict(first_dict)
remove_incompatible_items(new_dict, second_dict, compat)
return new_dict
class SingleSlotPickleMixin(object):
"""Mixin class to add the ability to pickle objects whose state is defined
by a single __slots__ attribute. Only necessary under Python 2.
"""
def __getstate__(self):
return getattr(self, self.__slots__[0])
def __setstate__(self, state):
setattr(self, self.__slots__[0], state)
class Frozen(Mapping, SingleSlotPickleMixin):
"""Wrapper around an object implementing the mapping interface to make it
immutable. If you really want to modify the mapping, the mutable version is
saved under the `mapping` attribute.
"""
__slots__ = ['mapping']
def __init__(self, mapping):
self.mapping = mapping
def __getitem__(self, key):
return self.mapping[key]
def __iter__(self):
return iter(self.mapping)
def __len__(self):
return len(self.mapping)
def __contains__(self, key):
return key in self.mapping
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.mapping)
def FrozenOrderedDict(*args, **kwargs):
return Frozen(OrderedDict(*args, **kwargs))
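# Illustrative sketch only: Frozen gives read-only mapping access while the
# original mutable mapping stays reachable through the `mapping` attribute.
def _example_frozen():
    attrs = FrozenOrderedDict([('units', 'm'), ('long_name', 'height')])
    value = attrs['units']       # read access works
    # attrs['units'] = 'km'      # would raise TypeError: item assignment unsupported
    return value, attrs.mapping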
class SortedKeysDict(MutableMapping, SingleSlotPickleMixin):
"""An wrapper for dictionary-like objects that always iterates over its
items in sorted order by key but is otherwise equivalent to the underlying
mapping.
"""
__slots__ = ['mapping']
def __init__(self, mapping=None):
self.mapping = {} if mapping is None else mapping
def __getitem__(self, key):
return self.mapping[key]
def __setitem__(self, key, value):
self.mapping[key] = value
def __delitem__(self, key):
del self.mapping[key]
def __iter__(self):
return iter(sorted(self.mapping))
def __len__(self):
return len(self.mapping)
def __contains__(self, key):
return key in self.mapping
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.mapping)
def copy(self):
return type(self)(self.mapping.copy())
class ChainMap(MutableMapping, SingleSlotPickleMixin):
"""Partial backport of collections.ChainMap from Python>=3.3
Don't return this from any public APIs, since some of the public methods
for a MutableMapping are missing (they will raise a NotImplementedError)
"""
__slots__ = ['maps']
def __init__(self, *maps):
self.maps = maps
def __getitem__(self, key):
for mapping in self.maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self.maps[0][key] = value
def __delitem__(self, value): # pragma: no cover
raise NotImplementedError
def __iter__(self):
seen = set()
for mapping in self.maps:
for item in mapping:
if item not in seen:
yield item
seen.add(item)
def __len__(self):
        # count unique keys across all maps (iteration already de-duplicates)
        return sum(1 for _ in iter(self))
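# Illustrative sketch only: lookups fall through the maps in order, while
# writes always land in the first map (mirroring collections.ChainMap).
def _example_chainmap():
    defaults = {'color': 'blue', 'size': 1}
    overrides = {'color': 'red'}
    cm = ChainMap(overrides, defaults)
    assert cm['color'] == 'red' and cm['size'] == 1
    cm['size'] = 2               # stored in `overrides`
    return dict((k, cm[k]) for k in cm)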
class NdimSizeLenMixin(object):
"""Mixin class that extends a class that defines a ``shape`` property to
one that also defines ``ndim``, ``size`` and ``__len__``.
"""
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
# cast to int so that shape = () gives size = 1
return int(np.prod(self.shape))
def __len__(self):
try:
return self.shape[0]
except IndexError:
raise TypeError('len() of unsized object')
class NDArrayMixin(NdimSizeLenMixin):
"""Mixin class for making wrappers of N-dimensional arrays that conform to
the ndarray interface required for the data argument to Variable objects.
A subclass should set the `array` property and override one or more of
`dtype`, `shape` and `__getitem__`.
"""
@property
def dtype(self):
return self.array.dtype
@property
def shape(self):
return self.array.shape
def __array__(self, dtype=None):
return np.asarray(self[...], dtype=dtype)
def __getitem__(self, key):
return self.array[key]
def __repr__(self):
return '%s(array=%r)' % (type(self).__name__, self.array)
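# Illustrative sketch only: the smallest possible NDArrayMixin subclass just
# sets `array` and inherits dtype, shape, indexing and len from the mixins.
class _ExampleArrayWrapper(NDArrayMixin):
    def __init__(self, array):
        self.array = np.asarray(array)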
@contextlib.contextmanager
def close_on_error(f):
"""Context manager to ensure that a file opened by xray is closed if an
exception is raised before the user sees the file object.
"""
try:
yield
except Exception:
f.close()
raise
def is_remote_uri(path):
return bool(re.search('^https?\://', path))
| apache-2.0 |
capitalk/treelearn | treelearn/base_ensemble.py | 1 | 5247 | # TreeLearn
#
# Copyright (C) Capital K Partners
# Author: Alex Rubinsteyn
# Contact: alex [at] capitalkpartners [dot] com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
from copy import deepcopy
import numpy as np
import random
import math
from sklearn.base import BaseEstimator
from tree_helpers import clear_sklearn_fields
from typecheck import check_estimator, check_dict, check_int, check_bool
class BaseEnsemble(BaseEstimator):
def __init__(self,
base_model,
num_models,
bagging_percent,
bagging_replacement,
feature_subset_percent,
stacking_model,
randomize_params,
additive,
verbose):
check_estimator(base_model)
check_int(num_models)
self.base_model = base_model
self.num_models = num_models
self.bagging_percent = bagging_percent
self.bagging_replacement = bagging_replacement
self.feature_subset_percent = feature_subset_percent
self.stacking_model = stacking_model
self.randomize_params = randomize_params
self.additive = additive
self.verbose = verbose
self.need_to_fit = True
self.models = None
self.weights = None
def fit(self, X, Y, **fit_keywords):
assert self.base_model is not None
assert self.bagging_percent is not None
assert self.bagging_replacement is not None
assert self.num_models is not None
assert self.verbose is not None
self.need_to_fit = False
self.models = []
X = np.atleast_2d(X)
Y = np.atleast_1d(Y)
n_rows, total_features = X.shape
bagsize = int(math.ceil(self.bagging_percent * n_rows))
if self.additive:
self.weights = np.ones(self.num_models, dtype='float')
else:
self.weights = np.ones(self.num_models, dtype='float') / self.num_models
# each derived class needs to implement this
self._init_fit(X,Y)
if self.feature_subset_percent < 1:
n_features = int(math.ceil(self.feature_subset_percent * total_features))
self.feature_subsets = []
else:
n_features = total_features
self.feature_subsets = None
for i in xrange(self.num_models):
if self.verbose:
print "Training iteration", i
if self.bagging_replacement:
indices = np.random.random_integers(0,n_rows-1,bagsize)
else:
p = np.random.permutation(n_rows)
indices = p[:bagsize]
data_subset = X[indices, :]
if n_features < total_features:
feature_indices = np.random.permutation(total_features)[:n_features]
self.feature_subsets.append(feature_indices)
data_subset = data_subset[:, feature_indices]
label_subset = Y[indices]
model = deepcopy(self.base_model)
# randomize parameters using given functions
for param_name, fn in self.randomize_params.items():
setattr(model, param_name, fn())
model.fit(data_subset, label_subset, **fit_keywords)
self.models.append(model)
self._created_model(X, Y, indices, i, model)
if self.additive:
if n_features < total_features:
Y -= model.predict(X[:, feature_indices])
else:
Y -= model.predict(X)
clear_sklearn_fields(model)
# stacking works by treating the outputs of each base classifier as the
# inputs to an additional meta-classifier
if self.stacking_model:
transformed_data = self.transform(X)
self.stacking_model.fit(transformed_data, Y)
def transform(self, X):
"""Convert each feature vector into a row of predictions."""
assert self.models is not None
X = np.atleast_2d(X)
n_samples, n_features = X.shape
n_models = len(self.models)
pred = np.zeros([n_samples, n_models])
if self.feature_subsets:
for i, model in enumerate(self.models):
feature_indices = self.feature_subsets[i]
X_subset = X[:, feature_indices]
pred[:, i] = model.predict(X_subset)
else:
for i, model in enumerate(self.models):
pred[:, i] = model.predict(X)
return pred
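# Illustrative sketch (not part of the library): how a fitted ensemble's
# transform() output feeds the optional stacking model. `ensemble` is assumed
# to be a fitted BaseEnsemble subclass; how predictions are combined when no
# stacking model is set is defined elsewhere and not shown here.
def _example_stacking_flow(ensemble, X):
    meta_features = ensemble.transform(X)   # shape: (n_samples, n_models)
    if ensemble.stacking_model is not None:
        return ensemble.stacking_model.predict(meta_features)
    return meta_features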
| lgpl-3.0 |
ControCurator/controcurator | models/article.py | 1 | 1711 |
import justext
import string
from pprint import pprint
from elasticsearch import Elasticsearch
from elasticsearch_dsl import DocType, Date, String, Nested
import re
import pandas as pd
from esengine import Document, ArrayField, StringField, ObjectField
es = Elasticsearch(['http://controcurator.org/ess/'], port=80)
# Articles are any kind of document
class getArticleMod(Document):
_es = Elasticsearch(['http://controcurator.org/ess/'], port=80)
_doctype = "article"
_index = "controcurator"
url = String(index='not_analyzed')
class markProcessed(Document):
_es = Elasticsearch(['http://controcurator.org/ess/'], port=80)
_doctype = "twitter"
_index = "crowdynews"
entities = ObjectField()
processed = StringField()
class updateParent(Document):
_es = Elasticsearch(['http://controcurator.org/ess/'], port=80)
_doctype = "twitter"
_index = "crowdynews"
parent = ObjectField(properties={
"url":String(index='not_analyzed')
})
class Article(DocType):
_es = Elasticsearch(['http://controcurator.org/ess/'], port=80)
_type = "article"
source = String(index='not_analyzed')
type = String(index='not_analyzed')
parent = String(index='not_analyzed')
url = String(index='not_analyzed')
published = Date()
document = Nested(properties={
"title":String(index='analyzed'),
"image":String(index='not_analyzed'),
"text":String(index='analyzed'),
"author":String(index='analyzed')
})
comments = Nested(properties={
'author-id': String(index='not_analyzed'),
'author': String(index='not_analyzed'),
'timestamp': Date(),
'text' : String(),
'reply-to': String(),
'id': String()
})
class Meta:
index = 'controcurator'
| mit |
256481788jianghao/share_test | ToolModule.py | 1 | 1814 | import matplotlib.pyplot as plxy
import datetime
import time
class PlotTool:
colors = ['red','blue','yellow','green','black']
def plotxy(self,XYs,xlabel='x',ylabel='y',title='(x,y)'):
line_count = len(XYs)
colIndex = 0
for i in range(line_count):
xy = XYs[i]
x = xy[0]
y = xy[1]
            # wrap around when we run out of predefined colors
            if colIndex >= len(self.colors):
colIndex = 0
plxy.plot(x,y,color=self.colors[colIndex])
colIndex += 1
plxy.xlabel(xlabel)
plxy.ylabel(ylabel)
plxy.show()
# Get the list of dates for the past n days, including today
def getDateList(n=0,removeWeekend = True):
now = datetime.datetime.now()
deltalist = [datetime.timedelta(days=-x) for x in range(n*2+1)]
#print(deltalist)
n_days = [ now + delta for delta in deltalist]
if removeWeekend:
n_days = [x for x in n_days if x.weekday() < 5]
return [ x.strftime('%Y-%m-%d') for x in n_days ][0:n+1]
def dateToNum(date):
return int(date.replace('-',''))
def numToDate(num):
tmp = str(num)
y = tmp[0:4]
m = tmp[4:6]
d = tmp[6:len(tmp)]
return y+'-'+m+'-'+d
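# Illustrative round trip between the two converters above.
def exampleDateRoundtrip():
    num = dateToNum('2017-03-05')    # -> 20170305
    return numToDate(num)            # -> '2017-03-05'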
def getNow():
now = time.time()
return now
def strToFloat(string):
#str_len = len(string)
tmp = string
try:
return float(tmp)
except:
print(tmp)
i = -1
while not tmp[i].isdigit():
tmp = tmp[0:i]
i = i -1
print(tmp)
return float(tmp)
if __name__ == '__main__':
pt = PlotTool()
x=[1,2,3,4]
y=[1,2,3,4]
y2=[t+0.5 for t in y]
y3=[t+0.5 for t in y2]
y4=[t+0.5 for t in y3]
y5=[t+0.5 for t in y4]
y6=[t+0.5 for t in y5]
XYs=[[x,y],[x,y2],[x,y3],[x,y4],[x,y5],[x,y6]]
pt.plotxy(XYs)
#getDateList(10)
| apache-2.0 |
tdhopper/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 65 | 50308 | # Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
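# Illustrative sketch (not used by the estimators): the step-size formulas
# behind the LEARNING_RATE_TYPES table above, as documented in the
# SGDClassifier docstring below. `t0` is the offset chosen internally by the
# Cython solver via Leon Bottou's heuristic; it is taken as given here.
def _example_eta(schedule, t, eta0=0.01, power_t=0.5, t0=1.0):
    if schedule == "constant":
        return eta0
    if schedule == "optimal":
        return 1.0 / (t + t0)
    if schedule == "invscaling":
        return eta0 / (t ** power_t)
    raise ValueError("learning rate %s is not supported. " % schedule)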
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
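    # Minibatch / out-of-core usage sketch (illustrative, not part of the class): the
    # ``partial_fit`` method mentioned in the class docstring updates the model one
    # batch at a time; the first call must receive all class labels via ``classes``.
    #
    #   >>> import numpy as np
    #   >>> clf = SGDClassifier(loss="log")
    #   >>> all_classes = np.array([0, 1])
    #   >>> for X_batch, y_batch in batches:   # ``batches`` is an assumed iterable
    #   ...     clf.partial_fit(X_batch, y_batch, classes=all_classes)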
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
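# Probability-output sketch (illustrative): ``predict_proba`` above is only exposed
# for loss="log" and loss="modified_huber"; with the default hinge loss accessing it
# raises an AttributeError.
#
#   >>> clf = SGDClassifier(loss="log").fit(X, y)   # X, y as in the class docstring
#   >>> proba = clf.predict_proba(X)                # shape (n_samples, n_classes)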
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
stellasia/django-pandasjsonfield | pandasjsonfield/tests.py | 1 | 1876 | # -*- coding: utf-8 -*-
import sys
import pandas as pd
import pandas.util.testing as pdt
from django.db import models
from django.test import TestCase
from .fields import PandasJSONField
from .models import MyModel
class PandasJSONFieldTest(TestCase):
"""
pandasjsonfield tests
"""
model = MyModel
l = [1, 2, 3, 4]
d = {"a": [1, 2, 3, 4],
"b": [11, 12, 13, 14],
"c": [21, 22, 23, 24]}
def test_field_creation_series(self):
""" Test saving a Series to a PandasJSONField
"""
s = pd.Series(self.l)
obj = self.model.objects.create(serie=s)
new_obj = self.model.objects.get(pk=obj.pk)
pdt.assert_series_equal(new_obj.serie, s)
def test_field_creation_dataframe(self):
""" Test saving a DataFrame to a PandasJSONField
"""
df = pd.DataFrame(self.d)
obj = self.model.objects.create(dataframe=df)
new_obj = self.model.objects.get(pk=obj.pk)
pdt.assert_frame_equal(new_obj.dataframe, df)
def test_field_modify_series(self):
""" Test updating a PandasJSONField """
s = pd.Series(self.l)
obj = self.model.objects.create(serie=s)
pdt.assert_series_equal(obj.serie, s)
s2 = pd.Series( [10, 11, 12] )
obj.serie = s2
pdt.assert_series_equal(obj.serie, s2)
obj.save()
pdt.assert_series_equal(obj.serie, s2)
def test_field_modify_dataframe(self):
""" Test updating a PandasJSONField """
df = pd.DataFrame(self.d)
obj = self.model.objects.create(dataframe=df)
pdt.assert_frame_equal(obj.dataframe, df)
df2 = pd.DataFrame( {"g": [10, 11, 12], "h": [20, 21, 22]} )
obj.dataframe = df2
pdt.assert_frame_equal(obj.dataframe, df2)
obj.save()
pdt.assert_frame_equal(obj.dataframe, df2)
| mit |
mkondratyev85/pgm | tests/test_interpolate.py | 1 | 14808 | import unittest
import matplotlib.pylab as plt
import numpy as np
from interpolate import (interpolate2m,
interpolate2m_vect,
interpolate,
interpolate_harmonic,
fill_nans)
class TestInterpolation(unittest.TestCase):
def setUp(self):
pass
def test_fill_nan(self):
m = np.array([[np.nan, 1, 1, 1],
[2, 2, np.nan, 2],
[3, 3, 3, np.nan]])
m2 = fill_nans(m)
self.assertTrue(np.allclose(m2,
np.array([[1.,1.,1.,1.],
[2.,2.,2.,2.],
[3.,3.,3.,3.]])))
def test_interplate(self):
# test simple interpolation onto grid nodes
mxx = np.array([0,1,0,1,0,1])#, .5, .5])
myy = np.array([0,0,1,1,2,2])#, .5,1.5])
v = np.array([0,1,0,1,0,1])#, .5, .5])
result = interpolate(mxx, myy, 3, 2, (v,))[0]
self.assertTrue(np.array_equal(result,
np.array([[0,1],
[0,1],
[0,1]])))
        # test interpolation inside grid cells
mxx = np.array([0.25, 0.25, 0.25, 0.75, 0.75, 0.75])
myy = np.array([0.25, 1, 1.75, 0.25, 1, 1.75])
v = np.array([0, 1, 2, 1, 2, 3])
result = interpolate(mxx, myy, 3, 2, (v,))[0]
self.assertTrue(np.array_equal(result,
np.array([[0,1],
[1,2],
[2,3]])))
mxx = np.array([ 0.25, 0.75, 0.25, 0.75,-0.25,1.25])
myy = np.array([-0.25,-0.25, 2.25, 2.25,1, 1])
v = np.array([ 0, 1, 2, 3, 4, 5])
result = interpolate(mxx, myy, 3, 2, (v,))[0]
self.assertTrue(np.array_equal(result,
np.array([[0.,1.],
[4.,5.],
[2.,3.]])))
# test complicated interpolation
np.random.seed(2)
i_res, j_res = 3,5
mxx = np.random.uniform(0, j_res, 1500)-.5
myy = np.random.uniform(0, i_res, 1500)-.5
v = mxx + myy
result = interpolate(mxx, myy, i_res, j_res, (v,))[0]
result_int = np.array([[0,1,2,3,4],
[1,2,3,4,5],
[2,3,4,5,6]])
diff = result - result_int
self.assertTrue((diff<0.09).all(),
                        ' Interpolated grid should differ from the ideal by less than 0.09' )
#plt.imshow(result, interpolation="none")
#plt.scatter(mxx, myy, s=25, c=v, edgecolors='black')
#plt.show()
##print (result)
def test_interpolate_harmonic(self):
# test simple interpolation onto grid nodes
mxx = np.array([0,1,0,1,0,1])#, .5, .5])
myy = np.array([0,0,1,1,2,2])#, .5,1.5])
v = np.array([0,1,0,1,0,1])#, .5, .5])
result = interpolate_harmonic(mxx, myy, 3, 2, v)
self.assertTrue(np.array_equal(result,
np.array([[0,1],
[0,1],
[0,1]])))
        # test interpolation inside grid cells
mxx = np.array([0.25, 0.25, 0.25, 0.75, 0.75, 0.75])
myy = np.array([0.25, 1, 1.75, 0.25, 1, 1.75])
v = np.array([0, 1, 2, 1, 2, 3])
result = interpolate_harmonic(mxx, myy, 3, 2, v)
self.assertTrue(np.array_equal(result,
np.array([[0,1],
[1,2],
[2,3]])))
mxx = np.array([ 0.25, 0.75, 0.25, 0.75,-0.25,1.25])
myy = np.array([-0.25,-0.25, 2.25, 2.25,1, 1])
v = np.array([ 0, 1, 2, 3, 4, 5])
result = interpolate_harmonic(mxx, myy, 3, 2, v)
self.assertTrue(np.array_equal(result,
np.array([[0.,1.],
[4.,5.],
[2.,3.]])))
# test complicated interpolation
np.random.seed(1)
i_res, j_res = 3,5
mxx = np.random.uniform(0, j_res, 1500)-.5
myy = np.random.uniform(0, i_res, 1500)-.5
v = mxx + myy
result = interpolate_harmonic(mxx, myy, i_res, j_res, v)
result_int = np.array([[0,1,2,3,4],
[1,2,3,4,5],
[2,3,4,5,6]])
diff = result - result_int
self.assertTrue((diff<0.3).all(),
                        ' Interpolated grid should differ from the ideal by less than 0.3' )
def test_interpolate2m_vect(self):
array = np.array([[0, 1, 1.5, 2],
[1, 2, 3, 2],
[2, 2, 3, 4]])
# test interpolation in the center of the cells
self.assertEqual(interpolate2m_vect(np.array([0.5]), np.array([0.5]), array), 1.0)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([0.5]), array), 1.875)
self.assertEqual(interpolate2m_vect(np.array([2.5]), np.array([0.5]), array), 2.125)
self.assertEqual(interpolate2m_vect(np.array([0.5]), np.array([1.5]), array), 1.75)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([1.5]), array), 2.5)
self.assertEqual(interpolate2m_vect(np.array([2.5]), np.array([1.5]), array), 3)
        # test interpolation on nodes of the grid
self.assertEqual(interpolate2m_vect(np.array([0]), np.array([0]), array), 0.0)
self.assertEqual(interpolate2m_vect(np.array([1]), np.array([0]), array), 1.0)
self.assertEqual(interpolate2m_vect(np.array([2]), np.array([0]), array), 1.5)
self.assertEqual(interpolate2m_vect(np.array([3]), np.array([0]), array), 2.0)
self.assertEqual(interpolate2m_vect(np.array([0]), np.array([1]), array), 1.0)
self.assertEqual(interpolate2m_vect(np.array([1]), np.array([1]), array), 2.0)
self.assertEqual(interpolate2m_vect(np.array([2]), np.array([1]), array), 3.0)
self.assertEqual(interpolate2m_vect(np.array([3]), np.array([1]), array), 2.0)
self.assertEqual(interpolate2m_vect(np.array([0]), np.array([2]), array), 2.0)
self.assertEqual(interpolate2m_vect(np.array([1]), np.array([2]), array), 2.0)
self.assertEqual(interpolate2m_vect(np.array([2]), np.array([2]), array), 3.0)
self.assertEqual(interpolate2m_vect(np.array([3]), np.array([2]), array), 4.0)
# test interpolation between nodes of the grid
self.assertEqual(interpolate2m_vect(np.array([.5]), np.array([0]), array), .5)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([0]), array), 1.25)
self.assertEqual(interpolate2m_vect(np.array([2.5]), np.array([0]), array), 1.75)
self.assertEqual(interpolate2m_vect(np.array([.5]), np.array([1]), array), 1.5)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([1]), array), 2.5)
self.assertEqual(interpolate2m_vect(np.array([2.5]), np.array([1]), array), 2.5)
self.assertEqual(interpolate2m_vect(np.array([.5]), np.array([2]), array), 2.0)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([2]), array), 2.5)
self.assertEqual(interpolate2m_vect(np.array([2.5]), np.array([2]), array), 3.5)
self.assertEqual(interpolate2m_vect(np.array([0]), np.array([0.5]), array), 0.5)
self.assertEqual(interpolate2m_vect(np.array([0]), np.array([1.5]), array), 1.5)
self.assertEqual(interpolate2m_vect(np.array([2]), np.array([0.5]), array), 2.25)
self.assertEqual(interpolate2m_vect(np.array([2]), np.array([1.5]), array), 3)
self.assertEqual(interpolate2m_vect(np.array([3]), np.array([0.5]), array), 2)
self.assertEqual(interpolate2m_vect(np.array([3]), np.array([1.5]), array), 3)
# test interpolation outside
array = np.array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
self.assertEqual(interpolate2m_vect(np.array([-0.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m_vect(np.array([3.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m_vect(np.array([-0.5]), np.array([1]), array), 1)
self.assertEqual(interpolate2m_vect(np.array([3.5]), np.array([1]), array), 1)
self.assertEqual(interpolate2m_vect(np.array([-0.5]), np.array([2.5]), array),2.0)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([2.5]), array), 2.0)
self.assertEqual(interpolate2m_vect(np.array([3.5]), np.array([2.5]), array), 2.0)
array = np.array([[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3]])
self.assertEqual(interpolate2m_vect(np.array([-0.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m_vect(np.array([-0.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m_vect(np.array([-0.5]), np.array([2.5]), array), 0.0)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([-0.5]), array), 1.5)
self.assertEqual(interpolate2m_vect(np.array([1.5]), np.array([2.5]), array), 1.5)
self.assertEqual(interpolate2m_vect(np.array([3.5]), np.array([-0.5]), array), 3)
self.assertEqual(interpolate2m_vect(np.array([3.5]), np.array([1]), array), 3)
self.assertEqual(interpolate2m_vect(np.array([3.5]), np.array([2.5]), array), 3)
def test_interpolate2m(self):
array = np.array([[0, 1, 1.5, 2],
[1, 2, 3, 2],
[2, 2, 3, 4]])
# test interpolation in the center of the cells
self.assertEqual(interpolate2m(np.array([0.5]), np.array([0.5]), array), 1.0)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([0.5]), array), 1.875)
self.assertEqual(interpolate2m(np.array([2.5]), np.array([0.5]), array), 2.125)
self.assertEqual(interpolate2m(np.array([0.5]), np.array([1.5]), array), 1.75)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([1.5]), array), 2.5)
self.assertEqual(interpolate2m(np.array([2.5]), np.array([1.5]), array), 3)
        # test interpolation on nodes of the grid
self.assertEqual(interpolate2m(np.array([0]), np.array([0]), array), 0.0)
self.assertEqual(interpolate2m(np.array([1]), np.array([0]), array), 1.0)
self.assertEqual(interpolate2m(np.array([2]), np.array([0]), array), 1.5)
self.assertEqual(interpolate2m(np.array([3]), np.array([0]), array), 2.0)
self.assertEqual(interpolate2m(np.array([0]), np.array([1]), array), 1.0)
self.assertEqual(interpolate2m(np.array([1]), np.array([1]), array), 2.0)
self.assertEqual(interpolate2m(np.array([2]), np.array([1]), array), 3.0)
self.assertEqual(interpolate2m(np.array([3]), np.array([1]), array), 2.0)
self.assertEqual(interpolate2m(np.array([0]), np.array([2]), array), 2.0)
self.assertEqual(interpolate2m(np.array([1]), np.array([2]), array), 2.0)
self.assertEqual(interpolate2m(np.array([2]), np.array([2]), array), 3.0)
self.assertEqual(interpolate2m(np.array([3]), np.array([2]), array), 4.0)
# test interpolation between nodes of the grid
self.assertEqual(interpolate2m(np.array([.5]), np.array([0]), array), .5)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([0]), array), 1.25)
self.assertEqual(interpolate2m(np.array([2.5]), np.array([0]), array), 1.75)
self.assertEqual(interpolate2m(np.array([.5]), np.array([1]), array), 1.5)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([1]), array), 2.5)
self.assertEqual(interpolate2m(np.array([2.5]), np.array([1]), array), 2.5)
self.assertEqual(interpolate2m(np.array([.5]), np.array([2]), array), 2.0)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([2]), array), 2.5)
self.assertEqual(interpolate2m(np.array([2.5]), np.array([2]), array), 3.5)
self.assertEqual(interpolate2m(np.array([0]), np.array([0.5]), array), 0.5)
self.assertEqual(interpolate2m(np.array([0]), np.array([1.5]), array), 1.5)
self.assertEqual(interpolate2m(np.array([2]), np.array([0.5]), array), 2.25)
self.assertEqual(interpolate2m(np.array([2]), np.array([1.5]), array), 3)
self.assertEqual(interpolate2m(np.array([3]), np.array([0.5]), array), 2)
self.assertEqual(interpolate2m(np.array([3]), np.array([1.5]), array), 3)
# test interpolation outside
array = np.array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
self.assertEqual(interpolate2m(np.array([-0.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m(np.array([3.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m(np.array([-0.5]), np.array([1]), array), 1)
self.assertEqual(interpolate2m(np.array([3.5]), np.array([1]), array), 1)
self.assertEqual(interpolate2m(np.array([-0.5]), np.array([2.5]), array),2.0)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([2.5]), array), 2.0)
self.assertEqual(interpolate2m(np.array([3.5]), np.array([2.5]), array), 2.0)
array = np.array([[0, 1, 2, 3],
[0, 1, 2, 3],
[0, 1, 2, 3]])
self.assertEqual(interpolate2m(np.array([-0.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m(np.array([-0.5]), np.array([-0.5]), array), 0.0)
self.assertEqual(interpolate2m(np.array([-0.5]), np.array([2.5]), array), 0.0)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([-0.5]), array), 1.5)
self.assertEqual(interpolate2m(np.array([1.5]), np.array([2.5]), array), 1.5)
self.assertEqual(interpolate2m(np.array([3.5]), np.array([-0.5]), array), 3)
self.assertEqual(interpolate2m(np.array([3.5]), np.array([1]), array), 3)
self.assertEqual(interpolate2m(np.array([3.5]), np.array([2.5]), array), 3)
if __name__=='__main__':
unittest.main()
| mit |
cg-laser/geoceLDF | atmosphere.py | 1 | 62743 | import numpy as np
import os
import pickle
import unittest
default_curved = True
default_model = 17
r_e = 6.371 * 1e6 # radius of Earth
"""
All functions use "grams" and "meters"; only the functions that receive and
return "atmospheric depth" use the unit "g/cm^2".
The atmospheric density models are the ones used in CORSIKA; their parameters are
documented in the CORSIKA manual. The parameters for the Auger (Malargue) atmospheres
are documented in detail in GAP2011-133. The May and October atmospheres describe the
annual average best.
"""
h_max = 112829.2 # height above sea level where the mass overburden vanishes
atm_models = { # US standard after Linsley
1: {'a': 1e4 * np.array([-186.555305, -94.919, 0.61289, 0., 0.01128292]),
'b': 1e4 * np.array([1222.6562, 1144.9069, 1305.5948, 540.1778, 1.]),
'c': 1e-2 * np.array([994186.38, 878153.55, 636143.04, 772170.16, 1.e9]),
'h': 1e3 * np.array([4., 10., 40., 100.])
},
# US standard after Keilhauer
17: {'a': 1e4 * np.array([-149.801663, -57.932486, 0.63631894, 4.35453690e-4, 0.01128292]),
'b': 1e4 * np.array([1183.6071, 1143.0425, 1322.9748, 655.67307, 1.]),
'c': 1e-2 * np.array([954248.34, 800005.34, 629568.93, 737521.77, 1.e9]),
'h': 1e3 * np.array([7., 11.4, 37., 100.])
},
# Malargue January
18: {'a': 1e4 * np.array([-136.72575606, -31.636643044, 1.8890234035, 3.9201867984e-4, 0.01128292]),
'b': 1e4 * np.array([1174.8298334, 1204.8233453, 1637.7703583, 735.96095023, 1.]),
'c': 1e-2 * np.array([982815.95248, 754029.87759, 594416.83822, 733974.36972, 1e9]),
'h': 1e3 * np.array([9.4, 15.3, 31.6, 100.])
},
# Malargue February
19: {'a': 1e4 * np.array([-137.25655862, -31.793978896, 2.0616227547, 4.1243062289e-4, 0.01128292]),
'b': 1e4 * np.array([1176.0907565, 1197.8951104, 1646.4616955, 755.18728657, 1.]),
'c': 1e-2 * np.array([981369.6125, 756657.65383, 592969.89671, 731345.88332, 1.e9]),
'h': 1e3 * np.array([9.2, 15.4, 31., 100.])
},
# Malargue March
20: {'a': 1e4 * np.array([-132.36885162, -29.077046629, 2.090501509, 4.3534337925e-4, 0.01128292]),
'b': 1e4 * np.array([1172.6227784, 1215.3964677, 1617.0099282, 769.51991638, 1.]),
'c': 1e-2 * np.array([972654.0563, 742769.2171, 595342.19851, 728921.61954, 1.e9]),
'h': 1e3 * np.array([9.6, 15.2, 30.7, 100.])
},
# Malargue April
21: {'a': 1e4 * np.array([-129.9930412, -21.847248438, 1.5211136484, 3.9559055121e-4, 0.01128292]),
'b': 1e4 * np.array([1172.3291878, 1250.2922774, 1542.6248413, 713.1008285, 1.]),
'c': 1e-2 * np.array([962396.5521, 711452.06673, 603480.61835, 735460.83741, 1.e9]),
'h': 1e3 * np.array([10., 14.9, 32.6, 100.])
},
# Malargue May
22: {'a': 1e4 * np.array([-125.11468467, -14.591235621, 0.93641128677, 3.2475590985e-4, 0.01128292]),
'b': 1e4 * np.array([1169.9511302, 1277.6768488, 1493.5303781, 617.9660747, 1.]),
'c': 1e-2 * np.array([947742.88769, 685089.57509, 609640.01932, 747555.95526, 1.e9]),
'h': 1e3 * np.array([10.2, 15.1, 35.9, 100.])
},
# Malargue June
23: {'a': 1e4 * np.array([-126.17178851, -7.7289852811, 0.81676828638, 3.1947676891e-4, 0.01128292]),
'b': 1e4 * np.array([1171.0916276, 1295.3516434, 1455.3009344, 595.11713507, 1.]),
'c': 1e-2 * np.array([940102.98842, 661697.57543, 612702.0632, 749976.26832, 1.e9]),
'h': 1e3 * np.array([10.1, 16., 36.7, 100.])
},
# Malargue July
24: {'a': 1e4 * np.array([-126.17216789, -8.6182537514, 0.74177836911, 2.9350702097e-4, 0.01128292]),
'b': 1e4 * np.array([1172.7340688, 1258.9180079, 1450.0537141, 583.07727715, 1.]),
'c': 1e-2 * np.array([934649.58886, 672975.82513, 614888.52458, 752631.28536, 1.e9]),
'h': 1e3 * np.array([9.6, 16.5, 37.4, 100.])
},
# Malargue August
25: {'a': 1e4 * np.array([-123.27936204, -10.051493041, 0.84187346153, 3.2422546759e-4, 0.01128292]),
'b': 1e4 * np.array([1169.763036, 1251.0219808, 1436.6499372, 627.42169844, 1.]),
'c': 1e-2 * np.array([931569.97625, 678861.75136, 617363.34491, 746739.16141, 1.e9]),
'h': 1e3 * np.array([9.6, 15.9, 36.3, 100.])
},
# Malargue September
26: {'a': 1e4 * np.array([-126.94494665, -9.5556536981, 0.74939405052, 2.9823116961e-4, 0.01128292]),
'b': 1e4 * np.array([1174.8676453, 1251.5588529, 1440.8257549, 606.31473165, 1.]),
'c': 1e-2 * np.array([936953.91919, 678906.60516, 618132.60561, 750154.67709, 1.e9]),
'h': 1e3 * np.array([9.5, 15.9, 36.3, 100.])
},
# Malargue October
27: {'a': 1e4 * np.array([-133.13151125, -13.973209265, 0.8378263431, 3.111742176e-4, 0.01128292]),
'b': 1e4 * np.array([1176.9833473, 1244.234531, 1464.0120855, 622.11207419, 1.]),
'c': 1e-2 * np.array([954151.404, 692708.89816, 615439.43936, 747969.08133, 1.e9]),
'h': 1e3 * np.array([9.5, 15.5, 36.5, 100.])
},
# Malargue November
28: {'a': 1e4 * np.array([-134.72208165, -18.172382908, 1.1159806845, 3.5217025515e-4, 0.01128292]),
'b': 1e4 * np.array([1175.7737972, 1238.9538504, 1505.1614366, 670.64752105, 1.]),
'c': 1e-2 * np.array([964877.07766, 706199.57502, 610242.24564, 741412.74548, 1.e9]),
'h': 1e3 * np.array([9.6, 15.3, 34.6, 100.])
},
# Malargue December
29: {'a': 1e4 * np.array([-135.40825209, -22.830409026, 1.4223453493, 3.7512921774e-4, 0.01128292]),
'b': 1e4 * np.array([1174.644971, 1227.2753683, 1585.7130562, 691.23389637, 1.]),
'c': 1e-2 * np.array([973884.44361, 723759.74682, 600308.13983, 738390.20525, 1.e9]),
'h': 1e3 * np.array([9.6, 15.6, 33.3, 100.])
}
}
def get_auger_monthly_model(month):
""" Helper function to get the correct model number for monthly Auger atmospheres """
return month + 17
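# Example (the mapping follows directly from the atm_models table above):
#   get_auger_monthly_model(1)  -> 18  (Malargue January)
#   get_auger_monthly_model(10) -> 27  (Malargue October)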
def get_height_above_ground(d, zenith, observation_level=0):
""" returns the perpendicular height above ground for a distance d from ground at a given zenith angle """
r = r_e + observation_level
x = d * np.sin(zenith)
y = d * np.cos(zenith) + r
h = (x ** 2 + y ** 2) ** 0.5 - r
# print "d = %.1f, obs = %.1f, z = %.2f -> h = %.1f" % (d, observation_level, np.rad2deg(zenith), h)
return h
def get_distance_for_height_above_ground(h, zenith, observation_level=0):
""" inverse of get_height_above_ground() """
r = r_e + observation_level
return (h ** 2 + 2 * r * h + r ** 2 * np.cos(zenith) ** 2) ** 0.5 - r * np.cos(zenith)
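# Geometry sketch (illustrative): the two helpers above are inverses of each other,
# so converting a height to a slant distance and back reproduces the input:
#   >>> h, z = 10000., np.deg2rad(60.)
#   >>> d = get_distance_for_height_above_ground(h, z)
#   >>> np.isclose(get_height_above_ground(d, z), h)
#   True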
def get_vertical_height(at, model=default_model):
""" input: atmosphere above in g/cm^2 [e.g. Xmax]
output: height in m """
return _get_vertical_height(at * 1e4, model=model)
def _get_vertical_height(at, model=default_model):
if np.shape(at) == ():
T = _get_i_at(at, model=model)
else:
T = np.zeros(len(at))
for i, at in enumerate(at):
T[i] = _get_i_at(at, model=model)
return T
def _get_i_at(at, model=default_model):
a = atm_models[model]['a']
b = atm_models[model]['b']
c = atm_models[model]['c']
layers = atm_models[model]['h']
if at > _get_atmosphere(layers[0], model=model):
i = 0
elif at > _get_atmosphere(layers[1], model=model):
i = 1
elif at > _get_atmosphere(layers[2], model=model):
i = 2
elif at > _get_atmosphere(layers[3], model=model):
i = 3
else:
i = 4
if i == 4:
h = -1. * c[i] * (at - a[i]) / b[i]
else:
h = -1. * c[i] * np.log((at - a[i]) / b[i])
return h
def get_atmosphere(h, model=default_model):
""" returns the (vertical) amount of atmosphere above the height h above see level
in units of g/cm^2
input: height above sea level in meter"""
return _get_atmosphere(h, model=model) * 1e-4
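# Usage sketch (approximate numbers, US standard atmosphere after Keilhauer, model 17):
#   >>> get_atmosphere(0.)       # vertical overburden at sea level, roughly 1.0e3 g/cm^2
#   >>> get_atmosphere(10000.)   # only a few hundred g/cm^2 remain above 10 km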
def _get_atmosphere(h, model=default_model):
a = atm_models[model]['a']
b = atm_models[model]['b']
c = atm_models[model]['c']
layers = atm_models[model]['h']
y = np.where(h < layers[0], a[0] + b[0] * np.exp(-1 * h / c[0]), a[1] + b[1] * np.exp(-1 * h / c[1]))
y = np.where(h < layers[1], y, a[2] + b[2] * np.exp(-1 * h / c[2]))
y = np.where(h < layers[2], y, a[3] + b[3] * np.exp(-1 * h / c[3]))
y = np.where(h < layers[3], y, a[4] - b[4] * h / c[4])
y = np.where(h < h_max, y, 0)
return y
def get_density(h, allow_negative_heights=True, model=default_model):
""" returns the atmospheric density [g/m^3] for the height h above see level"""
b = atm_models[model]['b']
c = atm_models[model]['c']
layers = atm_models[model]['h']
y = np.zeros_like(h, dtype=np.float)
if not allow_negative_heights:
y *= np.nan # set all requested densities for h < 0 to nan
y = np.where(h < 0, y, b[0] * np.exp(-1 * h / c[0]) / c[0])
else:
y = b[0] * np.exp(-1 * h / c[0]) / c[0]
y = np.where(h < layers[0], y, b[1] * np.exp(-1 * h / c[1]) / c[1])
y = np.where(h < layers[1], y, b[2] * np.exp(-1 * h / c[2]) / c[2])
y = np.where(h < layers[2], y, b[3] * np.exp(-1 * h / c[3]) / c[3])
y = np.where(h < layers[3], y, b[4] / c[4])
y = np.where(h < h_max, y, 0)
return y
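# Usage sketch: densities are returned in g/m^3, i.e. sea-level air is of the order of
# 1.2e3 g/m^3 (~1.2 kg/m^3), and the value drops with altitude:
#   >>> get_density(0.)        # roughly 1.2e3 g/m^3
#   >>> get_density(10000.)    # considerably smaller at 10 km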
def get_density_from_barometric_formula(hh):
""" returns the atmospheric density [g/m^3] for the height h abolve see level
according to https://en.wikipedia.org/wiki/Barometric_formula"""
if isinstance(hh, float):
hh = np.array([hh])
R = 8.31432 # universal gas constant for air: 8.31432 N m/(mol K)
g0 = 9.80665 # gravitational acceleration (9.80665 m/s2)
M = 0.0289644 # molar mass of Earth's air (0.0289644 kg/mol)
rhob = [1.2250, 0.36391, 0.08803, 0.01322, 0.00143, 0.00086, 0.000064]
Tb = [288.15, 216.65, 216.65, 228.65, 270.65, 270.65, 214.65]
Lb = [-0.0065, 0, 0.001, 0.0028, 0, -0.0028, -0.002]
hb = [0, 11000, 20000, 32000, 47000, 51000, 71000]
def rho1(h, i): # for Lb != 0
return rhob[i] * (Tb[i] / (Tb[i] + Lb[i] * (h - hb[i]))) ** (1 + (g0 * M) / (R * Lb[i]))
def rho2(h, i): # for Lb == 0
return rhob[i] * np.exp(-g0 * M * (h - hb[i]) / (R * Tb[i]))
densities = np.zeros_like(hh)
for i, h in enumerate(hh):
if (h < 0):
densities[i] = np.nan
elif(h > 86000):
densities[i] = 0
else:
t = h - hb
# print "t = ", t, "h = ", h
index = np.argmin(t[t >= 0])
if Lb[index] == 0:
densities[i] = rho2(h, index)
else:
densities[i] = rho1(h, index)
# print "h = ", h, " index = ", index, " density = ", densities[i]
return densities * 1e3
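# Consistency sketch (illustrative): both density parameterisations describe a
# US-standard-like atmosphere and can be compared directly, e.g.
#   >>> h = np.linspace(0., 10000., 11)
#   >>> rho_layered = get_density(h)                        # CORSIKA layer model
#   >>> rho_baro = get_density_from_barometric_formula(h)   # barometric formula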
def get_atmosphere_upper_limit(model=default_model):
""" returns the altitude where the mass overburden vanishes """
from scipy import optimize
from functools import partial
return optimize.newton(partial(_get_atmosphere, model=model), x0=112.8e3)
def get_n(h, n0=(1 + 2.92e-4), allow_negative_heights=False,
model=1):
return (n0 - 1) * get_density(h, allow_negative_heights=allow_negative_heights,
model=model) / get_density(0, model=model) + 1
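# Usage sketch: the refractivity (n - 1) is scaled with the local density, so at sea
# level get_n returns exactly the chosen n0 and the value approaches 1 with altitude:
#   >>> get_n(0.)        # == n0 = 1 + 2.92e-4 by construction
#   >>> get_n(10000.)    # closer to 1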
class Atmosphere():
def __init__(self, model=17, n_taylor=5, curved=True, zenith_numeric=np.deg2rad(83)):
import sys
print "model is ", model
self.model = model
self.curved = curved
self.n_taylor = n_taylor
self.__zenith_numeric = zenith_numeric
self.b = atm_models[model]['b']
self.c = atm_models[model]['c']
self.number_of_zeniths = 101
hh = atm_models[model]['h']
self.h = np.append([0], hh)
if curved:
folder = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(folder, "constants_%02i_%i.picke" % (self.model, n_taylor))
print "searching constants at ", filename
if os.path.exists(filename):
print "reading constants from ", filename
fin = open(filename, "r")
self.a, self.d = pickle.load(fin)
fin.close()
if(len(self.a) != self.number_of_zeniths):
os.remove(filename)
print "constants outdated, please rerun to calculate new constants"
sys.exit(0)
self.a_funcs = []
zeniths = np.arccos(np.linspace(0, 1, self.number_of_zeniths))
from scipy.interpolate import interp1d
mask = zeniths < np.deg2rad(83)
for i in xrange(5):
self.a_funcs.append(interp1d(zeniths[mask], self.a[..., i][mask], kind='cubic'))
else:
# self.d = self.__calculate_d()
self.d = np.zeros(self.number_of_zeniths)
self.a = self.__calculate_a()
fin = open(filename, "w")
pickle.dump([self.a, self.d], fin)
fin.close()
print "all constants calculated, exiting now... please rerun your analysis"
sys.exit(0)
def __calculate_a(self,):
zeniths = np.arccos(np.linspace(0, 1, self.number_of_zeniths))
a = np.zeros((self.number_of_zeniths, 5))
self.curved = True
self.__zenith_numeric = 0
for iZ, z in enumerate(zeniths):
print "calculating constants for %.02f deg zenith angle (iZ = %i, nT = %i)..." % (np.rad2deg(z), iZ, self.n_taylor)
a[iZ] = self.__get_a(z)
print "\t... a = ", a[iZ], " iZ = ", iZ
return a
def __get_a(self, zenith):
a = np.zeros(5)
b = self.b
c = self.c
h = self.h
a[0] = self._get_atmosphere_numeric([zenith], h_low=h[0]) - b[0] * self._get_dldh(h[0], zenith, 0)
a[1] = self._get_atmosphere_numeric([zenith], h_low=h[1]) - b[1] * np.exp(-h[1] / c[1]) * self._get_dldh(h[1], zenith, 1)
a[2] = self._get_atmosphere_numeric([zenith], h_low=h[2]) - b[2] * np.exp(-h[2] / c[2]) * self._get_dldh(h[2], zenith, 2)
a[3] = self._get_atmosphere_numeric([zenith], h_low=h[3]) - b[3] * np.exp(-h[3] / c[3]) * self._get_dldh(h[3], zenith, 3)
a[4] = self._get_atmosphere_numeric([zenith], h_low=h[4]) + b[4] * h[4] / c[4] * self._get_dldh(h[4], zenith, 4)
return a
def _get_dldh(self, h, zenith, iH):
if iH < 4:
c = self.c[iH]
st = np.sin(zenith)
ct = np.cos(zenith)
dldh = np.ones_like(zenith) / ct
if self.n_taylor >= 1:
dldh += -(st ** 2 / ct ** 3 * (c + h) / r_e)
if self.n_taylor >= 2:
tmp = 3. / 2. * st ** 2 * (2 * c ** 2 + 2 * c * h + h ** 2) / (r_e ** 2 * ct ** 5)
dldh += tmp
if self.n_taylor >= 3:
t1 = 6 * c ** 3 + 6 * c ** 2 * h + 3 * c * h ** 2 + h ** 3
tmp = st ** 2 / (2 * r_e ** 3 * ct ** 7) * (ct ** 2 - 5) * t1
dldh += tmp
if self.n_taylor >= 4:
t1 = 24 * c ** 4 + 24 * c ** 3 * h + 12 * c ** 2 * h ** 2 + 4 * c * h ** 3 + h ** 4
tmp = -1. * st ** 2 * 5. / (8. * r_e ** 4 * ct ** 9) * (3 * ct ** 2 - 7) * t1
dldh += tmp
if self.n_taylor >= 5:
t1 = 120 * c ** 5 + 120 * c ** 4 * h + 60 * c ** 3 * h ** 2 + 20 * c ** 2 * h ** 3 + 5 * c * h ** 4 + h ** 5
tmp = st ** 2 * (ct ** 4 - 14. * ct ** 2 + 21.) * (-3. / 8.) / (r_e ** 5 * ct ** 11) * t1
dldh += tmp
elif(iH == 4):
c = self.c[iH]
st = np.sin(zenith)
ct = np.cos(zenith)
dldh = np.ones_like(zenith) / ct
if self.n_taylor >= 1:
dldh += (-0.5 * st ** 2 / ct ** 3 * h / r_e)
if self.n_taylor >= 2:
dldh += 0.5 * st ** 2 / ct ** 5 * (h / r_e) ** 2
if self.n_taylor >= 3:
dldh += 1. / 8. * (st ** 2 * (ct ** 2 - 5) * h ** 3) / (r_e ** 3 * ct ** 7)
if self.n_taylor >= 4:
tmp2 = -1. / 8. * st ** 2 * (3 * ct ** 2 - 7) * (h / r_e) ** 4 / ct ** 9
dldh += tmp2
if self.n_taylor >= 5:
tmp2 = -1. / 16. * st ** 2 * (ct ** 4 - 14 * ct ** 2 + 21) * (h / r_e) ** 5 / ct ** 11
dldh += tmp2
else:
print "ERROR, height index our of bounds"
import sys
sys.exit(-1)
# print "get dldh for h= %.8g, z = %.8g, iH=%i -> %.7f" % (h, np.rad2deg(zenith), iH, dldh)
return dldh
def __get_method_mask(self, zenith):
if not self.curved:
return np.ones_like(zenith, dtype=np.bool), np.zeros_like(zenith, dtype=np.bool), np.zeros_like(zenith, dtype=np.bool)
mask_flat = np.zeros_like(zenith, dtype=np.bool)
mask_taylor = zenith < self.__zenith_numeric
mask_numeric = zenith >= self.__zenith_numeric
return mask_flat, mask_taylor, mask_numeric
def __get_height_masks(self, hh):
# mask0 = (hh >= 0) & (hh < atm_models[self.model]['h'][0])
mask0 = (hh < atm_models[self.model]['h'][0])
mask1 = (hh >= atm_models[self.model]['h'][0]) & (hh < atm_models[self.model]['h'][1])
mask2 = (hh >= atm_models[self.model]['h'][1]) & (hh < atm_models[self.model]['h'][2])
mask3 = (hh >= atm_models[self.model]['h'][2]) & (hh < atm_models[self.model]['h'][3])
mask4 = (hh >= atm_models[self.model]['h'][3]) & (hh < h_max)
mask5 = hh >= h_max
return np.array([mask0, mask1, mask2, mask3, mask4, mask5])
def __get_X_masks(self, X, zenith):
mask0 = X > self._get_atmosphere(zenith, atm_models[self.model]['h'][0])
mask1 = (X <= self._get_atmosphere(zenith, atm_models[self.model]['h'][0])) & \
(X > self._get_atmosphere(zenith, atm_models[self.model]['h'][1]))
mask2 = (X <= self._get_atmosphere(zenith, atm_models[self.model]['h'][1])) & \
(X > self._get_atmosphere(zenith, atm_models[self.model]['h'][2]))
mask3 = (X <= self._get_atmosphere(zenith, atm_models[self.model]['h'][2])) & \
(X > self._get_atmosphere(zenith, atm_models[self.model]['h'][3]))
mask4 = (X <= self._get_atmosphere(zenith, atm_models[self.model]['h'][3])) & \
(X > self._get_atmosphere(zenith, h_max))
mask5 = X <= 0
return np.array([mask0, mask1, mask2, mask3, mask4, mask5])
def __get_arguments(self, mask, *args):
tmp = []
ones = np.ones(np.array(mask).size)
for a in args:
if np.shape(a) == ():
tmp.append(a * ones)
else:
tmp.append(a[mask])
return tmp
def get_atmosphere(self, zenith, h_low=0., h_up=np.infty):
""" returns the atmosphere for an air shower with given zenith angle (in g/cm^2) """
return self._get_atmosphere(zenith, h_low=h_low, h_up=h_up) * 1e-4
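    # Usage sketch (illustrative; note that the zenith argument is expected to be
    # array-like and that curved=True relies on pre-computed cached constants):
    #
    #   >>> atm = Atmosphere(model=17)
    #   >>> atm.get_atmosphere(np.array([0.]))                 # vertical depth, ~1e3 g/cm^2
    #   >>> atm.get_atmosphere(np.array([np.deg2rad(60.)]))    # roughly twice the vertical value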
def _get_atmosphere(self, zenith, h_low=0., h_up=np.infty):
mask_flat, mask_taylor, mask_numeric = self.__get_method_mask(zenith)
mask_finite = np.array((h_up * np.ones_like(zenith)) < h_max)
is_mask_finite = np.sum(mask_finite)
tmp = np.zeros_like(zenith)
if np.sum(mask_numeric):
# print "getting numeric"
tmp[mask_numeric] = self._get_atmosphere_numeric(*self.__get_arguments(mask_numeric, zenith, h_low, h_up))
if np.sum(mask_taylor):
# print "getting taylor"
tmp[mask_taylor] = self._get_atmosphere_taylor(*self.__get_arguments(mask_taylor, zenith, h_low))
if(is_mask_finite):
# print "\t is finite"
mask_tmp = np.squeeze(mask_finite[mask_taylor])
tmp2 = self._get_atmosphere_taylor(*self.__get_arguments(mask_taylor, zenith, h_up))
tmp[mask_tmp] = tmp[mask_tmp] - np.array(tmp2)
if np.sum(mask_flat):
# print "getting flat atm"
tmp[mask_flat] = self._get_atmosphere_flat(*self.__get_arguments(mask_flat, zenith, h_low))
if(is_mask_finite):
mask_tmp = np.squeeze(mask_finite[mask_flat])
tmp2 = self._get_atmosphere_flat(*self.__get_arguments(mask_flat, zenith, h_up))
tmp[mask_tmp] = tmp[mask_tmp] - np.array(tmp2)
return tmp
def __get_zenith_a_indices(self, zeniths):
n = self.number_of_zeniths - 1
cosz_bins = np.linspace(0, n, self.number_of_zeniths, dtype=np.int)
cosz = np.array(np.round(np.cos(zeniths) * n), dtype=np.int)
tmp = np.squeeze([np.argwhere(t == cosz_bins) for t in cosz])
return tmp
def __get_a_from_cache(self, zeniths):
n = self.number_of_zeniths - 1
cosz_bins = np.linspace(0, n, self.number_of_zeniths, dtype=np.int)
cosz = np.array(np.round(np.cos(zeniths) * n), dtype=np.int)
a_indices = np.squeeze([np.argwhere(t == cosz_bins) for t in cosz])
cosz_bins_num = np.linspace(0, 1, self.number_of_zeniths)
# print "correction = ", (cosz_bins_num[a_indices] / np.cos(zeniths))
# print "a = ", self.a[a_indices]
a = ((self.a[a_indices]).T * (cosz_bins_num[a_indices] / np.cos(zeniths))).T
return a
def __get_a_from_interpolation(self, zeniths):
a = np.zeros((len(zeniths), 5))
for i in xrange(5):
a[..., i] = self.a_funcs[i](zeniths)
return a
def plot_a(self):
import matplotlib.pyplot as plt
zeniths = np.arccos(np.linspace(0, 1, self.number_of_zeniths))
mask = zeniths < np.deg2rad(83)
fig, ax = plt.subplots(1, 1)
x = np.rad2deg(zeniths[mask])
# mask2 = np.array([0, 1] * (np.sum(mask) / 2), dtype=np.bool)
ax.plot(x, self.a[..., 0][mask], ".", label="a0")
ax.plot(x, self.a[..., 1][mask], ".", label="a1")
ax.plot(x, self.a[..., 2][mask], ".", label="a2")
ax.plot(x, self.a[..., 3][mask], ".", label="a3")
ax.plot(x, self.a[..., 4][mask], ".", label="a4")
ax.set_xlim(0, 84)
ax.legend()
plt.tight_layout()
from scipy.interpolate import interp1d
for i in xrange(5):
y = self.a[..., i][mask]
f2 = interp1d(x, y, kind='cubic')
xxx = np.linspace(0, 81, 100)
ax.plot(xxx, f2(xxx), "-")
ax.set_ylim(-1e8, 1e8)
plt.show()
# tmp = (f2(x[~mask2][1:-1]) - y[~mask2][1:-1]) / y[~mask2][1:-1]
# print tmp.mean(), tmp.std()
# res = optimize.minimize(obj, x0=(-0.18, 1, 90))
# print res
def _get_atmosphere_taylor(self, zenith, h_low=0.):
b = self.b
c = self.c
# a_indices = self.__get_zenith_a_indices(zenith)
a = self.__get_a_from_interpolation(zenith)
# print "a indices are", a_indices , "-> ", a
masks = self.__get_height_masks(h_low)
tmp = np.zeros_like(zenith)
for iH, mask in enumerate(masks):
if(np.sum(mask)):
if(np.array(h_low).size == 1):
h = h_low
else:
h = h_low[mask]
# print "getting atmosphere taylor for layer ", iH
if iH < 4:
dldh = self._get_dldh(h, zenith[mask], iH)
tmp[mask] = np.array([a[..., iH][mask] + b[iH] * np.exp(-1 * h / c[iH]) * dldh]).squeeze()
elif iH == 4:
dldh = self._get_dldh(h, zenith[mask], iH)
tmp[mask] = np.array([a[..., iH][mask] - b[iH] * h / c[iH] * dldh])
else:
tmp[mask] = np.zeros(np.sum(mask))
return tmp
def _get_atmosphere_numeric(self, zenith, h_low=0, h_up=np.infty):
zenith = np.array(zenith)
tmp = np.zeros_like(zenith)
for i in xrange(len(tmp)):
from scipy import integrate
if(np.array(h_up).size == 1):
t_h_up = h_up
else:
t_h_up = h_up[i]
if(np.array(h_low).size == 1):
t_h_low = h_low
else:
t_h_low = h_low[i]
# if(np.array(zenith).size == 1):
# z = zenith
# else:
# z = zenith[i]
z = zenith[i]
# t_h_low = h_low[i]
# t_h_up = h_up[i]
if t_h_up <= t_h_low:
print "WARNING _get_atmosphere_numeric(): upper limit less than lower limit"
return np.nan
if t_h_up == np.infty:
t_h_up = h_max
b = t_h_up
d_low = get_distance_for_height_above_ground(t_h_low, z)
d_up = get_distance_for_height_above_ground(b, z)
# d_up_1 = d_low + 2.e3
# full_atm = 0
# points = get_distance_for_height_above_ground(atm_models[self.model]['h'], z).tolist()
full_atm = integrate.quad(self._get_density4,
d_low, d_up, args=(z,),
limit=500)[0]
# if d_up_1 > d_up:
# else:
# full_atm = integrate.quad(self._get_density4,
# d_low, d_up_1, args=(z,), limit=100, epsabs=1e-4)[0]
# full_atm += integrate.quad(self._get_density4,
# d_up_1, d_up, args=(z,), limit=100, epsabs=1e-4)[0]
# print "getting atmosphere numeric from ", d_low, "to ", d_up, ", = ", full_atm * 1e-4
tmp[i] = full_atm
return tmp
def _get_atmosphere_flat(self, zenith, h=0):
a = atm_models[self.model]['a']
b = atm_models[self.model]['b']
c = atm_models[self.model]['c']
layers = atm_models[self.model]['h']
y = np.where(h < layers[0], a[0] + b[0] * np.exp(-1 * h / c[0]), a[1] + b[1] * np.exp(-1 * h / c[1]))
y = np.where(h < layers[1], y, a[2] + b[2] * np.exp(-1 * h / c[2]))
y = np.where(h < layers[2], y, a[3] + b[3] * np.exp(-1 * h / c[3]))
y = np.where(h < layers[3], y, a[4] - b[4] * h / c[4])
y = np.where(h < h_max, y, 0)
# print "getting flat atmosphere from h=%.2f to infinity = %.2f" % (h, y / np.cos(zenith) * 1e-4)
return y / np.cos(zenith)
# def _get_atmosphere2(self, zenith, h_low=0., h_up=np.infty):
# if use_curved(zenith, self.curved):
# from scipy import integrate
# if h_up <= h_low:
# print "WARNING: upper limit less than lower limit"
# return np.nan
# if h_up == np.infty:
# h_up = h_max
# b = h_up
# d_low = get_distance_for_height_above_ground(h_low, zenith)
# d_up = get_distance_for_height_above_ground(b, zenith)
# d_up_1 = d_low + 2.e3
# if d_up_1 > d_up:
# full_atm = integrate.quad(self._get_density4,
# zenith, d_low, d_up, limit=100, epsabs=1e-2)[0]
# else:
# full_atm = integrate.quad(self._get_density4,
# zenith, d_low, d_up_1, limit=100, epsabs=1e-4)[0]
# full_atm += integrate.quad(self._get_density4,
# zenith, d_up_1, d_up, limit=100, epsabs=1e-2)[0]
# return full_atm
# else:
# return (_get_atmosphere(h_low, model=self.model) - _get_atmosphere(h_up, model=self.model)) / np.cos(zenith)
# def get_atmosphere3(self, h_low=0., h_up=np.infty):
# return self._get_atmosphere3(h_low=h_low, h_up=h_up) * 1e-4
#
# def _get_atmosphere3(self, h_low=0., h_up=np.infty):
# a = self.a
# b = self.b
# c = self.c
# h = h_low
# layers = atm_models[self.model]['h']
# dldh = self._get_dldh(h)
# y = np.where(h < layers[0], a[0] + b[0] * np.exp(-1 * h / c[0]) * dldh[0], a[1] + b[1] * np.exp(-1 * h / c[1]) * dldh[1])
# y = np.where(h < layers[1], y, a[2] + b[2] * np.exp(-1 * h / c[2]) * dldh[2])
# y = np.where(h < layers[2], y, a[3] + b[3] * np.exp(-1 * h / c[3]) * dldh[3])
# y = np.where(h < layers[3], y, a[4] - b[4] * h / c[4] * dldh[4])
# y = np.where(h < h_max, y, 0)
# return y
def get_vertical_height(self, zenith, xmax):
""" returns the (vertical) height above see level [in meters] as a function
of zenith angle and Xmax [in g/cm^2]
"""
return self._get_vertical_height(zenith, xmax * 1e4)
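    # Usage sketch (illustrative): convert a shower maximum, e.g. Xmax = 750 g/cm^2,
    # into the height of that point above sea level for a given zenith angle
    # (both arguments array-like):
    #
    #   >>> atm = Atmosphere(model=17)
    #   >>> atm.get_vertical_height(np.array([np.deg2rad(50.)]), np.array([750.]))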
def _get_vertical_height(self, zenith, X):
mask_flat, mask_taylor, mask_numeric = self.__get_method_mask(zenith)
tmp = np.zeros_like(zenith)
if np.sum(mask_numeric):
print "get vertical height numeric", zenith
tmp[mask_numeric] = self._get_vertical_height_numeric(*self.__get_arguments(mask_numeric, zenith, X))
if np.sum(mask_taylor):
tmp[mask_taylor] = self._get_vertical_height_numeric_taylor(*self.__get_arguments(mask_taylor, zenith, X))
if np.sum(mask_flat):
print "get vertical height flat"
tmp[mask_flat] = self._get_vertical_height_flat(*self.__get_arguments(mask_flat, zenith, X))
return tmp
def __calculate_d(self):
zeniths = np.arccos(np.linspace(0, 1, self.number_of_zeniths))
d = np.zeros((self.number_of_zeniths, 4))
self.curved = True
self.__zenith_numeric = 0
for iZ, z in enumerate(zeniths):
z = np.array([z])
print "calculating constants for %.02f deg zenith angle (iZ = %i, nT = %i)..." % (np.rad2deg(z), iZ, self.n_taylor)
d[iZ][0] = 0
X1 = self._get_atmosphere(z, self.h[1])
d[iZ][1] = self._get_vertical_height_numeric(z, X1) - self._get_vertical_height_taylor_wo_constants(z, X1)
X2 = self._get_atmosphere(z, self.h[2])
d[iZ][2] = self._get_vertical_height_numeric(z, X2) - self._get_vertical_height_taylor_wo_constants(z, X2)
X3 = self._get_atmosphere(z, self.h[3])
d[iZ][3] = self._get_vertical_height_numeric(z, X3) - self._get_vertical_height_taylor_wo_constants(z, X3)
print "\t... d = ", d[iZ], " iZ = ", iZ
return d
def _get_vertical_height_taylor(self, zenith, X):
tmp = self._get_vertical_height_taylor_wo_constants(zenith, X)
masks = self.__get_X_masks(X, zenith)
d = self.d[self.__get_zenith_a_indices(zenith)]
for iX, mask in enumerate(masks):
if(np.sum(mask)):
if iX < 4:
print mask
print tmp[mask], len(tmp[mask])
print d[mask][..., iX]
tmp[mask] += d[mask][..., iX]
return tmp
def _get_vertical_height_taylor_wo_constants(self, zenith, X):
b = self.b
c = self.c
ct = np.cos(zenith)
T0 = self._get_atmosphere(zenith)
masks = self.__get_X_masks(X, zenith)
# Xs = [self._get_atmosphere(zenith, h) for h in self.h]
# d = np.array([self._get_vertical_height_numeric(zenith, t) for t in Xs])
tmp = np.zeros_like(zenith)
for iX, mask in enumerate(masks):
if(np.sum(mask)):
if iX < 4:
xx = X[mask] - T0[mask]
# print "iX < 4", iX
if self.n_taylor >= 1:
tmp[mask] = -c[iX] / b[iX] * ct[mask] * xx
if self.n_taylor >= 2:
tmp[mask] += -0.5 * c[iX] * (ct[mask] ** 2 * c[iX] - ct[mask] ** 2 * r_e - c[iX]) / (r_e * b[iX] ** 2) * xx ** 2
if self.n_taylor >= 3:
tmp[mask] += -1. / 6. * c[iX] * ct[mask] * (3 * ct[mask] ** 2 * c[iX] ** 2 - 4 * ct[mask] ** 2 * r_e * c[iX] + 2 * r_e ** 2 * ct[mask] ** 2 - 3 * c[iX] ** 2 + 4 * r_e * c[iX]) / (r_e ** 2 * b[iX] ** 3) * xx ** 3
if self.n_taylor >= 4:
tmp[mask] += -1. / (24. * r_e ** 3 * b[iX] ** 4) * c[iX] * (15 * ct[mask] ** 4 * c[iX] ** 3 - 25 * c[iX] ** 2 * r_e * ct[mask] ** 4 + 18 * c[iX] * r_e ** 2 * ct[mask] ** 4 - 6 * r_e ** 3 * ct[mask] ** 4 - 18 * c[iX] ** 3 * ct[mask] ** 2 + 29 * c[iX] ** 2 * r_e * ct[mask] ** 2 - 18 * c[iX] * r_e ** 2 * ct[mask] ** 2 + 3 * c[iX] ** 3 - 4 * c[iX] ** 2 * r_e) * xx ** 4
if self.n_taylor >= 5:
tmp[mask] += -1. / (120. * r_e ** 4 * b[iX] ** 5) * c[iX] * ct[mask] * (ct[mask] ** 4 * (105 * c[iX] ** 4 - 210 * c[iX] ** 3 * r_e + 190 * c[iX] ** 2 * r_e ** 2 - 96 * c[iX] * r_e ** 3 + 24 * r_e ** 4) + ct[mask] ** 2 * (-150 * c[iX] ** 4 + 288 * c[iX] ** 3 * r_e - 242 * c[iX] ** 2 * r_e ** 2 + 96 * c[iX] * r_e ** 3) + 45 * c[iX] ** 4 - 78 * r_e * c[iX] ** 3 + 52 * r_e ** 2 * c[iX] ** 2) * xx ** 5
if self.n_taylor >= 6:
tmp[mask] += -1. / (720. * r_e ** 5 * b[iX] ** 6) * c[iX] * (ct[mask] ** 6 * (945 * c[iX] ** 5 - 2205 * c[iX] ** 4 * r_e + 2380 * c[iX] ** 3 * r_e ** 2 - 1526 * c[iX] ** 2 * r_e ** 3 + 600 * c[iX] * r_e ** 4 - 120 * r_e ** 5) + ct[mask] ** 4 * (-1575 * c[iX] ** 5 + 3528 * c[iX] ** 4 * r_e - 3600 * c[iX] ** 3 * r_e ** 2 + 2074 * c[iX] ** 2 * r_e ** 3 - 600 * c[iX] * r_e ** 4) + ct[mask] ** 2 * (675 * c[iX] ** 5 - 1401 * c[iX] ** 4 * r_e - 1272 * c[iX] ** 3 * r_e ** 2 - 548 * c[iX] ** 2 * r_e ** 3) - 45 * c[iX] ** 5 + 78 * c[iX] ** 4 * r_e - 52 * c[iX] ** 3 * r_e ** 2) * xx ** 6
elif iX == 4:
print "iX == 4", iX
# numeric fallback
tmp[mask] = self._get_vertical_height_numeric(zenith, X)
else:
print "iX > 4", iX
tmp[mask] = np.ones_like(mask) * h_max
return tmp
def _get_vertical_height_numeric(self, zenith, X):
from scipy import optimize
tmp = np.zeros_like(zenith)
zenith = np.array(zenith)
for i in xrange(len(tmp)):
x0 = get_distance_for_height_above_ground(self._get_vertical_height_flat(zenith[i], X[i]), zenith[i])
def ftmp(d, zenith, xmax, observation_level=0):
h = get_height_above_ground(d, zenith, observation_level=observation_level)
h += observation_level
tmp = self._get_atmosphere_numeric([zenith], h_low=h)
dtmp = tmp - xmax
return dtmp
dxmax_geo = optimize.brentq(ftmp, -1e3, x0 + 1e4, xtol=1e-6,
args=(zenith[i], X[i]))
tmp[i] = get_height_above_ground(dxmax_geo, zenith[i])
return tmp
def _get_vertical_height_numeric_taylor(self, zenith, X):
from scipy import optimize
tmp = np.zeros_like(zenith)
zenith = np.array(zenith)
for i in xrange(len(tmp)):
if(X[i] < 0):
X[i] = 0
x0 = get_distance_for_height_above_ground(self._get_vertical_height_flat(zenith[i], X[i]), zenith[i])
def ftmp(d, zenith, xmax, observation_level=0):
h = get_height_above_ground(d, zenith, observation_level=observation_level)
h += observation_level
tmp = self._get_atmosphere_taylor(np.array([zenith]), h_low=np.array([h]))
dtmp = tmp - xmax
return dtmp
print zenith[i], X[i]
dxmax_geo = optimize.brentq(ftmp, -1e3, x0 + 1e4, xtol=1e-6,
args=(zenith[i], X[i]))
tmp[i] = get_height_above_ground(dxmax_geo, zenith[i])
return tmp
def _get_vertical_height_flat(self, zenith, X):
return _get_vertical_height(X * np.cos(zenith), model=self.model)
def get_density(self, zenith, xmax):
""" returns the atmospheric density as a function of zenith angle
and shower maximum Xmax (in g/cm^2) """
return self._get_density(zenith, xmax * 1e4)
def _get_density(self, zenith, xmax):
""" returns the atmospheric density as a function of zenith angle
and shower maximum Xmax """
h = self._get_vertical_height(zenith, xmax)
print h
rho = get_density(h, model=self.model)
return rho
# def __get_density2_curved(self, xmax):
# dxmax_geo = self._get_distance_xmax_geometric(xmax, observation_level=0)
# return self._get_density4(dxmax_geo)
#
def _get_density4(self, d, zenith):
h = get_height_above_ground(d, zenith)
return get_density(h, model=self.model)
def get_distance_xmax(self, zenith, xmax, observation_level=1564.):
""" input:
- xmax in g/cm^2
- zenith in radians
output: distance to xmax in g/cm^2
"""
dxmax = self._get_distance_xmax(zenith, xmax * 1e4, observation_level=observation_level)
return dxmax * 1e-4
def _get_distance_xmax(self, zenith, xmax, observation_level=1564.):
return self._get_atmosphere(zenith, h_low=observation_level) - xmax
def get_distance_xmax_geometric(self, zenith, xmax, observation_level=1564.):
""" input:
- xmax in g/cm^2
- zenith in radians
output: distance to xmax in m
"""
return self._get_distance_xmax_geometric(zenith, xmax * 1e4,
observation_level=observation_level)
def _get_distance_xmax_geometric(self, zenith, xmax, observation_level=1564.):
h = self._get_vertical_height(zenith, xmax)
return get_distance_for_height_above_ground(h, zenith, observation_level)
# def __get_distance_xmax_geometric_flat(self, xmax, observation_level=1564.):
# # _get_vertical_height(xmax, self.model)
# # dxmax = self._get_distance_xmax(xmax, observation_level=observation_level)
# # txmax = _get_atmosphere(observation_level, model=self.model) - dxmax * np.cos(self.zenith)
# # height = _get_vertical_height(txmax)
# # return (height - observation_level) / np.cos(self.zenith)
# #
# height = _get_vertical_height(xmax * np.cos(self.zenith)) - observation_level
# return height / np.cos(self.zenith)
# full = _get_atmosphere(observation_level, model=self.model) / np.cos(self.zenith)
# dxmax = full - xmax
# height = _get_vertical_height(_get_atmosphere(0, model=self.model) - dxmax * np.cos(self.zenith))
# return height / np.cos(self.zenith)
# def get_distance_xmax_geometric2(xmax, zenith, observation_level=1564.,
# model=1, curved=False):
# """ input:
# - xmax in g/cm^2
# - zenith in radians
# output: distance to xmax in m
# """
# return _get_distance_xmax_geometric2(zenith, xmax * 1e4,
# observation_level=observation_level,
# model=model, curved=curved)
# def _get_distance_xmax_geometric2(zenith, xmax, observation_level=1564.,
# model=default_model,
# curved=default_curved):
# if curved:
# from scipy import optimize
# x0 = _get_distance_xmax_geometric(zenith, xmax,
# observation_level=observation_level,
# model=model, curved=False)
#
# def ftmp(d, dxmax, zenith, observation_level):
# h = get_height_above_ground(d, zenith, observation_level=observation_level)
# h += observation_level
# dtmp = _get_atmosphere2(zenith, h_low=observation_level, h_up=h, model=model) - dxmax
# print "d = %.5g, h = %.5g, dtmp = %.5g" % (d, h, dtmp)
# return dtmp
#
# dxmax = _get_distance_xmax(xmax, zenith, observation_level=observation_level, curved=True)
# print "distance to xmax = ", dxmax
# tolerance = max(1e-3, x0 * 1.e-6)
# dxmax_geo = optimize.newton(ftmp, x0=x0, maxiter=100, tol=tolerance,
# args=(dxmax, zenith, observation_level))
# # print "x0 = %.7g, dxmax_geo = %.7g" % (x0, dxmax_geo)
# return dxmax_geo
# else:
# dxmax = _get_distance_xmax(xmax, zenith, observation_level=observation_level,
# model=model, curved=False)
# xmax = _get_atmosphere(observation_level, model=model) - dxmax * np.cos(zenith)
# height = _get_vertical_height(xmax)
# return (height - observation_level) / np.cos(zenith)
# =============================================================================
# setting up test suite
# =============================================================================
class TestAtmosphericFunctions(unittest.TestCase):
def test_height_above_ground_to_distance_transformation(self):
zeniths = np.deg2rad(np.linspace(0, 90, 10))
for zenith in zeniths:
heights = np.linspace(0, 1e5, 20)
for h in heights:
obs_levels = np.linspace(0, 2e3, 4)
for obs in obs_levels:
d = get_distance_for_height_above_ground(h, zenith, observation_level=obs)
h2 = get_height_above_ground(d, zenith, observation_level=obs)
self.assertAlmostEqual(h, h2)
def test_flat_atmosphere(self):
atm = Atmosphere(curved=False)
zeniths = np.deg2rad(np.linspace(0, 89, 10))
heights = np.linspace(0, 1e4, 10)
atm1 = atm.get_atmosphere(zeniths, heights)
atm2 = atm.get_atmosphere(np.zeros(10), heights) / np.cos(zeniths)
for i in xrange(len(atm1)):
self.assertAlmostEqual(atm1[i], atm2[i])
heights2 = np.linspace(1e4, 1e5, 10)
atm1 = atm.get_atmosphere(zeniths, heights, heights2)
atm2 = atm.get_atmosphere(np.zeros(10), heights, heights2) / np.cos(zeniths)
for i in xrange(len(atm1)):
self.assertAlmostEqual(atm1[i], atm2[i])
z = np.deg2rad(50)
atm1 = atm.get_atmosphere(z, 0)
atm2 = atm.get_atmosphere(0, 0) / np.cos(z)
self.assertAlmostEqual(atm1, atm2, delta=1e-3)
atm1 = atm.get_atmosphere(z, 10, 1e4)
atm2 = atm.get_atmosphere(0, 10, 1e4) / np.cos(z)
self.assertAlmostEqual(atm1, atm2, delta=1e-3)
def test_numeric_atmosphere(self):
atm_flat = Atmosphere(curved=False)
atm_num = Atmosphere(curved=True, zenith_numeric=0)
zeniths = np.deg2rad(np.linspace(0, 20, 3))
atm1 = atm_flat.get_atmosphere(zeniths, 0)
atm2 = atm_num.get_atmosphere(zeniths, 0)
for i in xrange(len(atm1)):
delta = 1e-3 + np.rad2deg(zeniths[i]) * 1e-2
self.assertAlmostEqual(atm1[i], atm2[i], delta=delta)
atm1 = atm_flat.get_atmosphere(zeniths, 1e3)
atm2 = atm_num.get_atmosphere(zeniths, 1e3)
for i in xrange(len(atm1)):
delta = 1e-3 + np.rad2deg(zeniths[i]) * 1e-2
self.assertAlmostEqual(atm1[i], atm2[i], delta=delta)
atm1 = atm_flat.get_atmosphere(zeniths, 1e3, 1e4)
atm2 = atm_num.get_atmosphere(zeniths, 1e3, 1e4)
for i in xrange(len(atm1)):
delta = 1e-3 + np.rad2deg(zeniths[i]) * 1e-2
self.assertAlmostEqual(atm1[i], atm2[i], delta=delta)
z = np.deg2rad(0)
atm1 = atm_flat.get_atmosphere(z, 0)
atm2 = atm_num.get_atmosphere(z, 0)
self.assertAlmostEqual(atm1, atm2, delta=1e-3)
atm1 = atm_flat.get_atmosphere(z, 10, 1e4)
atm2 = atm_num.get_atmosphere(z, 10, 1e4)
self.assertAlmostEqual(atm1, atm2, delta=1e-2)
def test_taylor_atmosphere(self):
atm_taylor = Atmosphere(curved=True)
atm_num = Atmosphere(curved=True, zenith_numeric=0)
for h in np.linspace(0, 1e4, 10):
atm1 = atm_taylor.get_atmosphere(0, h_low=h)
atm2 = atm_num.get_atmosphere(0, h_low=h)
self.assertAlmostEqual(atm1, atm2, delta=1e-3)
zeniths = np.deg2rad([0, 11.478341, 30.683417])
for i in xrange(len(zeniths)):
delta = 1e-6
atm1 = atm_taylor.get_atmosphere(zeniths[i], 0)
atm2 = atm_num.get_atmosphere(zeniths[i], 0)
self.assertAlmostEqual(atm1, atm2, delta=delta)
atm1 = atm_taylor.get_atmosphere(zeniths, 1e3)
atm2 = atm_num.get_atmosphere(zeniths, 1e3)
for i in xrange(len(atm1)):
delta = 1e-5
self.assertAlmostEqual(atm1[i], atm2[i], delta=delta)
atm1 = atm_taylor.get_atmosphere(zeniths, 1e3, 1e4)
atm2 = atm_num.get_atmosphere(zeniths, 1e3, 1e4)
for i in xrange(len(atm1)):
delta = 1e-5
self.assertAlmostEqual(atm1[i], atm2[i], delta=delta)
z = np.deg2rad(0)
atm1 = atm_taylor.get_atmosphere(z, 0)
atm2 = atm_num.get_atmosphere(z, 0)
self.assertAlmostEqual(atm1, atm2, delta=1e-3)
atm1 = atm_taylor.get_atmosphere(z, 10, 1e4)
atm2 = atm_num.get_atmosphere(z, 10, 1e4)
self.assertAlmostEqual(atm1, atm2, delta=1e-2)
def test_taylor_atmosphere2(self):
atm_taylor = Atmosphere(curved=True)
atm_num = Atmosphere(curved=True, zenith_numeric=0)
zeniths = np.deg2rad(np.linspace(0, 83, 20))
for i in xrange(len(zeniths)):
delta = 1e-3
# print "checking z = %.1f" % np.rad2deg(zeniths[i])
atm1 = atm_taylor.get_atmosphere(zeniths[i], 0)
atm2 = atm_num.get_atmosphere(zeniths[i], 0)
delta = max(delta, 1.e-5 * atm1)
self.assertAlmostEqual(atm1, atm2, delta=delta)
zeniths = np.deg2rad(np.linspace(0, 83, 20))
for i in xrange(len(zeniths)):
delta = 1e-2
# print "checking z = %.1f" % np.rad2deg(zeniths[i])
atm1 = atm_taylor.get_atmosphere(zeniths[i], 1e3)
atm2 = atm_num.get_atmosphere(zeniths[i], 1e3)
self.assertAlmostEqual(atm1, atm2, delta=delta)
zeniths = np.deg2rad(np.linspace(0, 83, 20))
for i in xrange(len(zeniths)):
delta = 1e-2
# print "checking z = %.1f" % np.rad2deg(zeniths[i])
atm1 = atm_taylor.get_atmosphere(zeniths[i], 0, 1e4)
atm2 = atm_num.get_atmosphere(zeniths[i], 0, 1e4)
self.assertAlmostEqual(atm1, atm2, delta=delta)
def test_vertical_height_flat_numeric(self):
atm_flat = Atmosphere(curved=False)
atm_num = Atmosphere(curved=True, zenith_numeric=0)
zenith = 0
xmax = np.linspace(300, 900, 20)
atm1 = atm_flat.get_vertical_height(zenith * np.ones_like(xmax), xmax)
atm2 = atm_num.get_vertical_height(zenith * np.ones_like(xmax), xmax)
for i in xrange(len(xmax)):
self.assertAlmostEqual(atm1[i], atm2[i], delta=1e-2)
zeniths = np.deg2rad(np.linspace(0, 30, 4))
xmax = 600
atm1 = atm_flat.get_vertical_height(zeniths, xmax)
atm2 = atm_num.get_vertical_height(zeniths, xmax)
for i in xrange(len(zeniths)):
self.assertAlmostEqual(atm1[i], atm2[i], delta=1e-3 * atm1[i])
def test_vertical_height_taylor_numeric(self):
atm_taylor = Atmosphere(curved=True)
atm_num = Atmosphere(curved=True, zenith_numeric=0)
zeniths = np.deg2rad(np.linspace(0, 85, 30))
xmax = 600
atm1 = atm_taylor.get_vertical_height(zeniths, xmax)
atm2 = atm_num.get_vertical_height(zeniths, xmax)
for i in xrange(len(zeniths)):
# print "zenith = ", np.rad2deg(zeniths[i])
self.assertAlmostEqual(atm1[i], atm2[i], delta=2e-5 * atm1[i])
#
# def test_atmosphere_above_height_for_flat_atm(self):
# curved = False
# zeniths = np.deg2rad(np.linspace(0, 70, 8))
# for zenith in zeniths:
# catm = Atmosphere(zenith, curved=curved)
# heights = np.linspace(0, 1e5, 20)
# for h in heights:
# atm = get_atmosphere(h) / np.cos(zenith)
# atm2 = catm.get_atmosphere2(h_low=h)
# self.assertAlmostEqual(atm, atm2)
#
# def test_density_for_flat_atm(self):
# curved = False
# zeniths = np.deg2rad(np.linspace(0, 70, 8))
# for zenith in zeniths:
# catm = Atmosphere(zenith, curved=curved)
# heights = np.linspace(0, 1e5, 20)
# for h in heights:
# rho = get_density(h)
# xmax = catm.get_atmosphere2(h_low=h)
# rho2 = catm.get_density2(xmax)
# self.assertAlmostEqual(rho, rho2)
#
# def test_numerical_density_integration(self):
#
# def allowed_discrepancy(zenith):
# z = np.rad2deg(zenith)
# return z ** 2 / 2500 + z / 90. + 1e-2
#
# zeniths = np.deg2rad(np.linspace(0, 40, 5))
# for zenith in zeniths:
# catm = Atmosphere(zenith)
# heights = np.linspace(0, 1e4, 2)
# for h in heights:
# atm1 = get_atmosphere(h) / np.cos(zenith)
# atm2 = catm.get_atmosphere2(h_low=h)
# self.assertAlmostEqual(atm1, atm2, delta=allowed_discrepancy(zenith))
#
# def test_get_distance_to_xmax_flat_vs_curved(self):
#
# def allowed_discrepancy(zenith):
# z = np.rad2deg(zenith)
# return z ** 2 / 2500 + z / 90. + 1e-2
#
# zeniths = np.deg2rad(np.linspace(0, 40, 5))
# for zenith in zeniths:
# catm = Atmosphere(zenith)
# catm_flat = Atmosphere(zenith, curved=False)
# xmaxs = np.linspace(0, 1e3, 4)
# for xmax in xmaxs:
# dxmax1 = catm_flat.get_distance_xmax(xmax, observation_level=0)
# dxmax2 = catm.get_distance_xmax(xmax, observation_level=0)
# # print "zenith %.0f xmax = %.2g, %.5g, %.5g" % (np.rad2deg(zenith), xmax, dxmax1, dxmax2)
# self.assertAlmostEqual(dxmax1, dxmax2, delta=allowed_discrepancy(zenith))
#
# def test_get_distance_to_xmax_geometric_flat_self_consitency(self):
# # print
# # print
# # print "test_get_distance_to_xmax_geometric_flat_self_consitency"
# zeniths = np.deg2rad(np.linspace(0, 80, 9))
# dxmaxs = np.linspace(0, 4e3, 5)
# obs_levels = np.linspace(0, 2e3, 4)
# for dxmax1 in dxmaxs:
# for zenith in zeniths:
# catm = Atmosphere(zenith, curved=False)
# for obs in obs_levels:
# # print "\tdxmax1 = %.4f, z=%.1f observation level = %.2f" % (dxmax1, np.rad2deg(zenith), obs)
# h1 = dxmax1 * np.cos(zenith) + obs
# xmax = get_atmosphere(h1) / np.cos(zenith)
# dxmax2 = catm.get_distance_xmax_geometric(xmax, observation_level=obs)
# self.assertAlmostEqual(dxmax1, dxmax2, delta=1e-5)
#
# def test_get_distance_to_xmax_geometric_curved_self_consitency(self):
# # print
# # print
# # print "test_get_distance_to_xmax_geometric_curved_self_consitency"
# zeniths = np.deg2rad(np.linspace(0, 89, 10))
# dxmaxs = np.linspace(0, 4e3, 5)
# obs_levels = np.linspace(0, 2e3, 5)
# for dxmax1 in dxmaxs:
# # print "checking dxmax = %.2f" % dxmax1
# for zenith in zeniths:
# # print "checking zenith angle of %.1f" % (np.rad2deg(zenith))
# catm = Atmosphere(zenith)
# delta = 1e-4
# if zenith > np.deg2rad(85):
# delta = 1.e-2
# for obs in obs_levels:
# # print "\tdxmax1 = %.4f, z=%.1f observation level = %.2f" % (dxmax1, np.rad2deg(zenith), obs)
# # print "testing"
# h1 = get_height_above_ground(dxmax1, zenith, observation_level=obs) + obs
# xmax = catm.get_atmosphere2(h_low=h1)
# # print "zenith %.0f dmax = %.2g, obslevel = %.3g -> h1 = %.3g, xmax = %.3g" % (np.rad2deg(zenith), dxmax1, obs, h1, xmax)
# dxmax2 = catm.get_distance_xmax_geometric(xmax, observation_level=obs)
# self.assertAlmostEqual(dxmax1, dxmax2, delta=delta)
#
# def test_get_distance_to_xmax_geometric_flat_vs_curved(self):
# # print
# # print
# # print "test_get_distance_to_xmax_geometric_flat_vs_curved"
#
# def allowed_discrepancy(zenith):
# z = np.rad2deg(zenith)
# return z ** 2 / 20.**2 * 1e-3 + 1e-3
#
# zeniths = np.deg2rad(np.linspace(0, 60, 7))
# xmaxs = np.linspace(100, 900, 4)
# obs_levels = np.linspace(0, 1.5e3, 4)
# for zenith in zeniths:
# catm = Atmosphere(zenith)
# catm_flat = Atmosphere(zenith, curved=False)
# for xmax in xmaxs:
# for obs in obs_levels:
# dxmax1 = catm_flat.get_distance_xmax_geometric(xmax, observation_level=obs)
# dxmax2 = catm.get_distance_xmax_geometric(xmax, observation_level=obs)
# # print "zenith %.0f xmax = %.2g, obslevel = %.3g, %.5g, %.5g %.2g" % (np.rad2deg(zenith), xmax, obs, dxmax1, dxmax2, 3 + np.abs(dxmax1 * allowed_discrepancy(zenith)))
# self.assertAlmostEqual(dxmax1, dxmax2, delta=3. + np.abs(dxmax1 * allowed_discrepancy(zenith)))
# def test_get_distance_to_xmax_geometric_flat_vs_curved2(self):
#
# def allowed_discrepancy(zenith):
# z = np.rad2deg(zenith)
# return z ** 2 / 20.**2 * 1e-3 + 1e-3
#
# zeniths = np.deg2rad(np.linspace(0, 60, 7))
# xmaxs = np.linspace(0, 900, 4)
# obs_levels = np.linspace(0, 1.5e3, 4)
# for zenith in zeniths:
# for xmax in xmaxs:
# for obs in obs_levels:
# print
# print "testing "
# dxmax1 = get_distance_xmax_geometric2(xmax, zenith, observation_level=obs, curved=False)
# if dxmax1 < 0:
# print "\t skipping negetive distances"
# continue
# print "zenith %.0f xmax = %.2g, obslevel = %.3g, %.5g" % (np.rad2deg(zenith), xmax, obs, dxmax1)
# dxmax2 = get_distance_xmax_geometric2(xmax, zenith, observation_level=obs, curved=True)
# print "zenith %.0f xmax = %.2g, obslevel = %.3g, %.5g, %.5g %.2g" % (np.rad2deg(zenith), xmax, obs, dxmax1, dxmax2, 1e-1 + np.abs(dxmax1 * allowed_discrepancy(zenith)))
# self.assertAlmostEqual(dxmax1, dxmax2, delta=3. + np.abs(dxmax1 * allowed_discrepancy(zenith)))
if __name__ == "__main__":
unittest.main()
import matplotlib.pyplot as plt
zenith = np.deg2rad(80)
catm_t0 = Atmosphere(zenith, n_taylor=0)
catm_t1 = Atmosphere(zenith, n_taylor=1)
catm_t2 = Atmosphere(zenith, n_taylor=2)
catm_t3 = Atmosphere(zenith, n_taylor=3)
catm_t4 = Atmosphere(zenith, n_taylor=4)
catm_t5 = Atmosphere(zenith, n_taylor=5)
catm_flat = Atmosphere(zenith, curved=False)
fig, ax = plt.subplots(1, 1)
hh = np.linspace(0, h_max, 300)
atm1 = np.array([catm_flat.get_atmosphere2(h) for h in hh])
atm2_5 = np.array([catm_t5.get_atmosphere3(h_low=h) for h in hh])
atm2_4 = np.array([catm_t4.get_atmosphere3(h_low=h) for h in hh])
atm2_3 = np.array([catm_t3.get_atmosphere3(h_low=h) for h in hh])
atm2_2 = np.array([catm_t2.get_atmosphere3(h_low=h) for h in hh])
atm2_1 = np.array([catm_t1.get_atmosphere3(h_low=h) for h in hh])
atm2_0 = np.array([catm_t0.get_atmosphere3(h_low=h) for h in hh])
atm3 = np.array([catm_t3.get_atmosphere2(h_low=h) for h in hh])
# ax.plot(hh, atm1, label="flat")
ax.plot(hh, atm3, "-", label="numeric")
ax.plot(hh, atm2_5, "-", label="n=5")
ax.plot(hh, atm2_4, "-", label="n=4")
ax.plot(hh, atm2_3, "-", label="n=3")
ax.plot(hh, atm2_2, "--", label="n=2")
ax.plot(hh, atm2_1, "--", label="n=1")
ax.plot(hh, atm2_0, "--", label="n=0")
ax.semilogy(True)
h200 = catm_t3.get_vertical_height2(200)
ax.text(0.75, 0.9, r"$\theta = %.0f^\circ$" % np.rad2deg(zenith),
transform=ax.transAxes, size="xx-large")
# ax.plot([h200, h200], [1e3, 1e9], "--k")
plt.tight_layout()
ax.legend(bbox_to_anchor=(1.05, 1), loc=2, ncol=1, borderaxespad=0.)
ax.set_xlabel("vertical height above sea level [m]")
ax.set_ylabel(r"atmosphere overburden [g/cm$^2$]")
fig.subplots_adjust(left=0.1, right=0.75)
# fig.savefig("atmosphere_overburden_89deg.pdf")
plt.show()
# ax2.plot(hh, (atm1 - atm3) / atm3 * 100., label="flat")
ax2.plot(hh, (atm2_0 - atm3) / atm3 * 100., "--", label="n=0")
ax2.plot(hh, (atm2_1 - atm3) / atm3 * 100., "--", label="n=1")
ax2.plot(hh, (atm2_2 - atm3) / atm3 * 100., "--", label="n=2")
ax2.plot(hh, (atm2_3 - atm3) / atm3 * 100., "-", label="n=3")
ax2.plot(hh, (atm2_4 - atm3) / atm3 * 100., "-", label="n=4")
ax2.plot(hh, (atm2_5 - atm3) / atm3 * 100., "-", label="n=5")
ax2.set_ylim(-10, 10)
ax2.plot([h200, h200], [-10, 10], "--k", label=r"X = 200 g/cm$^2$")
ax2.legend(bbox_to_anchor=(1, 1), loc='upper left', ncol=2)
plt.tight_layout()
plt.show()
dxmax_geos = np.linspace(1e3, 1e6, 10)
for dgeo in dxmax_geos:
h = get_height_above_ground(dgeo, zenith)
Dxmax = get_atmosphere2(zenith, h_low=0, h_up=h)
full_atm = get_atmosphere2(zenith, h_low=0)
xmax = full_atm - Dxmax
print "\tdxmax = %.2g, full_atm = %.2g, xmax = %.2g" % (Dxmax, full_atm, xmax)
dgeo2 = get_distance_xmax_geometric(xmax, zenith, observation_level=0, curved=True)
h2 = get_height_above_ground(dgeo2, zenith)
print "dgeo 1 = %.2g, dgeo2 = %.2g, h1 = %.2g, h2 = %.2g" % (dgeo, dgeo2, h, h2)
# # some unit tests:
heights = np.linspace(0, 1e5, 10)
for h in heights:
X = get_atmosphere2(zenith, h_low=h)
h2 = get_vertical_height2(zenith, X, curved=True)
print "starting with height h = %.2g m -> X = %.2g g/cm^2 -> h = %.2g" % (h, X, h2)
dxmax_geo = get_distance_for_height_above_ground(h, zenith)
dxmax_geo2 = get_distance_xmax_geometric(X, zenith, observation_level=0, curved=True)
print "\t dxmax geo 1 = %.2g, dxmaxgeo2 = %.2g" % (dxmax_geo, dxmax_geo2)
a = 1 / 0
xmax = 669.
zeniths = np.deg2rad(np.arange(0, 81, .5))
rho_curved = np.zeros_like(zeniths)
rho_flat = np.zeros_like(zeniths)
for i, z in enumerate(zeniths):
rho_flat[i] = get_density2(z, xmax, curved=False, model=1)
rho_curved[i] = get_density2(z, xmax, curved=True, model=1)
fig, ax = plt.subplots(1, 1)
ax.plot(np.rad2deg(zeniths), (rho_flat - rho_curved) / rho_curved * 100., label=r"$X_\mathrm{max} = %.0f g/cm^2$" % xmax)
ax.set_xlabel("zenith angle [deg]")
ax.set_ylabel(r"$(\rho_\mathrm{flat}(X_\mathrm{max}) - \rho_\mathrm{curved}(X_\mathrm{max}))/\rho_\mathrm{curved}(X_\mathrm{max})$ [%]")
ax.legend()
ax.set_ylim(-10, 1)
plt.tight_layout()
fig.savefig("average_xmax_density_zenith.png")
plt.show()
a = 1 / 0
print "full atm 90"
print _get_full_atmosphere(np.deg2rad(90), 0)
zeniths = np.deg2rad(np.arange(0, 90, 1))
atm = np.zeros_like(zeniths)
atm_curved = np.zeros_like(zeniths)
fig, (ax, ax2) = plt.subplots(1, 2)
for i, z in enumerate(zeniths):
atm[i] = _get_atmosphere(0) / np.cos(z)
atm_curved[i] = _get_full_atmosphere(z, observation_level=0)
dd = np.linspace(1, 40e3, 100)
# ax.plot(dd, (get_density(dd*np.cos(z)) - _get_density4(dd, z)) / _get_density4(dd, z), label="%.0f" % np.rad2deg(z))
# ax2.plot(dd, (get_density(dd*np.cos(z)) - _get_density4(dd, z)), label="%.0f" % np.rad2deg(z))
# ax.legend()
ax.plot(np.rad2deg(zeniths), atm, label="flat atm")
ax.plot(np.rad2deg(zeniths), atm_curved, label="curved atm")
ax2.plot(np.rad2deg(zeniths), (atm - atm_curved) / atm_curved, label="curved atm")
ax2.semilogy(True)
plt.tight_layout()
plt.show()
a = 1 / 0
from scipy import integrate
dd = np.linspace(0, 5000, 50) * 1e3
fig, ax = plt.subplots(1, 2)
colors = ["b", "g", "m", "r"]
for i, z in enumerate(np.deg2rad([0, 70, 60, 80])):
ax[0].plot(dd, get_density(dd * np.cos(z)), "%s-" % colors[i] , label="%.0f" % np.rad2deg(z))
ax[0].plot(dd, _get_density4(dd, z), "%s--" % colors[i], label="%.0f" % np.rad2deg(z))
def tmp1(x):
return get_density(x * 1e-2 * np.cos(z))
atmabove = [integrate.quad(tmp1, 0, t)[0] for t in dd * 1e2]
ax[1].plot(dd, atmabove, "%s-" % colors[i], label="%.0f" % np.rad2deg(z))
def tmp(x):
return _get_density4(x * 1e-2, z)
atmabove = [integrate.quad(tmp, 0, t)[0] for t in dd * 1e2]
ax[1].plot(dd, atmabove, "%s--" % colors[i], label="%.0f" % np.rad2deg(z))
ax[0].legend()
ax[0].set_xlabel("shower path length [m]")
ax[1].set_xlabel("shower path length [m]")
ax[0].set_ylabel("density [g/m^3]")
ax[1].set_ylabel("atmospheric depth [g/cm^2]")
ax[1].legend()
plt.tight_layout()
plt.show()
| gpl-3.0 |
stijnvanhoey/pyFUSE | pyfuse/utilities/linres_compare.py | 1 | 11212 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 23 16:14:13 2012
@author: VHOEYS
"""
import os
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import odeint
from scipy import arange, array, exp
from scipy import special
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import MultipleLocator,MaxNLocator,LinearLocator,FixedLocator
mpl.rcParams['font.size'] = 12
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['lines.color'] = 'k'
mpl.rcParams['xtick.labelsize'] = 20
def extrap1d(interpolator):
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]:
return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else:
return interpolator(x)
def ufunclike(xs):
return array(map(pointwise, array(xs)))
return ufunclike
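# A small usage sketch for the extrapolating wrapper above (the numbers are
# illustrative only):
#
#     f_i = interp1d([0., 1., 2.], [0., 2., 4.])
#     f_x = extrap1d(f_i)
#     f_x([3.])   # -> array([ 6.]): linear extrapolation beyond the last knot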
def linres(n_res,q_init,cov,co,k): # still to do: handle cov (the input of the previous time step) in the VHM-like way
if n_res==1:
# print q_init[0],'qinit1'
# print np.exp(-1/k),'ewp'
q_init[0]=q_init[0]*np.exp(-1./k) + (co+cov)*(1 - np.exp(-1/k))/2
# print q_init[0],'qinit2'
return q_init
else:
q_init[n_res-1]=q_init[n_res-1]* np.exp(-1/k)+linres(n_res-1,q_init,cov,co,k)[n_res-2]*(1 - np.exp(-1/k))
return q_init
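# Each reservoir above applies the exact one-step solution of a linear
# reservoir, q_t = q_{t-1} * exp(-dt/k) + I * (1 - exp(-dt/k)) with dt = 1:
# for the first reservoir the inflow I is the average of the current and
# previous rainfall, (co + cov) / 2, for every further reservoir it is the
# (already updated) outflow of the reservoir upstream of it.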
def deriv(u,t,rain,const):
# print t
k=0.2
RAIN=rain(t)
# print RAIN
du1 = RAIN-k*u[0]
du2 = k*u[0] -k*u[1]
du3 = k*u[1] -k*u[2]
return np.array([du1,du2,du3])
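# deriv() is the continuous-time counterpart of linres(): a cascade of three
# linear reservoirs with rate k (hard-coded to 0.2 above),
#
#     dS1/dt = R(t) - k*S1,    dSi/dt = k*S(i-1) - k*Si    (i = 2, 3),
#
# which NAM_LSODA below integrates with odeint so that the two solutions can
# be compared.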
def NAM_LSODA(const,InitCond,rain):
totn = rain.size
# print totn
#Define time-steps to give output for
##################################
totaltime=np.float(totn)
time=np.arange(0,totaltime,1.)
# print time
    # time= np.linspace(0.0,totaltime,totaltime) #eg. hours if datainput hours and timestep=1.0; makes it possible to get hourly output from daily input, but NOT daily output from hourly input (too much hassle to convert hourly info to daily values; better handled in preprocessing)
# print time
#Prepare timeseries for linear interpolation in substepping
##################################
rain_int = interp1d(time,rain,bounds_error=False)
# rain_int2 = extrap1d(rain_int)
# f_i = interp1d(x, y)
# f_x = extrap1d(f_i)
#Solve with LSODA scheme
##################################
y=odeint(deriv,InitCond,time,full_output=0, printmessg=True, args=(rain_int,const),hmax=1.)
#Calculate fluxes and prepare outputs
##################################
# v = np.float64(area * 1000.0 / (60.0 * 60.0))
return y
#datapath="D:\Modellen\Version2012\HPC"
#Rain=np.loadtxt(os.path.join(datapath,'Rain_Cal_warm_1jan02_31dec05'))
#Rain=Rain[:200]
#
##ODE SOLVER
#InitCond=np.array([0.,0.,0.])
#const=0.
#trg=NAM_LSODA(const,InitCond,Rain)
#k=0.2
#plt.plot(k*trg)
#
#
##LINRES SOLUTION CHOW
#nn=3
#ffs=np.zeros((201,nn))
#k=0.2
#ff=np.ones(nn)*0.0
#ffs[0]=ff
#
#for t in range(200):
# ff=linres(nn,ff,Rain[t-1],Rain[t],1./k)
# ffs[t]=ff
#
#plt.plot(ffs)
#GAMMA DISTRIBUTIE
def gammat(nn,k):
'''
gamma-function based weight function to control the runoff delay => Chow, 1988
'''
    frac_future=np.zeros(50)   #Parameter added
ntdh = frac_future.size
deltim=1.
# print 'qtimedelay is calculated with a unit of',deltim,'hours to have parameter values comparable to Clarke, 2008'
for jtim in range(ntdh):
tfuture=jtim*deltim
prob=(1./(k*special.gamma(nn)))*(tfuture/k)**(nn-1) * np.exp(-tfuture/k)
frac_future[jtim]=max(0.,prob)
# if cumprob < 0.99:
# print 'not enough bins in the frac_future'
#make sure sum to one
frac_future[:]=frac_future[:]/frac_future[:].sum()
return frac_future
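# The weights returned by gammat() sample the gamma-distribution unit
# hydrograph h(t) = 1 / (k * Gamma(n)) * (t/k)**(n-1) * exp(-t/k) at unit
# time steps and renormalise them to sum to one. For integer n this is the
# impulse response of a cascade of n linear reservoirs with time constant k
# (the Nash cascade), which the commented-out routing example further below
# convolves with the rainfall series.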
def gamma_tdelay(set_par):
'''
gamma-function based weight function to control the runoff delay => Chow, 1988
'''
    nn=set_par['nres']
    k=set_par['k']  # note: 'k' was undefined in this function; assuming it is supplied via set_par
    frac_future=np.zeros(500)   #Parameter added
ntdh = frac_future.size
deltim=1.
# print 'qtimedelay is calculated with a unit of',deltim,'hours to have parameter values comparable to Clarke, 2008'
for jtim in range(ntdh):
tfuture=jtim*deltim
prob=(1./(k*special.gamma(nn)))*(tfuture/k)**(nn-1) * np.exp(-tfuture/k)
frac_future[jtim]=max(0.,prob)
# if cumprob < 0.99:
# print 'not enough bins in the frac_future'
#make sure sum to one
frac_future[:]=frac_future[:]/frac_future[:].sum()
return frac_future
def qtimedelay(set_par,deltim=1.):
'''
gamma-function based weight function to control the runoff delay
'''
alpha=3.
alamb = alpha/set_par['mut']
psave=0.0
    set_par['frac_future']=np.zeros(50)   #Parameter added
ntdh = set_par['frac_future'].size
deltim=deltim
    print 'qtimedelay is calculated with a unit of',deltim,'hours to have parameter values comparable to Clark, 2008'
for jtim in range(ntdh):
# print jtim
tfuture=jtim*deltim
# print alamb*tfuture
        cumprob= special.gammainc(alpha, alamb*tfuture) # /special.gamma(alpha) is not needed because only the differences are taken
# print cumprob
set_par['frac_future'][jtim]=max(0.,cumprob-psave)
psave = cumprob
if cumprob < 0.99:
print 'not enough bins in the frac_future'
#make sure sum to one
set_par['frac_future'][:]=set_par['frac_future'][:]/set_par['frac_future'][:].sum()
return set_par
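# The loop above builds the weights as increments of the gamma CDF with shape
# alpha = 3 and rate alamb = alpha / mut,
#
#     frac_future[j] = F_gamma(j * deltim) - F_gamma((j - 1) * deltim),
#
# so they sum to (nearly) one by construction; the final renormalisation only
# absorbs the truncation after 50 bins. A quick check, with an assumed,
# purely illustrative mut of 5 hours:
#
#     pars = qtimedelay({'mut': 5.}, deltim=1.)
#     print pars['frac_future'].sum()    # -> 1.0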
#nn=2.
#k=2.0
#
#frr=gammat(nn,k)
#qfuture = np.zeros(frr.size)
#qoo=np.zeros(Rain.size)
#
#for t in range(200):
# #Calculate routing
# ntdh = frr.size
# for jtim in range(ntdh):
# qfuture[jtim] = qfuture[jtim] + Rain[t] * frr[jtim]
#
# #outflow of the moment
# qoo[t]=qfuture[0]
# if qfuture[0] > 0.0:
# print qfuture[0]
#
# #move array back
# for jtim in range(1,ntdh):
# qfuture[jtim-1]=qfuture[jtim]
#    qfuture[ntdh-1] = 0.0
#plt.plot(qoo,'--')
#
#set_par={}
#set_par['mut']=1/0.2
#pars=qtimedelay(set_par,deltim=1.)
#tt=pars['frac_future']
#
#plt.plot(tt)
#plt.plot(frr)
#
#
#
#####PHD PLOT
#plt.figure()
#plt.subplots_adjust(wspace = 0.05)
#
#ax1=plt.subplot(121)
#nn=2.
#k= [2.5,5.,10.]
#liness=['k-','k--','k-.']
#
#cnt=0
#for ll in k:
# frr=gammat(nn,ll)
# ax1.plot(frr,liness[cnt],label=r'$k$ = '+str(ll))
# cnt+=1
#
##plt.xlabel(r'$\zeta$ ($ln(\alpha / \tan \beta $)')
#plt.xlabel(r'time')
#plt.ylabel(r'$h(t)$')
#plt.legend()
#
#majorLocator1= MaxNLocator(nbins=3,integer=True)
#ax1.xaxis.set_major_locator(majorLocator1)
#
#majorLocator2= MaxNLocator(nbins=3)
#ax1.yaxis.set_major_locator(majorLocator2)
#
#ax2=plt.subplot(122,sharey=ax1)
#ax2.get_yaxis().set_visible(False)
#nn=[2.,3.5, 5.]
#k=4.
#liness=['k-','k--','k-.']
#
#cnt=0
#for ll in nn:
# frr=gammat(ll,k)
# ax2.plot(frr,liness[cnt],label=r'$n$ = '+str(ll))
# cnt+=1
#
#ax2.xaxis.set_major_locator(majorLocator1)
#
##plt.xlabel(r'$\zeta$ ($ln(\alpha / \tan \beta $)')
#plt.xlabel(r'time')
##plt.ylabel(r'$\frac{A_c}{A}$')
#plt.legend()
#plt.savefig('Rout_pars.png')
#plt.savefig('Rout_pars.pdf')
#plt.savefig('Rout_pars.eps')
def Logistic1(State,Statemax,Psmooth=0.01):
'''
Uses a logistic function to smooth the threshold at the top of a bucket
See literature:
Clark, Martyn P., A. G. Slater, D. E. Rupp, R. A. Woods, Jasper A. Vrugt, H. V. Gupta, Thorsten Wagener, and L. E. Hay. Framework for Understanding Structural Errors (FUSE): A modular framework to diagnose differences between hydrological models. Water Resources Research 44 (2008): 14.
Original code from Clark, Martyn P.
'''
    epsilon = 5.0  # Multiplier that ensures storage is always less than capacity
Asmooth = Psmooth * Statemax #actual smoothing
# LOGISMOOTH = 1. / ( 1. + np.exp(-(State - (Statemax - Asmooth * epsilon) ) / Asmooth) )
LOGISMOOTH = 1. / ( 1. + np.exp(-(State - (Statemax) ) / Asmooth) )
return LOGISMOOTH
def Logistic2(State,Statemax,Psmooth=0.01):
'''
Uses a logistic function to smooth the threshold at the top of a bucket
See literature:
Clark, Martyn P., A. G. Slater, D. E. Rupp, R. A. Woods, Jasper A. Vrugt, H. V. Gupta, Thorsten Wagener, and L. E. Hay. Framework for Understanding Structural Errors (FUSE): A modular framework to diagnose differences between hydrological models. Water Resources Research 44 (2008): 14.
Original code from Clark, Martyn P.
'''
    epsilon = 5.0  # Multiplier that ensures storage is always less than capacity
Asmooth = Psmooth * Statemax #actual smoothing
LOGISMOOTH = 1. / ( 1. + np.exp(-(State - (Statemax - Asmooth * epsilon) ) / Asmooth) )
# LOGISMOOTH = 1. / ( 1. + np.exp(-(State - (Statemax) ) / Asmooth) )
return LOGISMOOTH
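# Quick numerical comparison of the two smoothers at the threshold itself
# (the values follow directly from the formulas above, with the default
# Psmooth=0.01):
#
#     Logistic1(50., 50.)   # = 0.5    (logistic centred exactly on Statemax)
#     Logistic2(50., 50.)   # ~ 0.993  (centre shifted below Statemax by
#                           #           epsilon * Asmooth)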
plt.figure()
plt.subplots_adjust(wspace = 0.05)
ax1=plt.subplot(121)
Smax=50.
S=np.arange(0.,100,0.1)
ll=np.zeros(S.size)
smooth=[0.0001,0.01,0.1]
tresh1=np.zeros(500)
tresh2=np.ones(500)
tresh=np.hstack((tresh1,tresh2))
ax1.plot(S,tresh,'k',label='step',linewidth=2.)
liness=['k:','k--','k-.']
cnt=0
for smo in smooth:
smo=smo*Smax
for i in range(S.size):
ll[i]=Logistic1(S[i],Smax,Psmooth=smo)
ax1.plot(S,ll,liness[cnt],label=r'$\omega$='+str(smo))
cnt+=1
ax1.set_ylim([0.0,1.02])
ax1.set_ylabel(r'$\Phi(S,S_{max},\omega)$')
ax1.set_xticks([50.])
ax1.set_xticklabels([r'$S_{max}$'])
ax1.set_yticks([0.,0.5,1.])
#ax1.set_ylabel()
ax1.legend(loc=4,frameon=False,bbox_to_anchor=(1.05,0.001))
#ax1.legend(loc=4,frameon=True,bbox_to_anchor=(1.2,0.001))
#secodnd subplot
ax2=plt.subplot(122)
Smax=50.
S=np.arange(0.,55.,0.1)
ll=np.zeros(S.size)
smooth=[0.0001,0.01,0.1]
tresh1=np.zeros(500)
tresh2=np.ones(10*5)
tresh=np.hstack((tresh1,tresh2))
ax2.plot(S,tresh,'k',label='step',linewidth=2.)
liness=['k:','k--','k-.']
cnt=0
for smo in smooth:
smo=smo*Smax
for i in range(S.size):
ll[i]=Logistic2(S[i],Smax,Psmooth=smo)
ax2.plot(S,ll,liness[cnt],label=r'$\omega$='+str(smo))
cnt+=1
ax2.get_yaxis().set_visible(False)
ax2.set_ylim([0.0,1.02])
ax2.set_xlim([0.0,55.])
ax2.set_xticks([50.])
ax2.set_xticklabels([r'$S_{max}$'])
#ax2.set_yticks([0.,0.5,1.])
#ax1.set_ylabel()
#ax2.legend(loc=8)
#leg=ax2.legend(loc='upper center', bbox_to_anchor=(-0.025, 1.12),mode="expand",frameon=False, shadow=False,ncol=4)
plt.savefig('Smooth_pars.png')
plt.savefig('Smooth_pars.pdf')
plt.savefig('Smooth_pars.eps')
| bsd-3-clause |
madjelan/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
zorroblue/scikit-learn | examples/gaussian_process/plot_compare_gpr_krr.py | 84 | 5205 | """
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned model of KRR and GPR based on a ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity of the kernel (p). Moreover, the noise level
of the data is learned explicitly by GPR by an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerable faster on this example with 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerable longer
than just predicting the mean.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared
rng = np.random.RandomState(0)
# Generate sample data
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0])) # add noise
# Fit KernelRidge with parameter selection based on 5-fold cross validation
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
"kernel": [ExpSineSquared(l, p)
for l in np.logspace(-2, 2, 10)
for p in np.logspace(0, 2, 10)]}
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
+ WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))
# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))
# Predict using gaussian process regressor
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
% (time.time() - stime))
# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
| bsd-3-clause |
krishnatray/data_science_project_portfolio | flask/server.py | 1 | 1304 | import sys
import pickle
from flask import Flask, render_template, request, jsonify, Response
import pandas as pd
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
return '<h1> Hello World! </h1>'
@app.route('/version', methods=['GET'])
def version():
return sys.version
@app.route('/square', methods=['POST'])
def square():
req = request.get_json()
print("The request was", req)
x = req['x']
return jsonify({'x': x, 'x**2': x**2})
model = pickle.load(open('linreg.p', 'rb'))
@app.route('/inference', methods = ['POST'])
def inference():
req = request.get_json()
c, h, w = req['cylinders'], req['horsepower'], req['weight']
prediction = model.predict([[c, h, w]])
return jsonify({'c':c, 'h': h, 'w': w, 'prediction': prediction[0]})
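# Example request against the endpoint above (assuming the server is running
# on 127.0.0.1:3333 as configured at the bottom of this file; the numbers are
# purely illustrative):
#
#   curl -X POST http://127.0.0.1:3333/inference \
#        -H "Content-Type: application/json" \
#        -d '{"cylinders": 4, "horsepower": 95, "weight": 2372}'
#
# The response is a JSON document echoing the inputs plus the model's
# prediction.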
@app.route('/about', methods = ['GET'])
def about():
return render_template('about.html')
@app.route('/faq', methods = ['GET'])
def faq():
return render_template('faq.html')
@app.route('/mpg', methods = ['GET'])
def mpg():
return render_template('mpg.html')
df = pd.read_csv('cars.csv')
@app.route('/plot', methods = ['GET'])
def plot():
data = list(zip(df.mpg, df.weight))
return jsonify(data)
if __name__ == '__main__':
app.run(host = '127.0.0.1', port=3333, debug= True)
| mit |
larsoner/mne-python | examples/decoding/plot_decoding_xdawn_eeg.py | 9 | 4103 | """
============================
XDAWN Decoding From EEG data
============================
ERP decoding with Xdawn :footcite:`RivetEtAl2009,RivetEtAl2011`. For each event
type, a set of spatial Xdawn filters are trained and applied on the signal.
Channels are concatenated and rescaled to create features vectors that will be
fed into a logistic regression.
"""
# Authors: Alexandre Barachant <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from mne import io, pick_types, read_events, Epochs, EvokedArray
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.decoding import Vectorizer
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2,
'Visual/Left': 3, 'Visual/Right': 4}
n_filter = 3
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
events = read_events(event_fname)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
verbose=False)
# Create classification pipeline
clf = make_pipeline(Xdawn(n_components=n_filter),
Vectorizer(),
MinMaxScaler(),
LogisticRegression(penalty='l1', solver='liblinear',
multi_class='auto'))
# Get the labels
labels = epochs.events[:, -1]
# Cross validator
cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
# Do cross-validation
preds = np.empty(len(labels))
for train, test in cv.split(epochs, labels):
clf.fit(epochs[train], labels[train])
preds[test] = clf.predict(epochs[test])
# Classification report
target_names = ['aud_l', 'aud_r', 'vis_l', 'vis_r']
report = classification_report(labels, preds, target_names=target_names)
print(report)
# Normalized confusion matrix
cm = confusion_matrix(labels, preds)
cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis]
# Plot confusion matrix
fig, ax = plt.subplots(1)
im = ax.imshow(cm_normalized, interpolation='nearest', cmap=plt.cm.Blues)
ax.set(title='Normalized Confusion matrix')
fig.colorbar(im)
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
fig.tight_layout()
ax.set(ylabel='True label', xlabel='Predicted label')
###############################################################################
# The ``patterns_`` attribute of a fitted Xdawn instance (here from the last
# cross-validation fold) can be used for visualization.
fig, axes = plt.subplots(nrows=len(event_id), ncols=n_filter,
figsize=(n_filter, len(event_id) * 2))
fitted_xdawn = clf.steps[0][1]
tmp_info = epochs.info.copy()
tmp_info['sfreq'] = 1.
for ii, cur_class in enumerate(sorted(event_id)):
cur_patterns = fitted_xdawn.patterns_[cur_class]
pattern_evoked = EvokedArray(cur_patterns[:n_filter].T, tmp_info, tmin=0)
pattern_evoked.plot_topomap(
times=np.arange(n_filter),
time_format='Component %d' if ii == 0 else '', colorbar=False,
show_names=False, axes=axes[ii], show=False)
axes[ii, 0].set(ylabel=cur_class)
fig.tight_layout(h_pad=1.0, w_pad=1.0, pad=0.1)
###############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/feature_extraction/tests/test_feature_hasher.py | 41 | 3668 | from __future__ import unicode_literals
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils.testing import assert_raises, assert_true, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_array_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
gotomypc/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
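# Caveat on the NODATA handling above: ``M[nodata] = -9999`` indexes rows of
# ``M`` with the nodata *value* itself rather than masking matching cells; a
# boolean mask such as ``M[M == nodata] = -9999`` is probably what is intended
# whenever a grid uses a nodata marker other than -9999.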
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
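# Typical use of the grids returned above (an illustrative sketch, mirroring
# the plot_species_distribution_modeling example): combine them with
# ``np.meshgrid`` to obtain the longitude/latitude of every coverage cell.
#
#     batch = fetch_species_distributions()
#     xgrid, ygrid = construct_grids(batch)
#     X, Y = np.meshgrid(xgrid, ygrid[::-1])     # reverse ygrid: row 0 is north
#     land = batch.coverages[6] > -9999          # drop ocean / missing cells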
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
| bsd-3-clause |
xu6148152/Binea_Python_Project | PythonCookbook/data_encode/data_encode_handle.py | 1 | 6693 | #! python3
# -*- encoding: utf-8 -*-
def test_stock_csv():
import csv
from collections import namedtuple
with open('AMEX.csv') as f:
f_csv = csv.reader(f)
# f_csv = csv.DictReader(f)
headers = next(f_csv)
# headers = [re.sub('[^a-zA-Z_]', '_', h) for h in next(f_csv)]
print(headers)
Row = namedtuple('Row', headers)
print(Row)
for r in f_csv:
row = Row(*r)
print(row)
# print(r['Name'])
headers = ['Symbol', 'Price', 'Date', 'Time', 'Change', 'Volume']
rows = [{'Symbol': 'AA', 'Price': 39.48, 'Date': '6/11/2007', 'Time': '9:36am', 'Change': -0.18, 'Volume': 181800}]
with open('stock.csv', 'w') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
f_csv.writerows(rows)
def test_json_handle():
import json
data = {
'name': 'ACME',
'shares': 100,
'price': 542.23
}
json_str = json.dumps(data)
print(json_str)
data = json.loads(json_str)
print(data)
with open('data.json', 'w') as f:
json.dump(data, f)
with open('data.json', 'r') as f:
data = json.load(f)
print(data)
# data = json.load(json_str)
#
# print(data)
# from urllib.request import urlopen
# u = urlopen('http://search.twitter.com/search.json?q=python&rpp=5')
# try:
# resp = json.loads(u.read().decode('utf-8'))
# except Exception as e:
# print(e)
#
# from pprint import pprint
# pprint(resp)
s = '{"name": "ACME", "shares": 50, "price": 490.1}'
from collections import OrderedDict
data = json.loads(s, object_pairs_hook=OrderedDict)
print(data)
def test_xml_handle():
from urllib.request import urlopen
from xml.etree.ElementTree import parse
u = urlopen('http://planet.python.org/rss20.xml')
doc = parse(u)
for item in doc.iterfind('channel/item'):
title = item.findtext('title')
date = item.findtext('pubDate')
link = item.findtext('link')
print(title)
print(date)
print(link)
print()
def parse_and_remove(filename, path):
from xml.etree.ElementTree import iterparse
path_parts = path.split('/')
doc = iterparse(filename, ('start', 'end'))
# Skip the root element
next(doc)
tag_stack = []
elem_stack = []
for event, elem in doc:
if event == 'start':
tag_stack.append(elem.tag)
elem_stack.append(elem)
elif event == 'end':
if tag_stack == path_parts:
yield elem
elem_stack[-2].remove(elem)
try:
tag_stack.pop()
elem_stack.pop()
except IndexError:
pass
def test_dict_xml():
s = {'name': 'GOOG', 'shares': 100, 'price': 490.1}
e = dict_to_xml('stock', s)
from xml.etree.ElementTree import tostring
print(tostring(e))
e.set('_id', '1234')
print(tostring(e))
def dict_to_xml(tag, d):
from xml.etree.ElementTree import Element
elem = Element(tag)
for key, val in d.items():
child = Element(key)
child.text = str(val)
elem.append(child)
return elem
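# Illustrative output of dict_to_xml above (child order follows the dict's
# iteration order, so it can vary between Python versions):
#
#     >>> tostring(dict_to_xml('stock', {'name': 'GOOG', 'shares': 100}))
#     b'<stock><name>GOOG</name><shares>100</shares></stock>'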
def test_binascii():
s = b'hello'
import binascii
h = binascii.b2a_hex(s)
print(h)
print(binascii.a2b_hex(h))
def test_base64():
s = b'hello'
import base64
a = base64.b64encode(s)
print(a)
print(base64.b64decode(a))
def write_records(records, format, f):
from struct import Struct
record_struct = Struct(format)
for r in records:
print(r)
print(*r)
f.write(record_struct.pack(*r))
def test_struct():
records = [(1, 2.3, 4.5),
(6, 7.8, 9.0),
(12, 13.4, 56.7)]
with open('data.b', 'wb') as f:
write_records(records, '<idd', f)
with open('data.b', 'rb') as f:
for rec in read_records('<idd', f):
print(rec)
with open('data.b', 'rb') as f:
data = f.read()
for rec in unpack_records('<idd', data):
print(rec)
def read_records(format, f):
from struct import Struct
record_struct = Struct(format)
chunks = iter(lambda: f.read(record_struct.size), b'')
return (record_struct.unpack(chunk) for chunk in chunks)
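# The two-argument form ``iter(callable, sentinel)`` used above keeps calling
# ``f.read(record_struct.size)`` until it returns the sentinel ``b''`` (end of
# file), yielding one fixed-size chunk per record without loading the whole
# file into memory.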
def unpack_records(format, data):
from struct import Struct
record_struct = Struct(format)
return (record_struct.unpack_from(data, offset) for offset in range(0, len(data), record_struct.size))
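# Format strings such as the '<idd' used by the callers of these helpers
# follow the ``struct`` module conventions: '<' selects little-endian with
# standard sizes, 'i' is a 4-byte int and 'd' an 8-byte double, so
# ``struct.calcsize('<idd')`` is 20 and every packed record occupies 20 bytes.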
def write_polys(filename, polys):
import struct
import itertools
flattened = list(itertools.chain(*polys))
min_x = min(x for x, y in flattened)
max_x = max(x for x, y in flattened)
min_y = min(y for x, y in flattened)
max_y = max(y for x, y in flattened)
with open(filename, 'wb') as f:
f.write(struct.pack('<iddddi', 0x1234, min_x, min_y, max_x, max_y, len(polys)))
for poly in polys:
size = len(poly) * struct.calcsize('<dd')
f.write(struct.pack('<i', size + 4))
for pt in poly:
f.write(struct.pack('<dd', *pt))
def read_polys(filename):
# import struct
# with open(filename, 'rb') as f:
# header = f.read(40)
# file_code, min_x, min_y, max_x, max_y, num_polys = struct.unpack('<iddddi', header)
# polys = []
# for n in range(num_polys):
# pbytes, = struct.unpack('<i', f.read(4))
# poly = []
# for m in range(pbytes // 16):
# pt = struct.unpack('<dd', f.read(16))
# poly.append(pt)
# polys.append(poly)
# return polys
polys = []
with open(filename, 'rb') as f:
from data_encode.poly_parser import PolyHeader
from data_encode.poly_parser import SizeRecord
from data_encode.poly_parser import Point
phead = PolyHeader.from_file(f)
for n in range(phead.num_polys):
rec = SizeRecord.from_file(f, '<i')
poly = [(p.x, p.y) for p in rec.iter_as(Point)]
polys.append(poly)
return polys
def test_poly_handle():
polys = [
[(1.0, 2.5), (3.5, 4.0), (2.5, 1.5)],
[(7.0, 1.2), (5.1, 3.0), (0.5, 7.5), (0.8, 9.0)],
[(3.4, 6.3), (1.2, 0.5), (4.6, 9.2)],
]
write_polys('poly.b', polys)
print(read_polys('poly.b'))
def test_pandas():
import pandas
rats = pandas.read_csv('AMEX.csv')
print(rats)
print(rats['Symbol'].unique())
crew_dispatched = rats[rats['Symbol'] == 'FAX']
print(len(crew_dispatched))
if __name__ == '__main__':
test_pandas()
| mit |
ghorn/rawesome | examples/carousel/carousel_crosswind_homotopy.py | 2 | 14432 | # Copyright 2012-2013 Greg Horn
#
# This file is part of rawesome.
#
# rawesome is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rawesome is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with rawesome. If not, see <http://www.gnu.org/licenses/>.
import copy
import casadi as C
import matplotlib.pyplot as plt
import numpy
from numpy import pi
import pickle
import rawe
import rawekite
import carousel_dae
from autogen.tocarouselProto import toProto
from autogen.carousel_pb2 import Trajectory
def constrainTetherForce(ocp):
for k in range(ocp.nk):
for j in range(1,ocp.deg+1): #[1]:
ocp.constrain( ocp.lookup('tether_tension',timestep=k,degIdx=j), '>=', 0, tag=('tether tension positive',k))
def realMotorConstraints(ocp):
for k in range(ocp.nk):
# ocp.constrain( ocp.lookup('torque',timestep=k,degIdx=1), '<=', 150, tag=('winch torque',k))
# ocp.constrain( ocp.lookup('torque',timestep=k,degIdx=ocp.deg), '<=', 150, tag=('winch torque',k))
ocp.constrainBnds( ocp.lookup('torque',timestep=k,degIdx=1),
(-78,78), tag=('winch torque',k))
ocp.constrainBnds( ocp.lookup('torque',timestep=k,degIdx=ocp.deg),
(-78,78), tag=('winch torque',k))
ocp.constrain( ocp.lookup('rpm',timestep=k), '<=', 1500, tag=('rpm',k))
ocp.constrain( -1500, '<=', ocp.lookup('rpm',timestep=k), tag=('rpm',k))
def constrainAirspeedAlphaBeta(ocp):
for k in range(0,ocp.nk):
for j in range(0,ocp.deg+1):
ocp.constrainBnds(ocp.lookup('airspeed',timestep=k,degIdx=j),
(10,65), tag=('airspeed',(k,j)))
ocp.constrainBnds(ocp.lookup('alpha_deg',timestep=k,degIdx=j), (-4.5,8.5), tag=('alpha(deg)',(k,j)))
#ocp.constrain(ocp.lookup('cL',timestep=k,degIdx=j), '>=', -0.15, tag=('CL > -0.15',(k,j)))
ocp.constrainBnds(ocp.lookup('beta_deg', timestep=k,degIdx=j), (-10,10), tag=('beta(deg)',(k,j)))
x = ocp('x', timestep=k,degIdx=j)
y = ocp('y', timestep=k,degIdx=j)
z = ocp('z', timestep=k,degIdx=j)
ocp.constrain(2*C.sqrt(x**2 + y**2), '>=', -z, tag=('azimuth not too high',(k,j)))
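# Geometry note on the last constraint above (an interpretation, not from the
# original comments): with z negative above ground, -z is altitude and
# sqrt(x**2 + y**2) the horizontal distance, so 2*sqrt(x**2 + y**2) >= -z caps
# the elevation angle at atan(2), roughly 63 degrees; despite the 'azimuth not
# too high' tag, it effectively limits how steeply the tether may point.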
def setupOcp(dae,conf,nk,nicp=1,deg=4):
ocp = rawe.collocation.Coll(dae, nk=nk,nicp=nicp,deg=deg)
print "setting up collocation..."
ocp.setupCollocation(ocp.lookup('endTime'))
# constrain line angle
for k in range(0,nk):
ocp.constrain(ocp.lookup('cos_line_angle',timestep=k),'>=',C.cos(65*pi/180), tag=('line angle',k))
constrainAirspeedAlphaBeta(ocp)
constrainTetherForce(ocp)
#realMotorConstraints(ocp)
# bounds
ocp.bound('aileron', (numpy.radians(-10),numpy.radians(10)))
ocp.bound('elevator',(numpy.radians(-10),numpy.radians(10)))
ocp.bound('rudder', (numpy.radians(-10),numpy.radians(10)))
ocp.bound('flaps', (numpy.radians(0),numpy.radians(0)))
# can't bound flaps==0 AND have periodic flaps at the same time
# bounding flaps (-1,1) at timestep 0 doesn't really free them, but satisfies LICQ
ocp.bound('flaps', (-1,1),timestep=0,quiet=True)
ocp.bound('daileron', (numpy.radians(-40), numpy.radians(40)))
ocp.bound('delevator', (numpy.radians(-40), numpy.radians(40)))
ocp.bound('drudder', (numpy.radians(-40), numpy.radians(40)))
ocp.bound('dflaps', (numpy.radians(-40), numpy.radians(40)))
ocp.bound('ddelta',(-2*pi, 2*pi))
ocp.bound('x',(-2000,2000))
ocp.bound('y',(-2000,2000))
ocp.bound('z',(-2000,0))
ocp.bound('r',(2,300))
ocp.bound('dr',(-100,100))
ocp.bound('ddr',(-150,150))
ocp.bound('dddr',(-200,200))
ocp.bound('motor_torque',(-500,500))
ocp.bound('motor_torque',(0,0),timestep=0)
#ocp.bound('dmotor_torque',(-1000,1000))
ocp.bound('dmotor_torque',(0,0))
ocp.bound('cos_delta',(0,1.5))
ocp.bound('sin_delta',(-0.4,0.4))
for e in ['e11','e21','e31','e12','e22','e32','e13','e23','e33']:
ocp.bound(e,(-1.1,1.1))
for d in ['dx','dy','dz']:
ocp.bound(d,(-70,70))
for w in ['w_bn_b_x',
'w_bn_b_y',
'w_bn_b_z']:
ocp.bound(w,(-4*pi,4*pi))
ocp.bound('endTime',(3.5,6.0))
# ocp.bound('endTime',(4.8,4.8))
ocp.guess('endTime',4.8)
ocp.bound('w0',(10,10))
# boundary conditions
ocp.bound('y',(0,0),timestep=0,quiet=True)
ocp.bound('sin_delta',(0,0),timestep=0,quiet=True)
# constrain invariants
def constrainInvariantErrs():
rawekite.kiteutils.makeOrthonormal(ocp, ocp.lookup('R_c2b',timestep=0))
ocp.constrain(ocp.lookup('c',timestep=0), '==', 0, tag=('initial c 0',None))
ocp.constrain(ocp.lookup('cdot',timestep=0), '==', 0, tag=('initial cdot 0',None))
ocp.constrain(ocp('sin_delta',timestep=0)**2 + ocp('cos_delta',timestep=0)**2,
'==', 1, tag=('sin**2 + cos**2 == 1',None))
constrainInvariantErrs()
# make it periodic
for name in [ "x","y","z",
"dx","dy","dz",
"w_bn_b_x","w_bn_b_y","w_bn_b_z",
'ddr',
'ddelta',
'aileron','elevator','rudder','flaps',
# 'motor_torque',
'sin_delta'
]:
ocp.constrain(ocp.lookup(name,timestep=0),'==',ocp.lookup(name,timestep=-1), tag=('periodic '+name,None))
# periodic attitude
rawekite.kiteutils.periodicDcm(ocp)
return ocp
if __name__=='__main__':
from rawe.models.betty_conf import makeConf
conf = makeConf()
conf['runHomotopy'] = True
nk = 40
dae = rawe.models.carousel(conf)
dae.addP('endTime')
print "setting up ocp..."
ocp = setupOcp(dae,conf,nk)
lineRadiusGuess = 80.0
circleRadiusGuess = 20.0
# trajectory for homotopy
homotopyTraj = {'x':[],'y':[],'z':[]}
# direction = 1: positive about aircraft z
# direction = -1: negative about aircraft z
direction = 1
k = 0
for nkIdx in range(ocp.nk+1):
for nicpIdx in range(ocp.nicp):
if nkIdx == ocp.nk and nicpIdx > 0:
break
for degIdx in range(ocp.deg+1):
if nkIdx == ocp.nk and degIdx > 0:
break
r = circleRadiusGuess
h = numpy.sqrt(lineRadiusGuess**2 - r**2)
nTurns = 1
# path following
theta = 2*pi*(k+ocp.lagrangePoly.tau_root[degIdx])/float(ocp.nk*ocp.nicp)
theta -= pi
theta *= -direction
thetaDot = nTurns*2*pi/(ocp._guess.lookup('endTime'))
thetaDot *= -direction
xyzCircleFrame = numpy.array([h, r*numpy.sin(theta), -r*numpy.cos(theta)])
xyzDotCircleFrame = numpy.array([0, r*numpy.cos(theta)*thetaDot, r*numpy.sin(theta)*thetaDot])
phi = numpy.arcsin(r/lineRadiusGuess) # rotate so it's above ground
phi += numpy.arcsin((1.3)/lineRadiusGuess)
phi += 10*pi/180
R_c2n = numpy.matrix([[ numpy.cos(phi), 0, numpy.sin(phi)],
[ 0, 1, 0],
[ -numpy.sin(phi), 0, numpy.cos(phi)]])
xyz = numpy.dot(R_c2n, xyzCircleFrame)
xyzDot = numpy.dot(R_c2n, xyzDotCircleFrame)
if nicpIdx == 0 and degIdx == 0:
homotopyTraj['x'].append(float(xyz[0,0]))
homotopyTraj['y'].append(float(xyz[0,1]))
homotopyTraj['z'].append(float(xyz[0,2]))
x = float(xyz[0,0])
y = float(xyz[0,1])
z = float(xyz[0,2])
dx = float(xyzDot[0,0])
dy = float(xyzDot[0,1])
dz = float(xyzDot[0,2])
ocp.guess('x',x,timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('y',y,timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('z',z,timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('dx',dx,timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('dy',dy,timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('dz',dz,timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
p0 = numpy.array([x,y,z])
dp0 = numpy.array([dx,dy,dz])
e1 = dp0/numpy.linalg.norm(dp0)
e3 = -p0/lineRadiusGuess
e2 = numpy.cross(e3,e1)
ocp.guess('e11',e1[0],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('e12',e1[1],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('e13',e1[2],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('e21',e2[0],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('e22',e2[1],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('e23',e2[2],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('e31',e3[0],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('e32',e3[1],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
ocp.guess('e33',e3[2],timestep=nkIdx,nicpIdx=nicpIdx,degIdx=degIdx)
k += 1
ocp.guess('w_bn_b_z', direction*2.0*pi/ocp._guess.lookup('endTime'))
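# The guess assembled above traces one revolution of a circle of radius
# circleRadiusGuess lying on the sphere of radius lineRadiusGuess (the tether
# length), tilted by phi so the orbit sits above the ground.  The DCM guess is
# built from e1 = normalised velocity, e3 = unit vector from the kite towards
# the tether anchor (-p0/|p0|) and e2 = e3 x e1, giving an approximately
# orthonormal, right-handed rotation consistent with the velocity guess.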
# objective function
obj = -1e6*ocp.lookup('gamma_homotopy')
mean_x = numpy.mean(homotopyTraj['x'])
mean_y = numpy.mean(homotopyTraj['y'])
mean_z = numpy.mean(homotopyTraj['z'])
for k in range(ocp.nk+1):
x = ocp.lookup('x',timestep=k)
y = ocp.lookup('y',timestep=k)
z = ocp.lookup('z',timestep=k)
obj += ((x-mean_x)**2 + (y-mean_y)**2 + (z-mean_z)**2 - circleRadiusGuess**2)**2
obj += 10*ocp.lookup('sin_delta',timestep=k)**2
# for k in range(ocp.nk+1):
# obj += (homotopyTraj['x'][k] - ocp.lookup('x',timestep=k))**2
# obj += (homotopyTraj['y'][k] - ocp.lookup('y',timestep=k))**2
# obj += (homotopyTraj['z'][k] - ocp.lookup('z',timestep=k))**2
ocp.setQuadratureDdt('mechanical_energy', 'mechanical_winch_power')
ocp.setQuadratureDdt('electrical_energy', 'electrical_winch_power')
# control regularization
for k in range(ocp.nk):
regs = {'dddr':1.0,
'daileron':numpy.degrees(20.0),
'delevator':numpy.degrees(20.0),
'drudder':numpy.degrees(20.0),
'dflaps':numpy.degrees(20.0),
'dmotor_torque':5.0}
for name in regs:
val = ocp.lookup(name,timestep=k)
obj += 1e-2*val**2/float(regs[name]**2)/float(ocp.nk)
# homotopy forces/torques regularization
homoReg = 0
for k in range(ocp.nk):
for nicpIdx in range(ocp.nicp):
for degIdx in range(1,ocp.deg+1):
homoReg += ocp.lookup('f1_homotopy',timestep=k,nicpIdx=nicpIdx,degIdx=degIdx)**2
homoReg += ocp.lookup('f2_homotopy',timestep=k,nicpIdx=nicpIdx,degIdx=degIdx)**2
homoReg += ocp.lookup('f3_homotopy',timestep=k,nicpIdx=nicpIdx,degIdx=degIdx)**2
homoReg += ocp.lookup('t1_homotopy',timestep=k,nicpIdx=nicpIdx,degIdx=degIdx)**2
homoReg += ocp.lookup('t2_homotopy',timestep=k,nicpIdx=nicpIdx,degIdx=degIdx)**2
homoReg += ocp.lookup('t3_homotopy',timestep=k,nicpIdx=nicpIdx,degIdx=degIdx)**2
obj += 1e-2*homoReg/float(ocp.nk*ocp.nicp*ocp.deg)
ocp.setObjective( obj )
# initial guesses
ocp.guess('w0',10)
ocp.guess('r',lineRadiusGuess)
ocp.guess('cos_delta',1)
ocp.guess('sin_delta',0)
for name in ['w_bn_b_x','w_bn_b_y','dr','ddr','dddr','aileron','rudder','flaps',
'motor_torque','dmotor_torque','ddelta',
'elevator','daileron','delevator','drudder','dflaps']:
ocp.guess(name,0)
ocp.guess('gamma_homotopy',0)
# spawn telemetry thread
callback = rawe.telemetry.startTelemetry(
ocp, callbacks=[
(rawe.telemetry.trajectoryCallback(toProto, Trajectory, showAllPoints=True), 'carousel trajectory')
], printBoundViolation=True, printConstraintViolation=True)
# solver
solverOptions = [("linear_solver","ma97"),
("max_iter",1000),
("expand",True),
("tol",1e-8)]
print "setting up solver..."
ocp.setupSolver( solverOpts=solverOptions,
callback=callback )
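# The three solves below form a homotopy continuation in 'gamma_homotopy':
# the first solve pins gamma near 0 (the fictitious f*_homotopy / t*_homotopy
# terms presumably carry most of the coupling), the second lets gamma float in
# [0, 1] while the objective rewards larger gamma, and the last pins gamma = 1;
# each stage is warm-started from the previous solution via xInit=traj.getDvs().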
xInit = None
ocp.bound('gamma_homotopy',(1e-4,1e-4),force=True)
traj = ocp.solve(xInit=xInit)
ocp.bound('gamma_homotopy',(0,1),force=True)
traj = ocp.solve(xInit=traj.getDvs())
ocp.bound('gamma_homotopy',(1,1),force=True)
# ocp.bound('endTime',(3.5,6.0),force=True)
traj = ocp.solve(xInit=traj.getDvs())
traj.save("data/carousel_crosswind_homotopy.dat")
# Plot the results
def plotResults():
traj.subplot(['f1_homotopy','f2_homotopy','f3_homotopy'])
traj.subplot(['t1_homotopy','t2_homotopy','t3_homotopy'])
traj.subplot(['r_n2b_n_x','r_n2b_n_y','r_n2b_n_z'])
traj.subplot(['v_bn_n_x','v_bn_n_y','v_bn_n_z'])
traj.subplot([['aileron','elevator'],['daileron','delevator']],title='control surfaces')
traj.subplot(['r','dr','ddr'])
traj.subplot(['wind_at_altitude','dr','v_bn_n_x'])
traj.subplot(['c','cdot','cddot'],title="invariants")
traj.plot('airspeed')
traj.subplot([['alpha_deg'],['beta_deg']])
traj.subplot(['cL','cD','L_over_D'])
traj.subplot(['mechanical_winch_power', 'tether_tension'])
traj.subplot(['w_bn_b_x','w_bn_b_y','w_bn_b_z'])
traj.subplot(['e11','e12','e13','e21','e22','e23','e31','e32','e33'])
traj.plot(['nu'])
plt.show()
plotResults()
| lgpl-3.0 |
equialgo/scikit-learn | sklearn/metrics/setup.py | 69 | 1061 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("metrics", parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension("pairwise_fast",
sources=["pairwise_fast.pyx"],
include_dirs=[os.path.join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_subpackage('tests')
return config
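# Building this extension typically goes through the standard numpy.distutils
# machinery (e.g. ``python setup.py build_ext --inplace`` from the project
# root); note that libm ('m') is only appended on POSIX platforms and that the
# CBLAS include dirs / compile args come from get_blas_info() above.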
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
sinhrks/scikit-learn | sklearn/datasets/tests/test_lfw.py | 55 | 7877 | """This test for the LFW requires medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
# The data is cropped around the center as a rectangular bounding box
# around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
wathen/PhD | MHD/FEniCS/MHD/CG/3D/MHD.py | 1 | 9079 | #!/usr/bin/python
# interpolate scalar gradient onto nedelec space
from dolfin import *
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
Print = PETSc.Sys.Print
# from MatrixOperations import *
import numpy as np
import matplotlib.pylab as plt
import PETScIO as IO
import common
import scipy
import scipy.io
import time as t
import BiLinear as forms
import IterOperations as Iter
import MatrixOperations as MO
import CheckPetsc4py as CP
import ExactSol
m = 2
errL2u =np.zeros((m-1,1))
errH1u =np.zeros((m-1,1))
errL2p =np.zeros((m-1,1))
errL2b =np.zeros((m-1,1))
errCurlb =np.zeros((m-1,1))
errL2r =np.zeros((m-1,1))
errH1r =np.zeros((m-1,1))
l2uorder = np.zeros((m-1,1))
H1uorder =np.zeros((m-1,1))
l2porder = np.zeros((m-1,1))
l2border = np.zeros((m-1,1))
Curlborder =np.zeros((m-1,1))
l2rorder = np.zeros((m-1,1))
H1rorder = np.zeros((m-1,1))
NN = np.zeros((m-1,1))
DoF = np.zeros((m-1,1))
Velocitydim = np.zeros((m-1,1))
Magneticdim = np.zeros((m-1,1))
Pressuredim = np.zeros((m-1,1))
Lagrangedim = np.zeros((m-1,1))
Wdim = np.zeros((m-1,1))
iterations = np.zeros((m-1,1))
SolTime = np.zeros((m-1,1))
udiv = np.zeros((m-1,1))
MU = np.zeros((m-1,1))
level = np.zeros((m-1,1))
nn = 2
dim = 2
ShowResultPlots = 'yes'
split = 'Linear'
MU[0]= 1e0
for xx in xrange(1,m):
print xx
level[xx-1] = xx
nn = 2**(level[xx-1])
# Create mesh and define function space
nn = int(nn)
NN[xx-1] = nn/2
parameters["form_compiler"]["quadrature_degree"] = 6
# parameters = CP.ParameterSetup()
mesh = BoxMesh(0, 0, 0, 1, 1, 1, nn, nn, nn)
order = 2
parameters['reorder_dofs_serial'] = False
Velocity = VectorFunctionSpace(mesh, "CG", order)
Pressure = FunctionSpace(mesh, "CG", order-1)
Magnetic = FunctionSpace(mesh, "N1curl", order)
Lagrange = FunctionSpace(mesh, "CG", order)
W = MixedFunctionSpace([Velocity,Pressure,Magnetic,Lagrange])
# W = Velocity*Pressure*Magnetic*Lagrange
Velocitydim[xx-1] = Velocity.dim()
Pressuredim[xx-1] = Pressure.dim()
Magneticdim[xx-1] = Magnetic.dim()
Lagrangedim[xx-1] = Lagrange.dim()
Wdim[xx-1] = W.dim()
print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n"
dim = [Velocity.dim(), Pressure.dim(), Magnetic.dim(), Lagrange.dim()]
def boundary(x, on_boundary):
return on_boundary
u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD3D(7,1)
bcu = DirichletBC(W.sub(0),u0, boundary)
bcb = DirichletBC(W.sub(2),b0, boundary)
bcr = DirichletBC(W.sub(3),r0, boundary)
# bc = [u0,p0,b0,r0]
bcs = [bcu,bcb,bcr]
FSpaces = [Velocity,Pressure,Magnetic,Lagrange]
(u, p, b, r) = TrialFunctions(W)
(v, q, c,s ) = TestFunctions(W)
kappa = 1.0
Mu_m =1e4
MU = 1.0
IterType = 'MD'
F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple
if kappa == 0:
F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple
else:
F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple
params = [kappa,Mu_m,MU]
u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,Neumann=Expression(("0","0")),options ="New")
ones = Function(Pressure)
ones.vector()[:]=(0*ones.vector().array()+1)
pConst = - assemble(p_k*dx)/assemble(ones*dx)
p_k.vector()[:] += pConst
x = Iter.u_prev(u_k,p_k,b_k,r_k)
# plot(b_k)
ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType)
RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params)
bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0","0.0")), boundary)
bcb = DirichletBC(W.sub(2),Expression(("0.0","0.0","0.0")), boundary)
bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary)
bcs = [bcu,bcb,bcr]
eps = 1.0 # error measure ||u-u_k||
tol = 1.0E-8 # tolerance
iter = 0 # iteration counter
maxiter = 40 # max no of iterations allowed
SolutionTime = 0
outer = 0
# parameters['linear_algebra_backend'] = 'uBLAS'
p = forms.Preconditioner(mesh,W,u_k,b_k,params,IterType)
PP,Pb = assemble_system(p, Lns,bcs)
if IterType == "CD":
AA, bb = assemble_system(maxwell+ns, (Lmaxwell + Lns) - RHSform, bcs)
# A,b = CP.Assemble(AA,bb)
# u = b.duplicate()
# P = CP.Assemble(PP)
while eps > tol and iter < maxiter:
iter += 1
if IterType == "CD":
bb = assemble((Lmaxwell + Lns) - RHSform)
# for bc in bcs:
# bc.apply(bb)
# A,b = CP.Assemble(AA,bb)
# u = b.duplicate()
else:
AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs)
A,b = CP.Assemble(AA,bb)
u = b.duplicate()
ksp = PETSc.KSP().create()
pc = ksp.getPC()#.PC().create()
ksp.setOperators(A)
OptDB = PETSc.Options()
OptDB["ksp_type"] = "preonly"
OptDB["pc_type"] = "lu"
OptDB["pc_factor_mat_ordering_type"] = "amd"
OptDB["pc_factor_mat_solver_package"] = "mumps"
# OptDB["pc_factor_shift_amount"] = 2
ksp.setFromOptions()
tic()
ksp.solve(b, u)
time = toc()
print time
SolutionTime = SolutionTime +time
u, p, b, r, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter)
p.vector()[:] += - assemble(p*dx)/assemble(ones*dx)
u_k.assign(u)
p_k.assign(p)
b_k.assign(b)
r_k.assign(r)
uOld= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0)
x = IO.arrayToVec(uOld)
# u_k,b_k,epsu,epsb=Iter.PicardTolerance(x,u_k,b_k,FSpaces,dim,"inf",iter)
SolTime[xx-1] = SolutionTime/iter
ue =u0
pe = p0
be = b0
re = r0
ExactSolution = [ue,pe,be,re]
errL2u[xx-1], errH1u[xx-1], errL2p[xx-1], errL2b[xx-1], errCurlb[xx-1], errL2r[xx-1], errH1r[xx-1] = Iter.Errors(x,mesh,FSpaces,ExactSolution,order,dim)
if xx == 1:
l2uorder[xx-1] = 0
else:
l2uorder[xx-1] = np.abs(np.log2(errL2u[xx-2]/errL2u[xx-1]))
H1uorder[xx-1] = np.abs(np.log2(errH1u[xx-2]/errH1u[xx-1]))
l2porder[xx-1] = np.abs(np.log2(errL2p[xx-2]/errL2p[xx-1]))
l2border[xx-1] = np.abs(np.log2(errL2b[xx-2]/errL2b[xx-1]))
Curlborder[xx-1] = np.abs(np.log2(errCurlb[xx-2]/errCurlb[xx-1]))
l2rorder[xx-1] = np.abs(np.log2(errL2r[xx-2]/errL2r[xx-1]))
H1rorder[xx-1] = np.abs(np.log2(errH1r[xx-2]/errH1r[xx-1]))
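# Since nn doubles at each level (nn = 2**level), the observed convergence
# order is estimated as log2 of the ratio of successive errors, e.g.
# l2uorder = |log2(errL2u[xx-2] / errL2u[xx-1])|; an order of about k means
# the error shrinks by roughly 2**k per uniform mesh refinement.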
import pandas as pd
LatexTitles = ["l","DoFu","Dofp","V-L2","L2-order","V-H1","H1-order","P-L2","PL2-order"]
LatexValues = np.concatenate((level,Velocitydim,Pressuredim,errL2u,l2uorder,errH1u,H1uorder,errL2p,l2porder), axis=1)
LatexTable = pd.DataFrame(LatexValues, columns = LatexTitles)
pd.set_option('precision',3)
LatexTable = MO.PandasFormat(LatexTable,"V-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'V-H1',"%2.4e")
LatexTable = MO.PandasFormat(LatexTable,"H1-order","%1.2f")
LatexTable = MO.PandasFormat(LatexTable,'L2-order',"%1.2f")
LatexTable = MO.PandasFormat(LatexTable,"P-L2","%2.4e")
LatexTable = MO.PandasFormat(LatexTable,'PL2-order',"%1.2f")
print LatexTable.to_latex()
print "\n\n Magnetic convergence"
MagneticTitles = ["l","B DoF","R DoF","B-L2","L2-order","B-Curl","HCurl-order"]
MagneticValues = np.concatenate((level,Magneticdim,Lagrangedim,errL2b,l2border,errCurlb,Curlborder),axis=1)
MagneticTable= pd.DataFrame(MagneticValues, columns = MagneticTitles)
pd.set_option('precision',3)
MagneticTable = MO.PandasFormat(MagneticTable,"B-Curl","%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,'B-L2',"%2.4e")
MagneticTable = MO.PandasFormat(MagneticTable,"L2-order","%1.2f")
MagneticTable = MO.PandasFormat(MagneticTable,'HCurl-order',"%1.2f")
print MagneticTable.to_latex()
print "\n\n Lagrange convergence"
LagrangeTitles = ["l","SolTime","B DoF","R DoF","R-L2","L2-order","R-H1","H1-order"]
LagrangeValues = np.concatenate((level,SolTime,Magneticdim,Lagrangedim,errL2r,l2rorder,errH1r,H1rorder),axis=1)
LagrangeTable= pd.DataFrame(LagrangeValues, columns = LagrangeTitles)
pd.set_option('precision',3)
LagrangeTable = MO.PandasFormat(LagrangeTable,"R-L2","%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,'R-H1',"%2.4e")
LagrangeTable = MO.PandasFormat(LagrangeTable,"H1-order","%1.2f")
LagrangeTable = MO.PandasFormat(LagrangeTable,'L2-order',"%1.2f")
print LagrangeTable.to_latex()
# # # if (ShowResultPlots == 'yes'):
# # plot(ua)
# plot(interpolate(ue,Velocity))
# # plot(pp)
# pe = interpolate(pe,Pressure)
# pe.vector()[:] -= np.max(pe.vector().array() )/2
# plot(interpolate(pe,Pressure))
# # plot(ba)
# plot(interpolate(be,Magnetic))
# # plot(ra)
# plot(interpolate(re,Lagrange))
# interactive()
interactive()
| mit |
wwf5067/statsmodels | statsmodels/base/data.py | 10 | 21796 | """
Base tools for handling various kinds of data structures, attaching metadata to
results, and doing data cleaning
"""
from statsmodels.compat.python import reduce, iteritems, lmap, zip, range
from statsmodels.compat.numpy import np_matrix_rank
import numpy as np
from pandas import DataFrame, Series, TimeSeries, isnull
from statsmodels.tools.decorators import (resettable_cache, cache_readonly,
cache_writable)
import statsmodels.tools.data as data_util
from statsmodels.tools.sm_exceptions import MissingDataError
def _asarray_2dcolumns(x):
if np.asarray(x).ndim > 1 and np.asarray(x).squeeze().ndim == 1:
return
def _asarray_2d_null_rows(x):
"""
Makes sure input is an array and is 2d. Makes sure output is 2d. True
indicates a null in the rows of 2d x.
"""
#Have to have the asarrays because isnull doesn't account for array-like
#input
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
return np.any(isnull(x), axis=1)[:, None]
def _nan_rows(*arrs):
"""
Returns a boolean array which is True where any of the rows in any
of the _2d_ arrays in arrs are NaNs. Inputs can be any mixture of Series,
DataFrames or array-like.
"""
if len(arrs) == 1:
arrs += ([[False]],)
def _nan_row_maybe_two_inputs(x, y):
# check for dtype bc dataframe has dtypes
x_is_boolean_array = hasattr(x, 'dtype') and x.dtype == bool and x
return np.logical_or(_asarray_2d_null_rows(x),
(x_is_boolean_array | _asarray_2d_null_rows(y)))
return reduce(_nan_row_maybe_two_inputs, arrs).squeeze()
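# Sketch of what _nan_rows does (illustrative values):
#
#     x = np.array([[1.0], [np.nan]])
#     y = Series([np.nan, 2.0])
#     _nan_rows(x, y)    # -> array([ True,  True])
#
# A row is flagged when *any* input has a missing value in that row; the
# ([[False]],) padding above only guarantees that ``reduce`` always receives
# at least two operands.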
class ModelData(object):
"""
Class responsible for handling input data and extracting metadata into the
appropriate form
"""
_param_names = None
def __init__(self, endog, exog=None, missing='none', hasconst=None,
**kwargs):
if 'design_info' in kwargs:
self.design_info = kwargs.pop('design_info')
if 'formula' in kwargs:
self.formula = kwargs.pop('formula')
if missing != 'none':
arrays, nan_idx = self.handle_missing(endog, exog, missing,
**kwargs)
self.missing_row_idx = nan_idx
self.__dict__.update(arrays) # attach all the data arrays
self.orig_endog = self.endog
self.orig_exog = self.exog
self.endog, self.exog = self._convert_endog_exog(self.endog,
self.exog)
else:
self.__dict__.update(kwargs) # attach the extra arrays anyway
self.orig_endog = endog
self.orig_exog = exog
self.endog, self.exog = self._convert_endog_exog(endog, exog)
# this has side-effects, attaches k_constant and const_idx
self._handle_constant(hasconst)
self._check_integrity()
self._cache = resettable_cache()
def __getstate__(self):
from copy import copy
d = copy(self.__dict__)
if "design_info" in d:
del d["design_info"]
d["restore_design_info"] = True
return d
def __setstate__(self, d):
if "restore_design_info" in d:
# NOTE: there may be a more performant way to do this
from patsy import dmatrices, PatsyError
exc = []
try:
data = d['frame']
except KeyError:
data = d['orig_endog'].join(d['orig_exog'])
for depth in [2, 3, 1, 0, 4]: # sequence is a guess where to likely find it
try:
_, design = dmatrices(d['formula'], data, eval_env=depth,
return_type='dataframe')
break
except (NameError, PatsyError) as e:
print('not in depth %d' % depth)
exc.append(e) # why do I need a reference from outside except block
pass
else:
raise exc[-1]
self.design_info = design.design_info
del d["restore_design_info"]
self.__dict__.update(d)
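# The loop above restores the patsy design_info after unpickling by re-running
# dmatrices on the stored formula; because the original evaluation environment
# is lost, it simply probes a few eval_env stack depths (2, 3, 1, 0, 4) and
# keeps the first one that evaluates without a NameError or PatsyError.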
def _handle_constant(self, hasconst):
if hasconst is not None:
if hasconst:
self.k_constant = 1
self.const_idx = None
else:
self.k_constant = 0
self.const_idx = None
elif self.exog is None:
self.const_idx = None
self.k_constant = 0
else:
# detect where the constant is
check_implicit = False
const_idx = np.where(self.exog.ptp(axis=0) == 0)[0].squeeze()
self.k_constant = const_idx.size
if self.k_constant == 1:
if self.exog[:, const_idx].mean() != 0:
self.const_idx = const_idx
else:
# we only have a zero column and no other constant
check_implicit = True
elif self.k_constant > 1:
# we have more than one constant column
# look for ones
values = [] # keep values if we need != 0
for idx in const_idx:
value = self.exog[:, idx].mean()
if value == 1:
self.k_constant = 1
self.const_idx = idx
break
values.append(value)
else:
# we didn't break, no column of ones
pos = (np.array(values) != 0)
if pos.any():
# take the first nonzero column
self.k_constant = 1
self.const_idx = const_idx[pos.argmax()]
else:
# only zero columns
check_implicit = True
elif self.k_constant == 0:
check_implicit = True
else:
# shouldn't be here
pass
if check_implicit:
# look for implicit constant
# Compute rank of augmented matrix
augmented_exog = np.column_stack(
(np.ones(self.exog.shape[0]), self.exog))
rank_augm = np_matrix_rank(augmented_exog)
rank_orig = np_matrix_rank(self.exog)
self.k_constant = int(rank_orig == rank_augm)
self.const_idx = None
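# The rank test above detects an *implicit* constant: if appending a column of
# ones does not increase the rank of exog, the existing columns already span a
# constant.  A common case (values chosen for illustration) is a full set of
# dummy variables:
#
#     exog = np.array([[1., 0.],
#                      [0., 1.],
#                      [1., 0.]])   # rows sum to 1 -> implicit intercept
#
# Here rank(exog) == rank([1 | exog]) == 2, so k_constant becomes 1 even
# though no single column is constant.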
@classmethod
def _drop_nans(cls, x, nan_mask):
return x[nan_mask]
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
return x[nan_mask][:, nan_mask]
@classmethod
def handle_missing(cls, endog, exog, missing, **kwargs):
"""
This returns a dictionary with keys endog, exog and the keys of
kwargs. It preserves Nones.
"""
none_array_names = []
# patsy's already dropped NaNs in y/X
missing_idx = kwargs.pop('missing_idx', None)
if missing_idx is not None:
# y, X already handled by patsy. add back in later.
combined = ()
combined_names = []
if exog is None:
none_array_names += ['exog']
elif exog is not None:
combined = (endog, exog)
combined_names = ['endog', 'exog']
else:
combined = (endog,)
combined_names = ['endog']
none_array_names += ['exog']
# deal with other arrays
combined_2d = ()
combined_2d_names = []
if len(kwargs):
for key, value_array in iteritems(kwargs):
if value_array is None or value_array.ndim == 0:
none_array_names += [key]
continue
# grab 1d arrays
if value_array.ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
elif value_array.squeeze().ndim == 1:
combined += (np.asarray(value_array),)
combined_names += [key]
# grab 2d arrays that are _assumed_ to be symmetric
elif value_array.ndim == 2:
combined_2d += (np.asarray(value_array),)
combined_2d_names += [key]
else:
raise ValueError("Arrays with more than 2 dimensions "
"aren't yet handled")
if missing_idx is not None:
nan_mask = missing_idx
updated_row_mask = None
if combined: # there were extra arrays not handled by patsy
combined_nans = _nan_rows(*combined)
if combined_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra arrays given to model.")
# for going back and updated endog/exog
updated_row_mask = combined_nans[~nan_mask]
nan_mask |= combined_nans # for updating extra arrays only
if combined_2d:
combined_2d_nans = _nan_rows(combined_2d)
if combined_2d_nans.shape[0] != nan_mask.shape[0]:
raise ValueError("Shape mismatch between endog/exog "
"and extra 2d arrays given to model.")
if updated_row_mask is not None:
updated_row_mask |= combined_2d_nans[~nan_mask]
else:
updated_row_mask = combined_2d_nans[~nan_mask]
nan_mask |= combined_2d_nans
else:
nan_mask = _nan_rows(*combined)
if combined_2d:
nan_mask = _nan_rows(*(nan_mask[:, None],) + combined_2d)
if not np.any(nan_mask): # no missing don't do anything
combined = dict(zip(combined_names, combined))
if combined_2d:
combined.update(dict(zip(combined_2d_names, combined_2d)))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
if missing_idx is not None:
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
return combined, []
elif missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
elif missing == 'drop':
nan_mask = ~nan_mask
drop_nans = lambda x: cls._drop_nans(x, nan_mask)
drop_nans_2d = lambda x: cls._drop_nans_2d(x, nan_mask)
combined = dict(zip(combined_names, lmap(drop_nans, combined)))
if missing_idx is not None:
if updated_row_mask is not None:
updated_row_mask = ~updated_row_mask
# update endog/exog with this new information
endog = cls._drop_nans(endog, updated_row_mask)
if exog is not None:
exog = cls._drop_nans(exog, updated_row_mask)
combined.update({'endog': endog})
if exog is not None:
combined.update({'exog': exog})
if combined_2d:
combined.update(dict(zip(combined_2d_names,
lmap(drop_nans_2d, combined_2d))))
if none_array_names:
combined.update(dict(zip(none_array_names,
[None] * len(none_array_names))))
return combined, np.where(~nan_mask)[0].tolist()
else:
raise ValueError("missing option %s not understood" % missing)
def _convert_endog_exog(self, endog, exog):
# for consistent outputs if endog is (n,1)
yarr = self._get_yarr(endog)
xarr = None
if exog is not None:
xarr = self._get_xarr(exog)
if xarr.ndim == 1:
xarr = xarr[:, None]
if xarr.ndim != 2:
raise ValueError("exog is not 1d or 2d")
return yarr, xarr
@cache_writable()
def ynames(self):
endog = self.orig_endog
ynames = self._get_names(endog)
if not ynames:
ynames = _make_endog_names(self.endog)
if len(ynames) == 1:
return ynames[0]
else:
return list(ynames)
@cache_writable()
def xnames(self):
exog = self.orig_exog
if exog is not None:
xnames = self._get_names(exog)
if not xnames:
xnames = _make_exog_names(self.exog)
return list(xnames)
return None
@property
def param_names(self):
# for handling names of 'extra' parameters in summary, etc.
return self._param_names or self.xnames
@param_names.setter
def param_names(self, values):
self._param_names = values
@cache_readonly
def row_labels(self):
exog = self.orig_exog
if exog is not None:
row_labels = self._get_row_labels(exog)
else:
endog = self.orig_endog
row_labels = self._get_row_labels(endog)
return row_labels
def _get_row_labels(self, arr):
return None
def _get_names(self, arr):
if isinstance(arr, DataFrame):
return list(arr.columns)
elif isinstance(arr, Series):
if arr.name:
return [arr.name]
else:
return
else:
try:
return arr.dtype.names
except AttributeError:
pass
return None
def _get_yarr(self, endog):
if data_util._is_structured_ndarray(endog):
endog = data_util.struct_to_ndarray(endog)
endog = np.asarray(endog)
if len(endog) == 1: # never squeeze to a scalar
if endog.ndim == 1:
return endog
elif endog.ndim > 1:
return np.asarray([endog.squeeze()])
return endog.squeeze()
def _get_xarr(self, exog):
if data_util._is_structured_ndarray(exog):
exog = data_util.struct_to_ndarray(exog)
return np.asarray(exog)
def _check_integrity(self):
if self.exog is not None:
if len(self.exog) != len(self.endog):
raise ValueError("endog and exog matrices are different sizes")
def wrap_output(self, obj, how='columns', names=None):
if how == 'columns':
return self.attach_columns(obj)
elif how == 'rows':
return self.attach_rows(obj)
elif how == 'cov':
return self.attach_cov(obj)
elif how == 'dates':
return self.attach_dates(obj)
elif how == 'columns_eq':
return self.attach_columns_eq(obj)
elif how == 'cov_eq':
return self.attach_cov_eq(obj)
elif how == 'generic_columns':
return self.attach_generic_columns(obj, names)
elif how == 'generic_columns_2d':
return self.attach_generic_columns_2d(obj, names)
else:
return obj
def attach_columns(self, result):
return result
def attach_columns_eq(self, result):
return result
def attach_cov(self, result):
return result
def attach_cov_eq(self, result):
return result
def attach_rows(self, result):
return result
def attach_dates(self, result):
return result
def attach_generic_columns(self, result, *args, **kwargs):
return result
def attach_generic_columns_2d(self, result, *args, **kwargs):
return result
class PatsyData(ModelData):
def _get_names(self, arr):
return arr.design_info.column_names
class PandasData(ModelData):
"""
Data handling class which knows how to reattach pandas metadata to model
results
"""
def _convert_endog_exog(self, endog, exog=None):
#TODO: remove this when we handle dtype systematically
endog = np.asarray(endog)
exog = exog if exog is None else np.asarray(exog)
if endog.dtype == object or exog is not None and exog.dtype == object:
raise ValueError("Pandas data cast to numpy dtype of object. "
"Check input data with np.asarray(data).")
return super(PandasData, self)._convert_endog_exog(endog, exog)
@classmethod
def _drop_nans(cls, x, nan_mask):
if hasattr(x, 'ix'):
return x.ix[nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans(x, nan_mask)
@classmethod
def _drop_nans_2d(cls, x, nan_mask):
if hasattr(x, 'ix'):
return x.ix[nan_mask].ix[:, nan_mask]
else: # extra arguments could be plain ndarrays
return super(PandasData, cls)._drop_nans_2d(x, nan_mask)
def _check_integrity(self):
endog, exog = self.orig_endog, self.orig_exog
# exog can be None and we could be upcasting one or the other
if (exog is not None and
(hasattr(endog, 'index') and hasattr(exog, 'index')) and
not self.orig_endog.index.equals(self.orig_exog.index)):
raise ValueError("The indices for endog and exog are not aligned")
super(PandasData, self)._check_integrity()
def _get_row_labels(self, arr):
try:
return arr.index
except AttributeError:
# if we've gotten here it's because endog is pandas and
# exog is not, so just return the row labels from endog
return self.orig_endog.index
def attach_generic_columns(self, result, names):
# get the attribute to use
column_names = getattr(self, names, None)
return Series(result, index=column_names)
def attach_generic_columns_2d(self, result, rownames, colnames=None):
colnames = colnames or rownames
rownames = getattr(self, rownames, None)
colnames = getattr(self, colnames, None)
return DataFrame(result, index=rownames, columns=colnames)
def attach_columns(self, result):
# this can either be a 1d array or a scalar
# don't squeeze because it might be a 2d row array
# if it needs a squeeze, the bug is elsewhere
if result.ndim <= 1:
return Series(result, index=self.param_names)
else: # for e.g., confidence intervals
return DataFrame(result, index=self.param_names)
def attach_columns_eq(self, result):
return DataFrame(result, index=self.xnames, columns=self.ynames)
def attach_cov(self, result):
return DataFrame(result, index=self.param_names,
columns=self.param_names)
def attach_cov_eq(self, result):
return DataFrame(result, index=self.ynames, columns=self.ynames)
def attach_rows(self, result):
# assumes if len(row_labels) > len(result) it's bc it was truncated
# at the front, for AR lags, for example
if result.squeeze().ndim == 1:
return Series(result, index=self.row_labels[-len(result):])
else: # this is for VAR results, may not be general enough
return DataFrame(result, index=self.row_labels[-len(result):],
columns=self.ynames)
def attach_dates(self, result):
return TimeSeries(result, index=self.predict_dates)
def _make_endog_names(endog):
if endog.ndim == 1 or endog.shape[1] == 1:
ynames = ['y']
else: # for VAR
ynames = ['y%d' % (i+1) for i in range(endog.shape[1])]
return ynames
def _make_exog_names(exog):
exog_var = exog.var(0)
if (exog_var == 0).any():
# assumes one constant in first or last position
# avoid exception if more than one constant
const_idx = exog_var.argmin()
exog_names = ['x%d' % i for i in range(1, exog.shape[1])]
exog_names.insert(const_idx, 'const')
else:
exog_names = ['x%d' % i for i in range(1, exog.shape[1]+1)]
return exog_names
def handle_missing(endog, exog=None, missing='none', **kwargs):
klass = handle_data_class_factory(endog, exog)
if missing == 'none':
ret_dict = dict(endog=endog, exog=exog)
ret_dict.update(kwargs)
return ret_dict, None
return klass.handle_missing(endog, exog, missing=missing, **kwargs)
def handle_data_class_factory(endog, exog):
"""
Given inputs
"""
if data_util._is_using_ndarray_type(endog, exog):
klass = ModelData
elif data_util._is_using_pandas(endog, exog):
klass = PandasData
elif data_util._is_using_patsy(endog, exog):
klass = PatsyData
# keep this check last
elif data_util._is_using_ndarray(endog, exog):
klass = ModelData
else:
raise ValueError('unrecognized data structures: %s / %s' %
(type(endog), type(exog)))
return klass
def handle_data(endog, exog, missing='none', hasconst=None, **kwargs):
# deal with lists and tuples up-front
if isinstance(endog, (list, tuple)):
endog = np.asarray(endog)
if isinstance(exog, (list, tuple)):
exog = np.asarray(exog)
klass = handle_data_class_factory(endog, exog)
return klass(endog, exog=exog, missing=missing, hasconst=hasconst,
**kwargs)
| bsd-3-clause |
mgunyho/pyspread | pyspread/src/lib/_grid_cairo_renderer.py | 1 | 42397 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Martin Manns
# Distributed under the terms of the GNU General Public License
# --------------------------------------------------------------------
# pyspread is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyspread is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyspread. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------
"""
_grid_cairo_renderer.py
=======================
Provides
--------
* GridCairoRenderer: Renders grid slice to Cairo context
* GridCellCairoRenderer: Renders grid cell to Cairo context
* GridCellContentCairoRenderer: Renders cell content to Cairo context
* GridCellBackgroundCairoRenderer: Renders cell background to Cairo context
* GridCellBorderCairoRenderer: Renders cell border to Cairo context
"""
import math
import sys
import warnings
from operator import attrgetter
import cairo
import numpy
import wx
import wx.lib.wxcairo
try:
import matplotlib.pyplot as pyplot
from matplotlib.backends.backend_cairo import RendererCairo
from matplotlib.backends.backend_cairo import FigureCanvasCairo
from matplotlib.transforms import Affine2D
except ImportError:
pyplot = None
try:
from enchant.checker import SpellChecker
except ImportError:
SpellChecker = None
import pango
import pangocairo
from src.lib.parsers import color_pack2rgb, is_svg
STANDARD_ROW_HEIGHT = 20
STANDARD_COL_WIDTH = 50
try:
from src.config import config
MAX_RESULT_LENGTH = config["max_result_length"]
except ImportError:
MAX_RESULT_LENGTH = 100000
class GridCairoRenderer(object):
"""Renders a grid slice to a CairoSurface
Parameters
----------
* context: pycairo.context
\tThe Cairo context to be drawn to
* code_array: model.code_array
\tGrid data structure that yields rendering information
* row_tb: 2 tuple of Integer
\tStart and stop of row range with step 1
    * col_rl: 2 tuple of Integer
\tStart and stop of col range with step 1
* tab_fl: 2 tuple of Integer
\tStart and stop of tab range with step 1
* width: Float
\tPage width in points
* height: Float
\tPage height in points
* orientation: String in ["portrait", "landscape"]
\tPage orientation
* x_offset: Float, defaults to 20.5
    \t X offset from page border in points
* y_offset: Float, defaults to 20.5
    \t Y offset from page border in points
"""
def __init__(self, context, code_array, row_tb, col_rl, tab_fl,
width, height, orientation, x_offset=20.5, y_offset=20.5,
view_frozen=False, spell_check=False):
self.context = context
self.code_array = code_array
self.row_tb = row_tb
self.col_rl = col_rl
self.tab_fl = tab_fl
self.width = width
self.height = height
self.x_offset = x_offset
self.y_offset = y_offset
self.orientation = orientation
self.view_frozen = view_frozen
self.spell_check = spell_check
def get_cell_rect(self, row, col, tab):
"""Returns rectangle of cell on canvas"""
top_row = self.row_tb[0]
left_col = self.col_rl[0]
pos_x = self.x_offset
pos_y = self.y_offset
merge_area = self._get_merge_area((row, col, tab))
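        # Walk from the slice origin to the target cell, summing row heights and
        # column widths to obtain the cell's position on the canvas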
for __row in xrange(top_row, row):
__row_height = self.code_array.get_row_height(__row, tab)
pos_y += __row_height
for __col in xrange(left_col, col):
__col_width = self.code_array.get_col_width(__col, tab)
pos_x += __col_width
if merge_area is None:
height = self.code_array.get_row_height(row, tab)
width = self.code_array.get_col_width(col, tab)
else:
# We have a merged cell
top, left, bottom, right = merge_area
# Are we drawing the top left cell?
if top == row and left == col:
# Set rect to merge area
heights = (self.code_array.get_row_height(__row, tab)
for __row in xrange(top, bottom+1))
widths = (self.code_array.get_col_width(__col, tab)
for __col in xrange(left, right+1))
height = sum(heights)
width = sum(widths)
else:
# Do not draw the cell because it is hidden
return
return pos_x, pos_y, width, height
def _get_merge_area(self, key):
"""Returns the merge area of a merged cell
Merge area is a 4 tuple (top, left, bottom, right)
"""
cell_attributes = self.code_array.cell_attributes[key]
return cell_attributes["merge_area"]
def draw(self):
"""Draws slice to context"""
row_start, row_stop = self.row_tb
col_start, col_stop = self.col_rl
tab_start, tab_stop = self.tab_fl
for tab in xrange(tab_start, tab_stop):
# Scale context to page extent
            # In order to keep the aspect ratio intact use the maximum
first_key = row_start, col_start, tab
first_rect = self.get_cell_rect(*first_key)
# If we have a merged cell then use the top left cell rect
if first_rect is None:
top, left, __, __ = self._get_merge_area(first_key)
first_rect = self.get_cell_rect(top, left, tab)
last_key = row_stop - 1, col_stop - 1, tab
last_rect = self.get_cell_rect(*last_key)
# If we have a merged cell then use the top left cell rect
if last_rect is None:
top, left, __, __ = self._get_merge_area(last_key)
last_rect = self.get_cell_rect(top, left, tab)
x_extent = last_rect[0] + last_rect[2] - first_rect[0]
y_extent = last_rect[1] + last_rect[3] - first_rect[1]
scale_x = (self.width - 2 * self.x_offset) / float(x_extent)
scale_y = (self.height - 2 * self.y_offset) / float(y_extent)
self.context.save()
# Translate offset to o
self.context.translate(first_rect[0], first_rect[1])
# Scale to fit page, do not change aspect ratio
scale = min(scale_x, scale_y)
self.context.scale(scale, scale)
# Translate offset
self.context.translate(-self.x_offset, -self.y_offset)
# TODO: Center the grid on the page
# Render cells
for row in xrange(row_start, row_stop):
for col in xrange(col_start, col_stop):
key = row, col, tab
rect = self.get_cell_rect(row, col, tab) # Rect
if rect is not None:
cell_renderer = GridCellCairoRenderer(
self.context,
self.code_array,
key, # (row, col, tab)
rect,
self.view_frozen
)
cell_renderer.draw()
# Undo scaling, translation, ...
self.context.restore()
self.context.show_page()
class GridCellCairoRenderer(object):
"""Renders a grid cell to a CairoSurface
Parameters
----------
* context: pycairo.context
\tThe Cairo context to be drawn to
* code_array: model.code_array
\tGrid data structure that yields rendering information
* key: 3 tuple of Integer
\tKey of cell to be rendered
* rect: 4 tuple of float
\tx, y, width and height of cell rectangle
"""
def __init__(self, context, code_array, key, rect, view_frozen=False,
spell_check=False):
self.context = context
self.code_array = code_array
self.key = key
self.rect = rect
self.view_frozen = view_frozen
self.spell_check = spell_check
def draw(self):
"""Draws cell to context"""
cell_background_renderer = GridCellBackgroundCairoRenderer(
self.context,
self.code_array,
self.key,
self.rect,
self.view_frozen)
cell_content_renderer = GridCellContentCairoRenderer(
self.context,
self.code_array,
self.key,
self.rect,
self.spell_check)
cell_border_renderer = GridCellBorderCairoRenderer(
self.context,
self.code_array,
self.key,
self.rect)
cell_background_renderer.draw()
cell_content_renderer.draw()
cell_border_renderer.draw()
class GridCellContentCairoRenderer(object):
"""Renders cell content to Cairo context
Parameters
----------
* context: pycairo.context
\tThe Cairo context to be drawn to
* code_array: model.code_array
\tGrid data structure that yields rendering information
* key: 3 tuple of Integer
\tKey of cell to be rendered
"""
def __init__(self, context, code_array, key, rect, spell_check=False):
self.context = context
self.code_array = code_array
self.key = key
self.rect = rect
self.spell_check = spell_check
def get_cell_content(self):
"""Returns cell content"""
try:
if self.code_array.cell_attributes[self.key]["button_cell"]:
return
except IndexError:
return
try:
return self.code_array[self.key]
except IndexError:
pass
def _get_scalexy(self, ims_width, ims_height):
"""Returns scale_x, scale_y for bitmap display"""
# Get cell attributes
cell_attributes = self.code_array.cell_attributes[self.key]
angle = cell_attributes["angle"]
if abs(angle) == 90:
scale_x = self.rect[3] / float(ims_width)
scale_y = self.rect[2] / float(ims_height)
else:
# Normal case
scale_x = self.rect[2] / float(ims_width)
scale_y = self.rect[3] / float(ims_height)
return scale_x, scale_y
def _get_translation(self, ims_width, ims_height):
"""Returns x and y for a bitmap translation"""
# Get cell attributes
cell_attributes = self.code_array.cell_attributes[self.key]
justification = cell_attributes["justification"]
vertical_align = cell_attributes["vertical_align"]
angle = cell_attributes["angle"]
scale_x, scale_y = self._get_scalexy(ims_width, ims_height)
scale = min(scale_x, scale_y)
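        # Choose the offset so that the (possibly rotated) image honours the
        # cell's justification and vertical alignment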
if angle not in (90, 180, -90):
# Standard direction
x = -2 # Otherwise there is a white border
y = -2 # Otherwise there is a white border
if scale_x > scale_y:
if justification == "center":
x += (self.rect[2] - ims_width * scale) / 2
elif justification == "right":
x += self.rect[2] - ims_width * scale
else:
if vertical_align == "middle":
y += (self.rect[3] - ims_height * scale) / 2
elif vertical_align == "bottom":
y += self.rect[3] - ims_height * scale
if angle == 90:
x = -ims_width * scale + 2
y = -2
if scale_y > scale_x:
if justification == "center":
y += (self.rect[2] - ims_height * scale) / 2
elif justification == "right":
y += self.rect[2] - ims_height * scale
else:
if vertical_align == "middle":
x -= (self.rect[3] - ims_width * scale) / 2
elif vertical_align == "bottom":
x -= self.rect[3] - ims_width * scale
elif angle == 180:
x = -ims_width * scale + 2
y = -ims_height * scale + 2
if scale_x > scale_y:
if justification == "center":
x -= (self.rect[2] - ims_width * scale) / 2
elif justification == "right":
x -= self.rect[2] - ims_width * scale
else:
if vertical_align == "middle":
y -= (self.rect[3] - ims_height * scale) / 2
elif vertical_align == "bottom":
y -= self.rect[3] - ims_height * scale
elif angle == -90:
x = -2
y = -ims_height * scale + 2
if scale_y > scale_x:
if justification == "center":
y -= (self.rect[2] - ims_height * scale) / 2
elif justification == "right":
y -= self.rect[2] - ims_height * scale
else:
if vertical_align == "middle":
x += (self.rect[3] - ims_width * scale) / 2
elif vertical_align == "bottom":
x += self.rect[3] - ims_width * scale
return x, y
def draw_bitmap(self, content):
"""Draws bitmap cell content to context"""
if content.HasAlpha():
image = wx.ImageFromBitmap(content)
image.ConvertAlphaToMask()
image.SetMask(False)
content = wx.BitmapFromImage(image)
ims = wx.lib.wxcairo.ImageSurfaceFromBitmap(content)
ims_width = ims.get_width()
ims_height = ims.get_height()
transx, transy = self._get_translation(ims_width, ims_height)
scale_x, scale_y = self._get_scalexy(ims_width, ims_height)
scale = min(scale_x, scale_y)
angle = float(self.code_array.cell_attributes[self.key]["angle"])
self.context.save()
self.context.rotate(-angle / 360 * 2 * math.pi)
self.context.translate(transx, transy)
self.context.scale(scale, scale)
self.context.set_source_surface(ims, 0, 0)
self.context.paint()
self.context.restore()
def draw_svg(self, svg_str):
"""Draws svg string to cell"""
try:
import rsvg
except ImportError:
self.draw_text(svg_str)
return
svg = rsvg.Handle(data=svg_str)
svg_width, svg_height = svg.get_dimension_data()[:2]
transx, transy = self._get_translation(svg_width, svg_height)
scale_x, scale_y = self._get_scalexy(svg_width, svg_height)
scale = min(scale_x, scale_y)
angle = float(self.code_array.cell_attributes[self.key]["angle"])
self.context.save()
self.context.rotate(-angle / 360 * 2 * math.pi)
self.context.translate(transx, transy)
self.context.scale(scale, scale)
svg.render_cairo(self.context)
self.context.restore()
def draw_matplotlib_figure(self, figure):
"""Draws matplotlib figure to context"""
        class CustomRendererCairo(RendererCairo):
            """Workaround for older versions with limited draw path length"""
if sys.byteorder == 'little':
BYTE_FORMAT = 0 # BGRA
else:
BYTE_FORMAT = 1 # ARGB
def draw_path(self, gc, path, transform, rgbFace=None):
ctx = gc.ctx
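                # Flip the y axis: matplotlib uses a bottom-left origin,
                # whereas Cairo uses a top-left origin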
transform = transform + Affine2D().scale(1.0, -1.0).\
translate(0, self.height)
ctx.new_path()
self.convert_path(ctx, path, transform)
try:
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha(),
gc.get_forced_alpha())
except AttributeError:
                    # Workaround for some Windows versions of Cairo
self._fill_and_stroke(ctx, rgbFace, gc.get_alpha())
def draw_image(self, gc, x, y, im):
# bbox - not currently used
rows, cols, buf = im.color_conv(self.BYTE_FORMAT)
surface = cairo.ImageSurface.create_for_data(
buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)
ctx = gc.ctx
y = self.height - y - rows
ctx.save()
ctx.set_source_surface(surface, x, y)
if gc.get_alpha() != 1.0:
ctx.paint_with_alpha(gc.get_alpha())
else:
ctx.paint()
ctx.restore()
if pyplot is None:
# Matplotlib is not installed
return
FigureCanvasCairo(figure)
dpi = float(figure.dpi)
# Set a border so that the figure is not cut off at the cell border
border_x = 200 / (self.rect[2] / dpi) ** 2
border_y = 200 / (self.rect[3] / dpi) ** 2
width = (self.rect[2] - 2 * border_x) / dpi
height = (self.rect[3] - 2 * border_y) / dpi
figure.set_figwidth(width)
figure.set_figheight(height)
renderer = CustomRendererCairo(dpi)
renderer.set_width_height(width, height)
renderer.gc.ctx = self.context
renderer.text_ctx = self.context
self.context.save()
self.context.translate(border_x, border_y + height * dpi)
figure.draw(renderer)
self.context.restore()
def _get_text_color(self):
"""Returns text color rgb tuple of right line"""
color = self.code_array.cell_attributes[self.key]["textcolor"]
return tuple(c / 255.0 for c in color_pack2rgb(color))
def set_font(self, pango_layout):
"""Sets the font for draw_text"""
wx2pango_weights = {
wx.FONTWEIGHT_BOLD: pango.WEIGHT_BOLD,
wx.FONTWEIGHT_LIGHT: pango.WEIGHT_LIGHT,
wx.FONTWEIGHT_NORMAL: None, # Do not set a weight by default
}
wx2pango_styles = {
wx.FONTSTYLE_NORMAL: None, # Do not set a style by default
wx.FONTSTYLE_SLANT: pango.STYLE_OBLIQUE,
wx.FONTSTYLE_ITALIC: pango.STYLE_ITALIC,
}
cell_attributes = self.code_array.cell_attributes[self.key]
# Text font attributes
textfont = cell_attributes["textfont"]
pointsize = cell_attributes["pointsize"]
fontweight = cell_attributes["fontweight"]
fontstyle = cell_attributes["fontstyle"]
underline = cell_attributes["underline"]
strikethrough = cell_attributes["strikethrough"]
# Now construct the pango font
font_description = pango.FontDescription(
" ".join([textfont, str(pointsize)]))
pango_layout.set_font_description(font_description)
attrs = pango.AttrList()
# Underline
attrs.insert(pango.AttrUnderline(underline, 0, MAX_RESULT_LENGTH))
# Weight
weight = wx2pango_weights[fontweight]
if weight is not None:
attrs.insert(pango.AttrWeight(weight, 0, MAX_RESULT_LENGTH))
# Style
style = wx2pango_styles[fontstyle]
if style is not None:
attrs.insert(pango.AttrStyle(style, 0, MAX_RESULT_LENGTH))
# Strikethrough
attrs.insert(pango.AttrStrikethrough(strikethrough, 0,
MAX_RESULT_LENGTH))
pango_layout.set_attributes(attrs)
def _rotate_cell(self, angle, rect, back=False):
"""Rotates and translates cell if angle in [90, -90, 180]"""
if angle == 90 and not back:
self.context.rotate(-math.pi / 2.0)
self.context.translate(-rect[2] + 4, 0)
elif angle == 90 and back:
self.context.translate(rect[2] - 4, 0)
self.context.rotate(math.pi / 2.0)
elif angle == -90 and not back:
self.context.rotate(math.pi / 2.0)
self.context.translate(0, -rect[3] + 2)
elif angle == -90 and back:
self.context.translate(0, rect[3] - 2)
self.context.rotate(-math.pi / 2.0)
elif angle == 180 and not back:
self.context.rotate(math.pi)
self.context.translate(-rect[2] + 4, -rect[3] + 2)
elif angle == 180 and back:
self.context.translate(rect[2] - 4, rect[3] - 2)
self.context.rotate(-math.pi)
def _draw_error_underline(self, ptx, pango_layout, start, stop):
"""Draws an error underline"""
self.context.save()
self.context.set_source_rgb(1.0, 0.0, 0.0)
pit = pango_layout.get_iter()
# Skip characters until start
for i in xrange(start):
pit.next_char()
extents_list = []
for char_no in xrange(start, stop):
char_extents = pit.get_char_extents()
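            # Pango extents are in Pango units; divide by pango.SCALE for pixels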
underline_pixel_extents = [
char_extents[0] / pango.SCALE,
(char_extents[1] + char_extents[3]) / pango.SCALE - 2,
char_extents[2] / pango.SCALE,
4,
]
if extents_list:
if extents_list[-1][1] == underline_pixel_extents[1]:
# Same line
extents_list[-1][2] = extents_list[-1][2] + \
underline_pixel_extents[2]
else:
# Line break
extents_list.append(underline_pixel_extents)
else:
extents_list.append(underline_pixel_extents)
pit.next_char()
for extent in extents_list:
pangocairo.show_error_underline(ptx, *extent)
self.context.restore()
def _check_spelling(self, text, lang="en_US"):
"""Returns a list of start stop tuples that have spelling errors
Parameters
----------
text: Unicode or string
\tThe text that is checked
lang: String, defaults to "en_US"
\tName of spell checking dictionary
"""
chkr = SpellChecker(lang)
chkr.set_text(text)
word_starts_ends = []
for err in chkr:
start = err.wordpos
stop = err.wordpos + len(err.word) + 1
word_starts_ends.append((start, stop))
return word_starts_ends
def draw_text(self, content):
"""Draws text cell content to context"""
wx2pango_alignment = {
"left": pango.ALIGN_LEFT,
"center": pango.ALIGN_CENTER,
"right": pango.ALIGN_RIGHT,
}
cell_attributes = self.code_array.cell_attributes[self.key]
angle = cell_attributes["angle"]
if angle in [-90, 90]:
rect = self.rect[1], self.rect[0], self.rect[3], self.rect[2]
else:
rect = self.rect
# Text color attributes
self.context.set_source_rgb(*self._get_text_color())
ptx = pangocairo.CairoContext(self.context)
pango_layout = ptx.create_layout()
self.set_font(pango_layout)
pango_layout.set_wrap(pango.WRAP_WORD_CHAR)
pango_layout.set_width(int(round((rect[2] - 4.0) * pango.SCALE)))
try:
markup = cell_attributes["markup"]
except KeyError:
# Old file
markup = False
if markup:
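            # Render the content as Pango markup; if the markup raises
            # warnings, display the warning text instead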
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always")
pango_layout.set_markup(unicode(content))
if warning_lines:
w2unicode = lambda m: unicode(m.message)
msg = u"\n".join(map(w2unicode, warning_lines))
pango_layout.set_text(msg)
else:
pango_layout.set_text(unicode(content))
alignment = cell_attributes["justification"]
pango_layout.set_alignment(wx2pango_alignment[alignment])
# Shift text for vertical alignment
extents = pango_layout.get_pixel_extents()
downshift = 0
if cell_attributes["vertical_align"] == "bottom":
downshift = rect[3] - extents[1][3] - 4
elif cell_attributes["vertical_align"] == "middle":
downshift = int((rect[3] - extents[1][3]) / 2) - 2
self.context.save()
self._rotate_cell(angle, rect)
self.context.translate(0, downshift)
# Spell check underline drawing
if SpellChecker is not None and self.spell_check:
text = unicode(pango_layout.get_text())
lang = config["spell_lang"]
for start, stop in self._check_spelling(text, lang=lang):
self._draw_error_underline(ptx, pango_layout, start, stop-1)
ptx.update_layout(pango_layout)
ptx.show_layout(pango_layout)
self.context.restore()
def draw_roundedrect(self, x, y, w, h, r=10):
"""Draws a rounded rectangle"""
# A****BQ
# H C
# * *
# G D
# F****E
context = self.context
        context.save()
context.move_to(x+r, y) # Move to A
context.line_to(x+w-r, y) # Straight line to B
context.curve_to(x+w, y, x+w, y, x+w, y+r) # Curve to C
# Control points are both at Q
context.line_to(x+w, y+h-r) # Move to D
context.curve_to(x+w, y+h, x+w, y+h, x+w-r, y+h) # Curve to E
context.line_to(x+r, y+h) # Line to F
context.curve_to(x, y+h, x, y+h, x, y+h-r) # Curve to G
context.line_to(x, y+r) # Line to H
context.curve_to(x, y, x, y, x+r, y) # Curve to A
        context.restore()
def draw_button(self, x, y, w, h, label):
"""Draws a button"""
context = self.context
self.draw_roundedrect(x, y, w, h)
context.clip()
# Set up the gradients
gradient = cairo.LinearGradient(0, 0, 0, 1)
gradient.add_color_stop_rgba(0, 0.5, 0.5, 0.5, 0.1)
gradient.add_color_stop_rgba(1, 0.8, 0.8, 0.8, 0.9)
# # Transform the coordinates so the width and height are both 1
# # We save the current settings and restore them afterward
context.save()
context.scale(w, h)
context.rectangle(0, 0, 1, 1)
context.set_source_rgb(0, 0, 1)
context.set_source(gradient)
context.fill()
context.restore()
# Draw the button text
# Center the text
x_bearing, y_bearing, width, height, x_advance, y_advance = \
context.text_extents(label)
text_x = (w / 2.0)-(width / 2.0 + x_bearing)
text_y = (h / 2.0)-(height / 2.0 + y_bearing) + 1
# Draw the button text
context.move_to(text_x, text_y)
context.set_source_rgba(0, 0, 0, 1)
context.show_text(label)
# Draw the border of the button
context.move_to(x, y)
context.set_source_rgba(0, 0, 0, 1)
self.draw_roundedrect(x, y, w, h)
context.stroke()
def draw(self):
"""Draws cell content to context"""
# Content is only rendered within rect
self.context.save()
self.context.rectangle(*self.rect)
self.context.clip()
content = self.get_cell_content()
pos_x, pos_y = self.rect[:2]
self.context.translate(pos_x + 2, pos_y + 2)
cell_attributes = self.code_array.cell_attributes
# Do not draw cell content if cell is too small
# This allows blending out small cells by reducing height to 0
if self.rect[2] < cell_attributes[self.key]["borderwidth_right"] or \
self.rect[3] < cell_attributes[self.key]["borderwidth_bottom"]:
self.context.restore()
return
if self.code_array.cell_attributes[self.key]["button_cell"]:
# Render a button instead of the cell
label = self.code_array.cell_attributes[self.key]["button_cell"]
self.draw_button(1, 1, self.rect[2]-5, self.rect[3]-5, label)
elif isinstance(content, wx._gdi.Bitmap):
# A bitmap is returned --> Draw it!
self.draw_bitmap(content)
elif pyplot is not None and isinstance(content, pyplot.Figure):
# A matplotlib figure is returned --> Draw it!
self.draw_matplotlib_figure(content)
elif isinstance(content, basestring) and is_svg(content):
            # The content is a valid SVG XML string
self.draw_svg(content)
elif content is not None:
self.draw_text(content)
self.context.translate(-pos_x - 2, -pos_y - 2)
# Remove clipping to rect
self.context.restore()
class GridCellBackgroundCairoRenderer(object):
"""Renders cell background to Cairo context
Parameters
----------
* context: pycairo.context
\tThe Cairo context to be drawn to
* code_array: model.code_array
\tGrid data structure that yields rendering information
* key: 3 tuple of Integer
\tKey of cell to be rendered
* view_frozen: Bool
\tIf true then paint frozen background pattern for frozen cells
"""
def __init__(self, context, code_array, key, rect, view_frozen):
self.context = context
self.cell_attributes = code_array.cell_attributes
self.key = key
self.rect = rect
self.view_frozen = view_frozen
def _get_background_color(self):
"""Returns background color rgb tuple of right line"""
color = self.cell_attributes[self.key]["bgcolor"]
return tuple(c / 255.0 for c in color_pack2rgb(color))
def _draw_frozen_pattern(self):
"""Draws frozen pattern, i.e. diagonal lines"""
self.context.save()
x, y, w, h = self.rect
self.context.set_source_rgb(0, 0, 1)
self.context.set_line_width(0.25)
self.context.rectangle(*self.rect)
self.context.clip()
for __x in numpy.arange(x-h, x+w, 5):
self.context.move_to(__x, y + h)
self.context.line_to(__x + h, y)
self.context.stroke()
self.context.restore()
def draw(self):
"""Draws cell background to context"""
self.context.set_source_rgb(*self._get_background_color())
self.context.rectangle(*self.rect)
self.context.fill()
# If show frozen is active, show frozen pattern
if self.view_frozen and self.cell_attributes[self.key]["frozen"]:
self._draw_frozen_pattern()
class CellBorder(object):
"""Cell border
Parameters
----------
start_point: 2 tuple of Integer
\tStart point of line
end_point
\tEnd point of line
width: Float
\tWidth of line
color: 3-tuple of float
\tRGB line color, each value is in [0, 1]
"""
def __init__(self, start_point, end_point, width, color):
self.start_point = start_point
self.end_point = end_point
self.width = width
self.color = color
def draw(self, context):
"""Draws self to Cairo context"""
context.set_line_width(self.width)
context.set_source_rgb(*self.color)
context.move_to(*self.start_point)
context.line_to(*self.end_point)
context.stroke()
class Cell(object):
"""Cell"""
def __init__(self, key, rect, cell_attributes):
self.row, self.col, self.tab = self.key = key
self.x, self.y, self.width, self.height = rect
self.cell_attributes = cell_attributes
def get_above_key_rect(self):
"""Returns tuple key rect of above cell"""
key_above = self.row - 1, self.col, self.tab
border_width_bottom = \
float(self.cell_attributes[key_above]["borderwidth_bottom"]) / 2.0
rect_above = (self.x, self.y-border_width_bottom,
self.width, border_width_bottom)
return key_above, rect_above
def get_below_key_rect(self):
"""Returns tuple key rect of below cell"""
key_below = self.row + 1, self.col, self.tab
border_width_bottom = \
float(self.cell_attributes[self.key]["borderwidth_bottom"]) / 2.0
rect_below = (self.x, self.y+self.height,
self.width, border_width_bottom)
return key_below, rect_below
def get_left_key_rect(self):
"""Returns tuple key rect of left cell"""
key_left = self.row, self.col - 1, self.tab
border_width_right = \
float(self.cell_attributes[key_left]["borderwidth_right"]) / 2.0
rect_left = (self.x-border_width_right, self.y,
border_width_right, self.height)
return key_left, rect_left
def get_right_key_rect(self):
"""Returns tuple key rect of right cell"""
key_right = self.row, self.col + 1, self.tab
border_width_right = \
float(self.cell_attributes[self.key]["borderwidth_right"]) / 2.0
rect_right = (self.x+self.width, self.y,
border_width_right, self.height)
return key_right, rect_right
def get_above_left_key_rect(self):
"""Returns tuple key rect of above left cell"""
key_above_left = self.row - 1, self.col - 1, self.tab
border_width_right = \
float(self.cell_attributes[key_above_left]["borderwidth_right"]) \
/ 2.0
border_width_bottom = \
float(self.cell_attributes[key_above_left]["borderwidth_bottom"]) \
/ 2.0
rect_above_left = (self.x-border_width_right,
self.y-border_width_bottom,
border_width_right, border_width_bottom)
return key_above_left, rect_above_left
def get_above_right_key_rect(self):
"""Returns tuple key rect of above right cell"""
key_above = self.row - 1, self.col, self.tab
key_above_right = self.row - 1, self.col + 1, self.tab
border_width_right = \
float(self.cell_attributes[key_above]["borderwidth_right"]) / 2.0
border_width_bottom = \
float(self.cell_attributes[key_above_right]["borderwidth_bottom"])\
/ 2.0
rect_above_right = (self.x+self.width, self.y-border_width_bottom,
border_width_right, border_width_bottom)
return key_above_right, rect_above_right
def get_below_left_key_rect(self):
"""Returns tuple key rect of below left cell"""
key_left = self.row, self.col - 1, self.tab
key_below_left = self.row + 1, self.col - 1, self.tab
border_width_right = \
float(self.cell_attributes[key_below_left]["borderwidth_right"]) \
/ 2.0
border_width_bottom = \
float(self.cell_attributes[key_left]["borderwidth_bottom"]) / 2.0
        rect_below_left = (self.x-border_width_right, self.y+self.height,
border_width_right, border_width_bottom)
return key_below_left, rect_below_left
def get_below_right_key_rect(self):
"""Returns tuple key rect of below right cell"""
key_below_right = self.row + 1, self.col + 1, self.tab
border_width_right = \
float(self.cell_attributes[self.key]["borderwidth_right"]) / 2.0
border_width_bottom = \
float(self.cell_attributes[self.key]["borderwidth_bottom"]) / 2.0
        rect_below_right = (self.x+self.width, self.y+self.height,
border_width_right, border_width_bottom)
return key_below_right, rect_below_right
class CellBorders(object):
"""All 12 relevant borders around a cell
tl tr
| t |
lt -|---|- rt
|l r|
lb -|---|- rb
| b |
bl br
Parameters
----------
key: 3 tuple
\tCell key
"""
def __init__(self, cell_attributes, key, rect):
self.key = key
self.rect = rect
self.cell_attributes = cell_attributes
self.cell = Cell(key, rect, cell_attributes)
def _get_bottom_line_coordinates(self):
"""Returns start and stop coordinates of bottom line"""
rect_x, rect_y, rect_width, rect_height = self.rect
start_point = rect_x, rect_y + rect_height
end_point = rect_x + rect_width, rect_y + rect_height
return start_point, end_point
def _get_right_line_coordinates(self):
"""Returns start and stop coordinates of right line"""
rect_x, rect_y, rect_width, rect_height = self.rect
start_point = rect_x + rect_width, rect_y
end_point = rect_x + rect_width, rect_y + rect_height
return start_point, end_point
def _get_bottom_line_color(self):
"""Returns color rgb tuple of bottom line"""
color = self.cell_attributes[self.key]["bordercolor_bottom"]
return tuple(c / 255.0 for c in color_pack2rgb(color))
def _get_right_line_color(self):
"""Returns color rgb tuple of right line"""
color = self.cell_attributes[self.key]["bordercolor_right"]
return tuple(c / 255.0 for c in color_pack2rgb(color))
def _get_bottom_line_width(self):
"""Returns width of bottom line"""
return float(self.cell_attributes[self.key]["borderwidth_bottom"]) / 2.
def _get_right_line_width(self):
"""Returns width of right line"""
return float(self.cell_attributes[self.key]["borderwidth_right"]) / 2.0
def get_b(self):
"""Returns the bottom border of the cell"""
start_point, end_point = self._get_bottom_line_coordinates()
width = self._get_bottom_line_width()
color = self._get_bottom_line_color()
return CellBorder(start_point, end_point, width, color)
def get_r(self):
"""Returns the right border of the cell"""
start_point, end_point = self._get_right_line_coordinates()
width = self._get_right_line_width()
color = self._get_right_line_color()
return CellBorder(start_point, end_point, width, color)
def get_t(self):
"""Returns the top border of the cell"""
cell_above = CellBorders(self.cell_attributes,
*self.cell.get_above_key_rect())
return cell_above.get_b()
def get_l(self):
"""Returns the left border of the cell"""
cell_left = CellBorders(self.cell_attributes,
*self.cell.get_left_key_rect())
return cell_left.get_r()
def get_tl(self):
"""Returns the top left border of the cell"""
cell_above_left = CellBorders(self.cell_attributes,
*self.cell.get_above_left_key_rect())
return cell_above_left.get_r()
def get_tr(self):
"""Returns the top right border of the cell"""
cell_above = CellBorders(self.cell_attributes,
*self.cell.get_above_key_rect())
return cell_above.get_r()
def get_rt(self):
"""Returns the right top border of the cell"""
cell_above_right = CellBorders(self.cell_attributes,
*self.cell.get_above_right_key_rect())
return cell_above_right.get_b()
def get_rb(self):
"""Returns the right bottom border of the cell"""
cell_right = CellBorders(self.cell_attributes,
*self.cell.get_right_key_rect())
return cell_right.get_b()
def get_br(self):
"""Returns the bottom right border of the cell"""
cell_below = CellBorders(self.cell_attributes,
*self.cell.get_below_key_rect())
return cell_below.get_r()
def get_bl(self):
"""Returns the bottom left border of the cell"""
cell_below_left = CellBorders(self.cell_attributes,
*self.cell.get_below_left_key_rect())
return cell_below_left.get_r()
def get_lb(self):
"""Returns the left bottom border of the cell"""
cell_left = CellBorders(self.cell_attributes,
*self.cell.get_left_key_rect())
return cell_left.get_b()
def get_lt(self):
"""Returns the left top border of the cell"""
cell_above_left = CellBorders(self.cell_attributes,
*self.cell.get_above_left_key_rect())
return cell_above_left.get_b()
def gen_all(self):
"""Generator of all borders"""
borderfuncs = [
self.get_b, self.get_r, self.get_t, self.get_l,
self.get_tl, self.get_tr, self.get_rt, self.get_rb,
self.get_br, self.get_bl, self.get_lb, self.get_lt,
]
for borderfunc in borderfuncs:
yield borderfunc()
class GridCellBorderCairoRenderer(object):
"""Renders cell border to Cairo context
Parameters
----------
* context: pycairo.context
\tThe Cairo context to be drawn to
* code_array: model.code_array
\tGrid data structure that yields rendering information
* key: 3-tuple of Integer
\tKey of cell to be rendered
"""
def __init__(self, context, code_array, key, rect):
self.context = context
self.code_array = code_array
self.cell_attributes = code_array.cell_attributes
self.key = key
self.rect = rect
def draw(self):
"""Draws cell border to context"""
# Lines should have a square cap to avoid ugly edges
self.context.set_line_cap(cairo.LINE_CAP_SQUARE)
self.context.save()
self.context.rectangle(*self.rect)
self.context.clip()
cell_borders = CellBorders(self.cell_attributes, self.key, self.rect)
borders = list(cell_borders.gen_all())
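        # Sort ascending by width so that wider borders are drawn last, i.e. on top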
borders.sort(key=attrgetter('width', 'color'))
for border in borders:
border.draw(self.context)
self.context.restore()
| gpl-3.0 |
julienmalard/Tikon | pruebas/test_central/rcrs/modelo_calib.py | 1 | 2561 | import numpy as np
import pandas as pd
import xarray as xr
from tikon.central import Módulo, SimulMódulo, Modelo, Exper, Parcela, Coso
from tikon.central.res import Resultado
from tikon.datos.datos import Datos
from tikon.ecs import ÁrbolEcs, CategEc, EcuaciónVacía, SubcategEc, Ecuación, Parám
from tikon.ecs.aprioris import APrioriDens
from tikon.datos import Obs
from tikon.utils import EJE_TIEMPO, EJE_PARC, EJE_ESTOC
f_inic = '2000-01-01'
class A(Parám):
nombre = 'a'
unids = None
líms = (None, None)
apriori = APrioriDens((0, 3), .90)
eje_cosos = 'coso'
class EcuaciónParám(Ecuación):
nombre = 'ec'
eje_cosos = 'coso'
cls_ramas = [A]
_nombre_res = 'res'
def eval(símismo, paso, sim):
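        # Each step adds the calibrated parameter 'a' plus a small stochastic
        # perturbation to the previous value of the 'res' result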
ant = símismo.obt_valor_res(sim)
n_estoc = len(ant.coords[EJE_ESTOC])
return ant + símismo.cf['a'] + Datos(
(np.random.random(n_estoc) - 0.5) * 0.1, coords={EJE_ESTOC: np.arange(n_estoc)}, dims=[EJE_ESTOC]
)
class SubCategParám(SubcategEc):
nombre = 'subcateg'
cls_ramas = [EcuaciónParám, EcuaciónVacía]
eje_cosos = 'coso'
_nombre_res = 'res'
class CategParám(CategEc):
nombre = 'categ'
cls_ramas = [SubCategParám]
eje_cosos = 'coso'
class EcsParám(ÁrbolEcs):
nombre = 'árbol'
cls_ramas = [CategParám]
class CosoParám(Coso):
def __init__(símismo, nombre):
super().__init__(nombre, EcsParám)
class Res(Resultado):
nombre = 'res'
unids = None
def __init__(símismo, sim, coords, vars_interés):
coords = {'coso': sim.ecs.cosos, **coords}
super().__init__(sim, coords, vars_interés)
class SimulMóduloValid(SimulMódulo):
resultados = [Res]
def incrementar(símismo, paso, f):
super().incrementar(paso, f)
class MóduloValid(Módulo):
nombre = 'módulo'
cls_simul = SimulMóduloValid
cls_ecs = EcsParám
eje_coso = 'coso'
class MisObs(Obs):
mód = 'módulo'
var = 'res'
def generar(fechas=True):
coso = CosoParám('hola')
if fechas:
tiempos = pd.date_range(f_inic, periods=10, freq='D')
else:
tiempos = np.arange(10)
obs = MisObs(
datos=xr.DataArray(
np.arange(10),
coords={EJE_TIEMPO: tiempos}, dims=[EJE_TIEMPO]
).expand_dims({EJE_PARC: ['parcela'], 'coso': [coso]})
)
exper = Exper('exper', Parcela('parcela'), obs=obs)
modelo = Modelo(MóduloValid(coso))
return {'coso': coso, 'obs': obs, 'exper': exper, 'modelo': modelo}
| agpl-3.0 |
Victor-Haefner/polyvr | extras/python/Myo/myo.py | 1 | 3106 | from __future__ import print_function
from collections import Counter, deque
import sys
import time
import numpy as np
try:
from sklearn import neighbors, svm
HAVE_SK = True
except ImportError:
HAVE_SK = False
from common import *
from myo_raw import MyoRaw
SUBSAMPLE = 3
K = 15
class NNClassifier(object):
'''A wrapper for sklearn's nearest-neighbor classifier that stores
training data in vals0, ..., vals9.dat.'''
def __init__(self):
for i in range(10):
with open('vals%d.dat' % i, 'ab') as f: pass
self.read_data()
def store_data(self, cls, vals):
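        # Append the 8-channel EMG sample to the per-class data file and
        # retrain the classifier with the new point included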
with open('vals%d.dat' % cls, 'ab') as f:
f.write(pack('8H', *vals))
self.train(np.vstack([self.X, vals]), np.hstack([self.Y, [cls]]))
def read_data(self):
X = []
Y = []
for i in range(10):
X.append(np.fromfile('vals%d.dat' % i, dtype=np.uint16).reshape((-1, 8)))
Y.append(i + np.zeros(X[-1].shape[0]))
self.train(np.vstack(X), np.hstack(Y))
def train(self, X, Y):
self.X = X
self.Y = Y
if HAVE_SK and self.X.shape[0] >= K * SUBSAMPLE:
self.nn = neighbors.KNeighborsClassifier(n_neighbors=K, algorithm='kd_tree')
self.nn.fit(self.X[::SUBSAMPLE], self.Y[::SUBSAMPLE])
else:
self.nn = None
def nearest(self, d):
dists = ((self.X - d)**2).sum(1)
ind = dists.argmin()
return self.Y[ind]
def classify(self, d):
if self.X.shape[0] < K * SUBSAMPLE: return 0
if not HAVE_SK: return self.nearest(d)
return int(self.nn.predict(d)[0])
class Myo(MyoRaw):
'''Adds higher-level pose classification and handling onto MyoRaw.'''
HIST_LEN = 25
def __init__(self, cls, tty=None):
MyoRaw.__init__(self, tty)
self.cls = cls
self.history = deque([0] * Myo.HIST_LEN, Myo.HIST_LEN)
self.history_cnt = Counter(self.history)
self.add_emg_handler(self.emg_handler)
self.last_pose = None
self.pose_handlers = []
def emg_handler(self, emg, moving):
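        # Classify each EMG sample and smooth the result with a sliding-window
        # majority vote before reporting a pose change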
y = self.cls.classify(emg)
self.history_cnt[self.history[0]] -= 1
self.history_cnt[y] += 1
self.history.append(y)
r, n = self.history_cnt.most_common(1)[0]
if self.last_pose is None or (n > self.history_cnt[self.last_pose] + 5 and n > Myo.HIST_LEN / 2):
self.on_raw_pose(r)
self.last_pose = r
def add_raw_pose_handler(self, h):
self.pose_handlers.append(h)
def on_raw_pose(self, pose):
print(pose)
for h in self.pose_handlers:
h(pose)
if __name__ == '__main__':
import subprocess
m = Myo(NNClassifier(), sys.argv[1] if len(sys.argv) >= 2 else None)
m.add_raw_pose_handler(print)
def page(pose):
if pose == 5:
subprocess.call(['xte', 'key Page_Down'])
elif pose == 6:
subprocess.call(['xte', 'key Page_Up'])
m.add_raw_pose_handler(page)
m.connect()
while True:
m.run()
| gpl-3.0 |
toastedcornflakes/scikit-learn | examples/svm/plot_svm_regression.py | 120 | 1520 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
lw = 2
plt.scatter(X, y, color='darkorange', label='data')
plt.plot(X, y_rbf, color='navy', lw=lw, label='RBF model')
plt.plot(X, y_lin, color='c', lw=lw, label='Linear model')
plt.plot(X, y_poly, color='cornflowerblue', lw=lw, label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
pchmieli/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_get_modelKmeans.py | 1 | 1228 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
def get_modelKmeans():
# Connect to a pre-existing cluster
# connect to localhost:54321
#Log.info("Importing benign.csv data...\n")
benign_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/benign.csv"))
#benign_h2o.summary()
benign_sci = np.genfromtxt(pyunit_utils.locate("smalldata/logreg/benign.csv"), delimiter=",")
# Impute missing values with column mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
benign_sci = imp.fit_transform(benign_sci)
from h2o.estimators.kmeans import H2OKMeansEstimator
for i in range(2,7):
# Log.info("H2O K-Means")
km_h2o = H2OKMeansEstimator(k=i)
km_h2o.train(x=range(benign_h2o.ncol), training_frame=benign_h2o)
km_h2o.show()
model = h2o.get_model(km_h2o._id)
model.show()
km_sci = KMeans(n_clusters=i, init='k-means++', n_init=1)
km_sci.fit(benign_sci)
        print "scikit centers"
print km_sci.cluster_centers_
if __name__ == "__main__":
pyunit_utils.standalone_test(get_modelKmeans)
else:
get_modelKmeans()
| apache-2.0 |
TheCamusean/DLRCev3 | unsupervised_models/unsupervised_models/vae.py | 1 | 4665 | import tensorflow as tf
from tensorflow.python import debug as tf_debug
class VariationalAutoencoder(object):
def __init__(self, n_hidden, optimizer = tf.train.AdamOptimizer(), image_size=(32,32), debug=False):
self.n_input = image_size[0] * image_size[1]
self.n_hidden = n_hidden
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float64, shape=[None,self.n_input])
#self.image_resize = tf.image.resize_bilinear(self.x, size=(32,32), name="image_resize")
self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])
self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])
# sample from gaussian distribution
eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float64)
self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])
# cost
reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
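        # KL divergence between the approximate posterior N(z_mean, sigma^2)
        # and the standard normal prior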
latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
- tf.square(self.z_mean)
- tf.exp(self.z_log_sigma_sq), 1)
self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
if debug:
self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
self.sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
initializer=tf.contrib.layers.xavier_initializer())
all_weights['log_sigma_w1'] = tf.get_variable("log_sigma_w1", shape=[self.n_input, self.n_hidden],
initializer=tf.contrib.layers.xavier_initializer())
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float64))
all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float64))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float64))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float64))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X})
def transform(self, X):
return self.sess.run(self.z_mean, feed_dict={self.x: X})
def generate(self, hidden = None):
if hidden is None:
hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
return self.sess.run(self.reconstruction, feed_dict={self.z: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
import numpy as np
import sklearn.preprocessing as prep
import os
from scipy import misc
from tqdm import tqdm
modes = [None, 'L', None, 'RGB', 'RGBA']
def read_images(path_to_images, size=None):
"""
Return numpy array of images from directories
:param path_to_images:
:return:
"""
image_file_paths = map(lambda x: os.path.join(path_to_images, x), os.listdir(path_to_images))
image_file_paths = filter(lambda x: "jpeg" in x or "jpg" in x or "png" in x, image_file_paths)
images = []
for image_path in tqdm(image_file_paths):
image = misc.imread(image_path, mode='L')
images.append(image)
if size:
for i, img in enumerate(images):
print(img.shape[-1])
images[i] = misc.imresize(img, size, mode='L').reshape(size[0], size[1],1)
return np.asarray(images)
def min_max_scale(X_train, X_test):
preprocessor = prep.MinMaxScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_batch(data, batch_size, reset=False):
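    # Yield successive full batches; a trailing partial batch is dropped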
data_size = len(data)
for i in np.arange(0, data_size-batch_size, batch_size):
yield data[i:i+batch_size]
| mit |