repo_name | path | copies | size | content | license
---|---|---|---|---|---|
rs2/pandas | pandas/tests/test_take.py | 3 | 16875 |
from datetime import datetime
import re
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas._testing as tm
import pandas.core.algorithms as algos
@pytest.fixture(params=[True, False])
def writeable(request):
return request.param
# Check that take_nd works both with writeable arrays
# (in which case fast typed memory-views implementation)
# and read-only arrays alike.
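# A minimal sketch (not part of the original tests) of the read-only case the
# fixture exercises; take_1d is expected to accept non-writeable input:
#   arr = np.arange(4, dtype=np.float64)
#   arr.flags.writeable = False
#   algos.take_1d(arr, [3, 2, 1, 0])  # returns a new array; input is untouched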
@pytest.fixture(
params=[
(np.float64, True),
(np.float32, True),
(np.uint64, False),
(np.uint32, False),
(np.uint16, False),
(np.uint8, False),
(np.int64, False),
(np.int32, False),
(np.int16, False),
(np.int8, False),
(np.object_, True),
(np.bool_, False),
]
)
def dtype_can_hold_na(request):
return request.param
@pytest.fixture(
params=[
(np.int8, np.int16(127), np.int8),
(np.int8, np.int16(128), np.int16),
(np.int32, 1, np.int32),
(np.int32, 2.0, np.float64),
(np.int32, 3.0 + 4.0j, np.complex128),
(np.int32, True, np.object_),
(np.int32, "", np.object_),
(np.float64, 1, np.float64),
(np.float64, 2.0, np.float64),
(np.float64, 3.0 + 4.0j, np.complex128),
(np.float64, True, np.object_),
(np.float64, "", np.object_),
(np.complex128, 1, np.complex128),
(np.complex128, 2.0, np.complex128),
(np.complex128, 3.0 + 4.0j, np.complex128),
(np.complex128, True, np.object_),
(np.complex128, "", np.object_),
(np.bool_, 1, np.object_),
(np.bool_, 2.0, np.object_),
(np.bool_, 3.0 + 4.0j, np.object_),
(np.bool_, True, np.bool_),
(np.bool_, "", np.object_),
]
)
def dtype_fill_out_dtype(request):
return request.param
class TestTake:
# Standard incompatible fill error.
fill_error = re.compile("Incompatible type for fill_value")
def test_1d_with_out(self, dtype_can_hold_na, writeable):
dtype, can_hold_na = dtype_can_hold_na
data = np.random.randint(0, 2, 4).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out = np.empty(4, dtype=dtype)
algos.take_1d(data, indexer, out=out)
expected = data.take(indexer)
tm.assert_almost_equal(out, expected)
indexer = [2, 1, 0, -1]
out = np.empty(4, dtype=dtype)
if can_hold_na:
algos.take_1d(data, indexer, out=out)
expected = data.take(indexer)
expected[3] = np.nan
tm.assert_almost_equal(out, expected)
else:
with pytest.raises(TypeError, match=self.fill_error):
algos.take_1d(data, indexer, out=out)
# No Exception otherwise.
data.take(indexer, out=out)
def test_1d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, 4).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_1d(data, indexer, fill_value=fill_value)
assert (result[[0, 1, 2]] == data[[2, 1, 0]]).all()
assert result[3] == fill_value
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_1d(data, indexer, fill_value=fill_value)
assert (result[[0, 1, 2, 3]] == data[indexer]).all()
assert result.dtype == dtype
def test_2d_with_out(self, dtype_can_hold_na, writeable):
dtype, can_hold_na = dtype_can_hold_na
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
data.flags.writeable = writeable
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 3), dtype=dtype)
out1 = np.empty((5, 4), dtype=dtype)
if can_hold_na:
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected0[3, :] = np.nan
expected1[:, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
else:
for i, out in enumerate([out0, out1]):
with pytest.raises(TypeError, match=self.fill_error):
algos.take_nd(data, indexer, out=out, axis=i)
# No Exception otherwise.
data.take(indexer, out=out, axis=i)
def test_2d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, (5, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2], :] == data[[2, 1, 0], :]).all()
assert (result[3, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2]] == data[:, [2, 1, 0]]).all()
assert (result[:, 3] == fill_value).all()
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2, 3], :] == data[indexer, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2, 3]] == data[:, indexer]).all()
assert result.dtype == dtype
def test_3d_with_out(self, dtype_can_hold_na):
dtype, can_hold_na = dtype_can_hold_na
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, 1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
algos.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
indexer = [2, 1, 0, -1]
out0 = np.empty((4, 4, 3), dtype=dtype)
out1 = np.empty((5, 4, 3), dtype=dtype)
out2 = np.empty((5, 4, 4), dtype=dtype)
if can_hold_na:
algos.take_nd(data, indexer, out=out0, axis=0)
algos.take_nd(data, indexer, out=out1, axis=1)
algos.take_nd(data, indexer, out=out2, axis=2)
expected0 = data.take(indexer, axis=0)
expected1 = data.take(indexer, axis=1)
expected2 = data.take(indexer, axis=2)
expected0[3, :, :] = np.nan
expected1[:, 3, :] = np.nan
expected2[:, :, 3] = np.nan
tm.assert_almost_equal(out0, expected0)
tm.assert_almost_equal(out1, expected1)
tm.assert_almost_equal(out2, expected2)
else:
for i, out in enumerate([out0, out1, out2]):
with pytest.raises(TypeError, match=self.fill_error):
algos.take_nd(data, indexer, out=out, axis=i)
# No Exception otherwise.
data.take(indexer, out=out, axis=i)
def test_3d_fill_nonna(self, dtype_fill_out_dtype):
dtype, fill_value, out_dtype = dtype_fill_out_dtype
data = np.random.randint(0, 2, (5, 4, 3)).astype(dtype)
indexer = [2, 1, 0, -1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2], :, :] == data[[2, 1, 0], :, :]).all()
assert (result[3, :, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2], :] == data[:, [2, 1, 0], :]).all()
assert (result[:, 3, :] == fill_value).all()
assert result.dtype == out_dtype
result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert (result[:, :, [0, 1, 2]] == data[:, :, [2, 1, 0]]).all()
assert (result[:, :, 3] == fill_value).all()
assert result.dtype == out_dtype
indexer = [2, 1, 0, 1]
result = algos.take_nd(data, indexer, axis=0, fill_value=fill_value)
assert (result[[0, 1, 2, 3], :, :] == data[indexer, :, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=1, fill_value=fill_value)
assert (result[:, [0, 1, 2, 3], :] == data[:, indexer, :]).all()
assert result.dtype == dtype
result = algos.take_nd(data, indexer, axis=2, fill_value=fill_value)
assert (result[:, :, [0, 1, 2, 3]] == data[:, :, indexer]).all()
assert result.dtype == dtype
def test_1d_other_dtypes(self):
arr = np.random.randn(10).astype(np.float32)
indexer = [1, 2, 3, -1]
result = algos.take_1d(arr, indexer)
expected = arr.take(indexer)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_other_dtypes(self):
arr = np.random.randn(10, 5).astype(np.float32)
indexer = [1, 2, 3, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
expected = arr.take(indexer, axis=0)
expected[-1] = np.nan
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
expected = arr.take(indexer, axis=1)
expected[:, -1] = np.nan
tm.assert_almost_equal(result, expected)
def test_1d_bool(self):
arr = np.array([0, 1, 0], dtype=bool)
result = algos.take_1d(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1])
tm.assert_numpy_array_equal(result, expected)
result = algos.take_1d(arr, [0, 2, -1])
assert result.dtype == np.object_
def test_2d_bool(self):
arr = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=bool)
result = algos.take_nd(arr, [0, 2, 2, 1])
expected = arr.take([0, 2, 2, 1], axis=0)
tm.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, 2, 1], axis=1)
expected = arr.take([0, 2, 2, 1], axis=1)
tm.assert_numpy_array_equal(result, expected)
result = algos.take_nd(arr, [0, 2, -1])
assert result.dtype == np.object_
def test_2d_float32(self):
arr = np.random.randn(4, 3).astype(np.float32)
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = np.nan
tm.assert_almost_equal(result, expected)
# the out buffer may now be float32 (a float64 out buffer is no longer required)
out = np.empty((len(indexer), arr.shape[1]), dtype="float32")
algos.take_nd(arr, indexer, out=out) # it works!
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = np.nan
tm.assert_almost_equal(result, expected)
def test_2d_datetime64(self):
# 2005/01/01 - 2006/01/01
arr = np.random.randint(11_045_376, 11_360_736, (5, 3)) * 100_000_000_000
arr = arr.view(dtype="datetime64[ns]")
indexer = [0, 2, -1, 1, -1]
# axis=0
result = algos.take_nd(arr, indexer, axis=0)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=0, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected.view(np.int64)[[2, 4], :] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=0, fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
algos.take_nd(
arr, indexer, out=result2, axis=0, fill_value=datetime(2007, 1, 1)
)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=0)
expected[[2, 4], :] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
# axis=1
result = algos.take_nd(arr, indexer, axis=1)
result2 = np.empty_like(result)
algos.take_nd(arr, indexer, axis=1, out=result2)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected.view(np.int64)[:, [2, 4]] = iNaT
tm.assert_almost_equal(result, expected)
result = algos.take_nd(arr, indexer, axis=1, fill_value=datetime(2007, 1, 1))
result2 = np.empty_like(result)
algos.take_nd(
arr, indexer, out=result2, axis=1, fill_value=datetime(2007, 1, 1)
)
tm.assert_almost_equal(result, result2)
expected = arr.take(indexer, axis=1)
expected[:, [2, 4]] = datetime(2007, 1, 1)
tm.assert_almost_equal(result, expected)
def test_take_axis_0(self):
arr = np.arange(12).reshape(4, 3)
result = algos.take(arr, [0, -1])
expected = np.array([[0, 1, 2], [9, 10, 11]])
tm.assert_numpy_array_equal(result, expected)
# allow_fill=True
result = algos.take(arr, [0, -1], allow_fill=True, fill_value=0)
expected = np.array([[0, 1, 2], [0, 0, 0]])
tm.assert_numpy_array_equal(result, expected)
def test_take_axis_1(self):
arr = np.arange(12).reshape(4, 3)
result = algos.take(arr, [0, -1], axis=1)
expected = np.array([[0, 2], [3, 5], [6, 8], [9, 11]])
tm.assert_numpy_array_equal(result, expected)
# allow_fill=True
result = algos.take(arr, [0, -1], axis=1, allow_fill=True, fill_value=0)
expected = np.array([[0, 0], [3, 0], [6, 0], [9, 0]])
tm.assert_numpy_array_equal(result, expected)
# GH#26976 make sure we validate along the correct axis
with pytest.raises(IndexError, match="indices are out-of-bounds"):
algos.take(arr, [0, 3], axis=1, allow_fill=True, fill_value=0)
class TestExtensionTake:
# The take method found in pd.api.extensions
def test_bounds_check_large(self):
arr = np.array([1, 2])
msg = "indices are out-of-bounds"
with pytest.raises(IndexError, match=msg):
algos.take(arr, [2, 3], allow_fill=True)
msg = "index 2 is out of bounds for( axis 0 with)? size 2"
with pytest.raises(IndexError, match=msg):
algos.take(arr, [2, 3], allow_fill=False)
def test_bounds_check_small(self):
arr = np.array([1, 2, 3], dtype=np.int64)
indexer = [0, -1, -2]
msg = r"'indices' contains values less than allowed \(-2 < -1\)"
with pytest.raises(ValueError, match=msg):
algos.take(arr, indexer, allow_fill=True)
result = algos.take(arr, indexer)
expected = np.array([1, 3, 2], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("allow_fill", [True, False])
def test_take_empty(self, allow_fill):
arr = np.array([], dtype=np.int64)
# empty take is ok
result = algos.take(arr, [], allow_fill=allow_fill)
tm.assert_numpy_array_equal(arr, result)
msg = (
"cannot do a non-empty take from an empty axes.|"
"indices are out-of-bounds"
)
with pytest.raises(IndexError, match=msg):
algos.take(arr, [0], allow_fill=allow_fill)
def test_take_na_empty(self):
result = algos.take(np.array([]), [-1, -1], allow_fill=True, fill_value=0.0)
expected = np.array([0.0, 0.0])
tm.assert_numpy_array_equal(result, expected)
def test_take_coerces_list(self):
arr = [1, 2, 3]
result = algos.take(arr, [0, 0])
expected = np.array([1, 1])
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
toastedcornflakes/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 |
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
miltonsarria/dsp-python | filters/examen3.py | 1 | 4408 |
#Exam 2
#October 11 - 2017
#digital signal processing
#universidad santiago de cali
#Name:
#ID:
from scipy import signal
from scipy.signal import get_window  # get_window is used below but was missing from the original imports
import matplotlib.pyplot as plt
import numpy as np
from fourierFunc import fourierAn
from wav_rw import wavread
#define the audio file
archivo='/home/sarria/Documents/2017B/dsp-python/audio/sound/prueba_exam.wav'
######################
# change the frequency range to inspect a specific band
rango =[0.0, 8000.0]
(fs,x)=wavread(archivo)
#normalize
x=x/np.max(np.abs(x))
print('frecuencia de muestreo: ' + str(fs) + ', numero de muestras: ' + str(x.size))
window = 'hamming'
long_ventana = 0.025 # in seconds
incremento = 0.01 # in seconds
# in samples
M = int(fs * long_ventana)
H = int(fs * incremento)
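# N (the FFT size) is used by the STFT call below but is never defined in the
# original template; a power of two of at least M is assumed here for illustration.
N = 1024
# Worked example of the window/hop arithmetic (assumed fs = 16000 Hz, not the
# actual file): M = int(16000 * 0.025) = 400 samples, H = int(16000 * 0.01) = 160.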
#generate the window and normalize it
w = get_window(window, M)
#compute the STFT of the audio file (magnitude and phase); wp is assumed to be a local STFT helper module (it is not imported in the original)
mX, pX = wp.stftAnal(x, w, N, H)
# plot the audio file
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('input sound: x')
# plot the magnitude spectrum in decibels
plt.subplot(2,1,2)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq=np.linspace(0,fs/2,mX.shape[1])
bins=(binFreq>rango[0]) & (binFreq<rango[1])
Sxx=np.transpose(mX[:,bins])
binFreq=binFreq[bins]
plt.pcolormesh(frmTime,binFreq,Sxx)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.autoscale(tight=True)
#plot the spectrum of the whole audio file without windowing
M = x.size
w = get_window(window, M)
w = w / sum(w)
xw = x*w
mX,pX = wp.magFourier(xw,xw.size)
binFreq=np.linspace(0,fs/2,mX.size)
bins=(binFreq>rango[0]) & (binFreq<rango[1])
plt.figure(2)
plt.plot(binFreq[bins],mX[bins])
plt.xlabel('Frequency [Hz]')
plt.ylabel('Magnitud en dB')
plt.show()
'''
##########################################
#BLOCK 1
#define the sampling frequency
Fs=1
tf=5 #final time
#define the time sequence up to 5 seconds
nT=np.linspace(1./Fs,tf,Fs*tf);
#generate the discrete sequence x[n]
x=2*np.sin(12*np.pi*nT)+3*np.cos(40*np.pi*nT)
#use the Fourier transform to identify the frequency components
absX,Xdb,pX=fourierAn(x)
f=np.linspace(-Fs/2,Fs/2,Xdb.size)
#visualize the results of the Fourier transform analysis
plt.ion()
plt.subplot(211)
plt.plot(nT,x)
plt.ylabel('x[n]')
plt.xlabel('tiempo - s')
plt.subplot(212)
plt.plot(f,Xdb)
plt.ylabel('|X| en dB')
plt.xlabel('Frecuencia - Hz')
plt.draw()
'''
##########################################
#BLOCK 2
'''
#design a filter that passes only the lowest frequency component
#change whatever parameters are necessary
b1 = signal.firwin(3, 0.5, window='hamming', pass_zero=True)
#get the frequency response
w, h = signal.freqz(b1)
#filter the waveform with filter number 1
x1=signal.lfilter(b1, [1.0],x)
#use the Fourier transform to illustrate the result of the filter
absX1,X1db,pX1=fourierAn(x1)
#
plt.figure(2)
#plot the frequency response of the filter
plt.subplot(311)
plt.title('Respuesta en frecuencia de filtro digital numero 1')
plt.plot(w, 20 * np.log10(abs(h)), 'b')
plt.ylabel('Amplitud [dB]', color='b')
#plot the results
plt.subplot(312)
plt.plot(nT,x1)
plt.ylabel('x1[n] - filtrada')
plt.xlabel('tiempo - s')
plt.subplot(313)
plt.plot(f,X1db)
plt.ylabel('|X1| en dB')
plt.xlabel('Frecuencia - Hz')
plt.draw()
'''
##########################################
#BLOCK 3
'''
#design a filter that passes only the lowest frequency component
#change whatever parameters are necessary
b2 = signal.firwin(3, 0.5, window='hamming', pass_zero=False)
#get the frequency response
w, h = signal.freqz(b2)
#filter the waveform with filter number 2
x2=signal.lfilter(b2, [1.0],x)
#use the Fourier transform to illustrate the result of the filter
absX2,X2db,pX2=fourierAn(x2)
#
plt.figure(3)
#plot the frequency response of the filter
plt.subplot(311)
plt.title('Respuesta en frecuencia de filtro digital numero 2')
plt.plot(w, 20 * np.log10(abs(h)), 'b')
plt.ylabel('Amplitud [dB]', color='b')
#plot the results
plt.subplot(312)
plt.plot(nT,x2)
plt.ylabel('x2[n] - filtrada')
plt.xlabel('tiempo - s')
plt.subplot(313)
plt.plot(f,X2db)
plt.ylabel('|X2| en dB')
plt.xlabel('Frecuencia - Hz')
plt.draw()
'''
| mit |
robertlayton/authorship_tutorials | pyconau2014/get_twitter.py | 1 | 1082 |
"""Gets data from twitter.
Collects tweets, usually English, by searching for random stop words (i.e.
normal everyday words like "that", "which", "and").
You'll need a Twitter API key for that.
This isn't currently a command line program -- you'll need to change variables
in the code itself.
"""
from TwitterAPI import TwitterAPI
from getpass import getpass
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS as stop_words
from random import choice
num_to_get = 10000
stop_words = list(stop_words)
access_key = ""  # Put your Twitter API information here
access_secret = ""  # And here
consumer_key = ""  # and here
consumer_secret = ""  # and here!
api = TwitterAPI(consumer_key=consumer_key, consumer_secret=consumer_secret,
access_token_key=access_key, access_token_secret=access_secret)
c = 0
for i in range(int(num_to_get / 10)):
word = choice(stop_words)
r = api.request('search/tweets', {'q': word})
for item in r.get_iterator():
c += 1
if c > num_to_get: break
try:
print(repr(item['text']))
except:
pass
| bsd-3-clause |
briandrawert/pyurdme | examples/hes1/hes1_label.py | 5 | 3775 |
import matplotlib.pyplot as plt
import os.path
import pyurdme
import dolfin
import numpy
class MeshSize(pyurdme.URDMEDataFunction):
def __init__(self,mesh):
pyurdme.URDMEDataFunction.__init__(self,name="MeshSize")
self.mesh = mesh
self.h = mesh.get_mesh_size()
def map(self,x):
ret = self.h[self.mesh.closest_vertex(x)]
return ret
class hes1(pyurdme.URDMEModel):
def __init__(self,model_name="hes1"):
pyurdme.URDMEModel.__init__(self, model_name)
#Species
Pf = pyurdme.Species(name="Pf",diffusion_constant=0.,dimension=3)
Po = pyurdme.Species(name="Po",diffusion_constant=0.,dimension=3)
mRNA = pyurdme.Species(name="mRNA",diffusion_constant=6.e-1,dimension=3)
protein = pyurdme.Species(name="protein",diffusion_constant=6.e-1,dimension=3)
self.add_species([Pf,Po,mRNA,protein])
#Domains
basedir = os.path.dirname(os.path.abspath(__file__))
self.mesh = pyurdme.URDMEMesh.read_mesh(basedir+"/mesh/cell.msh")
volumes = dolfin.MeshFunction("size_t",self.mesh,basedir+"/mesh/cell_physical_region.xml")
self.add_subdomain(volumes)
h = self.mesh.get_mesh_size()
self.add_data_function(MeshSize(self.mesh))
#Parameters
k1 = pyurdme.Parameter(name="k1",expression=1.e9)
k2 = pyurdme.Parameter(name="k2",expression=0.1)
alpha_m = pyurdme.Parameter(name="alpha_m",expression=3.)
alpha_m_gamma = pyurdme.Parameter(name="alpha_m_gamma",expression=3./30.)
alpha_p = pyurdme.Parameter(name="alpha_p",expression=1.)
mu_m = pyurdme.Parameter(name="mu_m",expression=0.015)
mu_p = pyurdme.Parameter(name="mu_p",expression=0.043)
self.add_parameter([k1,k2,alpha_m,alpha_m_gamma,alpha_p,mu_m,mu_p])
#Domains markers
nucleus = [1]
cytoplasm = [2]
promoter_site = [1]
#Reactions
R1 = pyurdme.Reaction(name="R1",reactants={Pf:1,protein:1},products={Po:1},massaction=True,rate=k1,restrict_to=promoter_site)
R2 = pyurdme.Reaction(name="R2",reactants={Po:1},products={Pf:1,protein:1},massaction=True,rate=k2,restrict_to=promoter_site)
R3 = pyurdme.Reaction(name="R3",reactants={Pf:1},products={Pf:1,mRNA:1},massaction=True,rate=alpha_m,restrict_to=promoter_site)
R4 = pyurdme.Reaction(name="R4",reactants={Po:1},products={Po:1,mRNA:1},massaction=True,rate=alpha_m_gamma,restrict_to=promoter_site)
R5 = pyurdme.Reaction(name="R5",reactants={mRNA:1},products={mRNA:1,protein:1},massaction=True,rate=alpha_p,restrict_to=cytoplasm)
R6 = pyurdme.Reaction(name="R6",reactants={mRNA:1},products={},massaction=True,rate=mu_m)
R7 = pyurdme.Reaction(name="R7",reactants={protein:1},products={},massaction=True,rate=mu_p)
self.add_reaction([R1,R2,R3,R4,R5,R6,R7])
#Restrict to promoter_site
self.restrict(Po,promoter_site)
self.restrict(Pf,promoter_site)
#Distribute molecules over the mesh
self.set_initial_condition_place_near({Pf:1},[0,0,0])
self.set_initial_condition_scatter({protein:60},cytoplasm)
self.set_initial_condition_scatter({mRNA:10},nucleus)
self.timespan(range(1200))
if __name__=="__main__":
model = hes1(model_name="hes1")
result = model.run(report_level=1)
protein = result.get_species("protein")
proteinsum = numpy.sum(protein,axis=1)
plt.plot(model.tspan,proteinsum,'r')
mRNA = result.get_species("mRNA")
mRNAsum=numpy.sum(mRNA[:],axis=1)
plt.plot(model.tspan,mRNAsum,'b')
plt.show()
#print 'Writing species "protein" to folder "proteinOut"'
#result.export_to_vtk(species='protein',folder_name='proteinOut')
| gpl-3.0 |
IgowWang/MyKaggle | BagOfWordsMeetsBagsOfPopcorn/loadData.py | 1 | 1453 |
__author__ = 'Igor'
import pandas as pd
import nltk
from nltk.corpus import stopwords
import re
from bs4 import BeautifulSoup
TRAIN_FILE_PATH = "data/labeledTrainData.tsv"
TEST_FILE_PATH = "data/testData.tsv"
def load(test=False, remove_stopwords=False):
if test:
path = TEST_FILE_PATH
else:
path = TRAIN_FILE_PATH
data = pd.read_csv(path, header=0, delimiter="\t", quoting=3)
num_reviews = data["review"].size
clean_train_reviews = []
for i in range(num_reviews):
if ((i + 1) % 1000 == 0):
print("Review %d of %d" % (i + 1, num_reviews))
clean_train_reviews.append(review_to_words(data["review"][i], remove_stopwords))
return data, clean_train_reviews
def review_to_words(raw_review, remove_stopwords=False):
'''
Convert a raw movie review into a list of words
:param raw_review:
:return:
'''
# Remove HTML markup
review_text = BeautifulSoup(raw_review,"lxml").get_text()
# Remove non-letter characters
letters_only = re.sub(r"[^a-zA-Z]", " ", review_text)
# Convert to lowercase and split on whitespace
words = letters_only.lower().split()
# In Python, membership lookups on a set are faster than on a list
if remove_stopwords:
stops = set(stopwords.words("english"))
# Remove stop words
words = [w for w in words if not w in stops]
# Return the list of cleaned words
return words
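# Minimal usage sketch (assumes the Kaggle TSV files exist under data/):
# train_df, clean_reviews = load(test=False, remove_stopwords=True)
# clean_reviews[0] is then a list of lowercase tokens for the first review.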
| apache-2.0 |
montilab/Hydra | build/scripts-2.7/run_bamqc.py | 2 | 16391 |
#Copyright 2015 Daniel Gusenleitner, Stefano Monti
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""
Usage: python2.7 bam_qc.py -i input_file.bam -o outdir
-h help
-o output_dir
-i input_file.bam *[No default value]
"""
def extract_stats(input_file):
#open bam file
bam_file = pysam.Samfile(input_file, "rb")
#counters
total_aligned_reads = 0
unique_aligned_reads = 0
is_singleton = 0
is_paired = 0
is_proper_pair = 0
is_unmapped = 0
num_unique_mismatches = [0]*5
num_multiple_mismatches = [0.0]*5
num_multiread = [0.0]*20
delet = False
insert = False
spliced = False
reads_with_deletions = 0
spliced_reads = 0
reads_with_inserts = 0
non_spliced_reads = 0
unique_reads_with_deletions = 0
unique_spliced_reads = 0
unique_reads_with_inserts = 0
unique_non_spliced_reads = 0
#tag variables
NH = 0
NM = 0
XS = 0
idx = 0
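# NH and NM are standard SAM optional tags: NH is the number of reported
# alignments for the read and NM its edit distance to the reference; both are
# re-read from each record's tags inside the loop below.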
for read in bam_file:
if read.cigarstring != None:
#get all the relevant tags
for tag in read.tags:
if tag[0] == 'NH':
NH = tag[1]
if tag[0] == 'NM':
NM = tag[1]
if NH == 0:
NH = 1
#number of aligned reads
total_aligned_reads += 1
unique_aligned_reads += 1/NH
#number of mismatches
if NH == 1:
if NM >= 4:
num_unique_mismatches[4] = num_unique_mismatches[4]+1
else:
num_unique_mismatches[NM] = num_unique_mismatches[NM]+1
else:
if NM >= 4:
num_multiple_mismatches[4] = num_multiple_mismatches[4]+(1.0/float(NH))
else:
num_multiple_mismatches[NM] = num_multiple_mismatches[NM]+(1.0/float(NH))
#number of multiple reads
if NH >= 20:
num_multiread[19] = num_multiread[19]+(1.0/float(NH))
else:
num_multiread[NH-1] = num_multiread[NH-1]+(1.0/float(NH))
#singletons, paired, proper paired, unmapped
is_singleton += int(not read.is_paired)
is_paired += int(read.is_paired)
is_proper_pair += int(read.is_proper_pair)
is_unmapped += int(read.is_unmapped)
#splicing, deletions, inserts
spliced = 'N' in read.cigarstring
insert = 'I' in read.cigarstring
delet = 'D' in read.cigarstring
#actual count
spliced_reads += int(spliced)
non_spliced_reads += int(not spliced)
reads_with_inserts += int(insert)
reads_with_deletions += int(delet)
#counting reads that are aligned multiple times only once
unique_spliced_reads += int(spliced)/NH
unique_non_spliced_reads += int(not spliced)/NH
unique_reads_with_inserts += int(insert)/NH
unique_reads_with_deletions += int(delet)/NH
if idx % 1000000 == 0:
print str(idx)+' reads done'
idx += 1
bam_file.close()
statistics = dict()
statistics['total_aligned_reads'] = total_aligned_reads
statistics['unique_aligned_reads'] = unique_aligned_reads
statistics['is_singleton'] = is_singleton
statistics['is_paired'] = is_paired
statistics['is_proper_pair'] = is_proper_pair
statistics['is_unmapped'] = is_unmapped
statistics['num_unique_mismatches'] = num_unique_mismatches
statistics['num_multiple_mismatches'] = num_multiple_mismatches
statistics['num_multiread'] = num_multiread
statistics['spliced_reads'] = spliced_reads
statistics['non_spliced_reads'] = non_spliced_reads
statistics['reads_with_inserts'] = reads_with_inserts
statistics['reads_with_deletions'] = reads_with_deletions
statistics['unique_spliced_reads'] = unique_spliced_reads
statistics['unique_non_spliced_reads'] = unique_non_spliced_reads
statistics['unique_reads_with_inserts'] = unique_reads_with_inserts
statistics['unique_reads_with_deletions'] = unique_reads_with_deletions
return statistics
def output_stats(stat, output_dir):
#write all stats into a file
handle = open(output_dir+'output.txt', 'w')
handle.write('total_aligned_reads \t'+str(stat['total_aligned_reads'])+'\n')
handle.write('unique_aligned_reads \t'+str(stat['unique_aligned_reads'])+'\n')
handle.write('is_singleton \t'+str(stat['is_singleton'])+'\n')
handle.write('is_paired \t'+str(stat['is_paired'])+'\n')
handle.write('is_proper_pair \t'+str(stat['is_proper_pair'])+'\n')
handle.write('is_unmapped \t'+str(stat['is_unmapped'])+'\n')
for i in range(len(stat['num_unique_mismatches'])):
handle.write('num_unique_mismatches '+str(i)+ \
'\t'+str(stat['num_unique_mismatches'][i])+'\n')
for i in range(len(stat['num_multiple_mismatches'])):
handle.write('num_multiple_mismatches '+str(i)+'\t'+ \
str(stat['num_multiple_mismatches'][i])+'\n')
for i in range(len(stat['num_multiread'])):
handle.write('num_multiread '+str(i+1)+'\t'+str(stat['num_multiread'][i])+'\n')
handle.write('spliced_reads \t'+str(stat['spliced_reads'])+'\n')
handle.write('non_spliced_reads \t'+str(stat['non_spliced_reads'])+'\n')
handle.write('reads_with_inserts \t'+str(stat['reads_with_inserts'])+'\n')
handle.write('reads_with_deletions \t'+str(stat['reads_with_deletions'])+'\n')
handle.write('unique_spliced_reads \t'+str(stat['unique_spliced_reads'])+'\n')
handle.write('unique_non_spliced_reads \t'+ \
str(stat['unique_non_spliced_reads'])+'\n')
handle.write('unique_reads_with_inserts \t'+ \
str(stat['unique_reads_with_inserts'])+'\n')
handle.write('unique_reads_with_deletions \t'+ \
str(stat['unique_reads_with_deletions'])+'\n')
handle.close()
def plot_mul_alignments(stat, output_dir):
_, _ = plt.subplots()
index = np.arange(len(stat['num_multiread']))
bar_width = 0.8
opacity = 0.4
val = [math.log(sta+1, 10) for sta in stat['num_multiread']]
_ = plt.bar(index, val, bar_width,
alpha=opacity,
color='b',
label='Number of alignements ')
plt.xlabel('Number of alignments')
plt.ylabel('Counts (log10)')
plt.title('Distribution of reads with multiple alignments')
ticks = [str(i+1) for i in range(len(stat['num_multiread']))]
ticks[len(ticks)-1] = ticks[len(ticks)-1]+'+'
plt.xticks(index + bar_width, ticks)
plt.tight_layout()
pylab.savefig(output_dir+'multiple_alignments.png')
def plot_num_unique_mismatches(stat, output_dir):
_, _ = plt.subplots()
index = np.arange(len(stat['num_unique_mismatches']))
bar_width = 0.8
opacity = 0.4
val = [math.log(sta+1, 10) for sta in stat['num_unique_mismatches']]
_ = plt.bar(index,
val,
bar_width,
alpha=opacity,
color='b')
plt.xlabel('Number of mismatches in uniquely aligned samples')
plt.ylabel('Counts (log10)')
plt.title('Distribution of mismatches in reads with unique alignments')
ticks = [str(i) for i in range(len(stat['num_unique_mismatches']))]
ticks[len(ticks)-1] = ticks[len(ticks)-1]+'+'
plt.xticks(index + bar_width, ticks)
plt.tight_layout()
pylab.savefig(output_dir+'num_unique_mismatches.png')
def number_of_multiple_mismatches(stat, output_dir):
_, _ = plt.subplots()
index = np.arange(len(stat['num_multiple_mismatches']))
bar_width = 0.8
opacity = 0.4
val = [math.log(sta+1, 10) for sta in stat['num_multiple_mismatches']]
_ = plt.bar(index,
val,
bar_width,
alpha=opacity,
color='b')
plt.xlabel('Number of mismatches in multiple aligned samples')
plt.ylabel('Counts (log10)')
plt.title('Distribution of mismatches in reads with multiple alignments')
ticks = [str(i) for i in range(len(stat['num_multiple_mismatches']))]
ticks[len(ticks)-1] = ticks[len(ticks)-1]+'+'
plt.xticks(index + bar_width, ticks)
plt.tight_layout()
pylab.savefig(output_dir+'num_multiple_mismatches.png')
def create_html(stat, output_dir):
handle = open(output_dir+'sample_stats.html', 'w')
#output a table with all the counts
handle.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '+ \
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> +\
<head><title></title></head><body>\n')
handle.write('<center><br><h1>Sample overview</h1>')
#table
handle.write('<table id="one-column-emphasis">\n')
handle.write('<thead><tr><th> </th><th>Count</th><th>Percentage</th></tr></thead>\n')
#total number + unique / multiple aligned
handle.write('<tr><td>Total number of aligned reads</td><td>'+ \
str(int(stat['total_aligned_reads']))+'</td><td>'+ \
str(100*round(float(stat['total_aligned_reads'])/ + \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of uniquely aligned reads</td><td>'+ \
str(int(stat['num_multiread'][0]))+'</td><td>'+ \
str(100*round(float(stat['num_multiread'][0])/ +\
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
multi_read = stat['total_aligned_reads']-stat['num_multiread'][0]
handle.write('<tr><td>Number of multiple aligned reads</td><td>'+ \
str(int(multi_read))+'</td><td>'+str(100*round(float(multi_read)\
/float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr> <td></td><td> </td><td> </td></tr>\n')
#mismatches within uniquely aligned
handle.write('<tr><td>Number of perfect matches within uniquely aligned reads</td><td>'+ \
str(int(stat['num_unique_mismatches'][0]))+'</td><td>'+ \
str(100*round(float(stat['num_unique_mismatches'][0])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
uniq_read_multi_mm = stat['num_multiread'][0]-stat['num_unique_mismatches'][0]
handle.write('<tr><td>Number of uniquely aligned reads with mismatches</td><td>'+\
str(int(uniq_read_multi_mm))+'</td><td>'+ \
str(100*round(float(uniq_read_multi_mm)/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr> <td></td><td> </td> <td> </td></tr>\n')
#mismatches within uniquely aligned
handle.write('<tr><td>Number of perfect matches within multiple aligned '+ \
'reads</td><td>'+str(int(stat['num_multiple_mismatches'][0]))+ \
'</td><td>'+str(100*round(float(stat['num_multiple_mismatches'][0])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
mul_read_multi_mm = multi_read-stat['num_multiple_mismatches'][0]
handle.write('<tr><td>Number of multiple aligned reads with mismatches</td><td>'+ \
str(int(mul_read_multi_mm))+'</td><td>'+ \
str(100*round(float(mul_read_multi_mm)/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td> </td><td> </td><td> </td></tr>\n')
#paired / singleton / ...
handle.write('<tr><td>Number of singleton reads</td><td>'+ \
str(stat['is_singleton'])+'</td><td>'+ \
str(100*round(float(stat['is_singleton'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of paired reads</td><td>'+str(stat['is_paired'])+ \
'</td><td>'+str(100*round(float(stat['is_paired'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of proper paired reads</td><td>'+ \
str(stat['is_proper_pair'])+'</td><td>'+ \
str(100*round(float(stat['is_proper_pair'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of unmapped reads</td><td>'+ \
str(stat['is_unmapped'])+'</td><td>'+ \
str(100*round(float(stat['is_unmapped'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td> </td><td> </td><td> </td></tr>\n')
#spliced / inserts / deletions
handle.write('<tr><td>Number of spliced reads</td><td>'+ \
str(stat['spliced_reads'])+'</td><td>'+ \
str(100*round(float(stat['spliced_reads'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of reads with inserts</td><td>'+ \
str(stat['reads_with_inserts'])+'</td><td>'+ \
str(100*round(float(stat['reads_with_inserts'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of reads with deletions</td><td>'+ \
str(stat['reads_with_deletions'])+'</td><td>'+ \
str(100*round(float(stat['reads_with_deletions'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('</table><br><br><br><br>\n')
#add figures
handle.write('<img src="multiple_alignments.png" '+ \
'alt="multiple_alignments"><br><br><br><br>\n')
handle.write('<img src="num_unique_mismatches.png" '+ \
'alt="num_unique_mismatches"><br><br><br><br>\n')
handle.write('<img src="num_multiple_mismatches.png" a'+ \
'lt="num_multiple_mismatches"><center><br><br><br><br>\n\n\n')
handle.write('<style>#one-column-emphasis{font-family:"Lucida Sans Unicode",'+ \
' "Lucida Grande", Sans-Serif;font-size:12px;width:480px;'+ \
'text-align:left;border-collapse:collapse;margin:20px;}'+ \
'#one-column-emphasis th{font-size:14px;font-weight:normal;'+ \
'color:#039;padding:12px 15px;}#one-column-emphasis '+ \
'td{color:#669;border-top:1px solid #e8edff;padding:10px 15px;}'+\
'.oce-first{background:#d0dafd;border-right:10px solid '+ \
'transparent;border-left:10px solid transparent;}'+ \
'#one-column-emphasis tr:hover td{color:#339;'+ \
'background:#eff2ff;}</style></body>\n')
handle.close()
def make_report(stat, output_dir):
plot_mul_alignments(stat, output_dir)
plot_num_unique_mismatches(stat, output_dir)
number_of_multiple_mismatches(stat, output_dir)
create_html(stat, output_dir)
if __name__ == "__main__":
## Import modules
import pysam
import sys
import getopt
import json
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import math
import pylab
## Check arguments
if len(sys.argv) < 5:
print __doc__
sys.exit(0)
optlist, cmdlist = getopt.getopt(sys.argv[1:], 'hi:o:')
for opt in optlist:
if opt[0] == '-h':
print __doc__; sys.exit(0)
if opt[0] == '-i':
input_filename = opt[1]
if opt[0] == '-o':
output_directory = opt[1]
#extract stats from bam file
stats = extract_stats(input_filename)
#dump stats into a text file
output_stats(stats, output_directory)
#create a report for a single sample
make_report(stats, output_directory)
#dump stats into a json file
with open(output_directory+'stats.json', 'w') as f:
json.dump(stats, f)
| apache-2.0 |
GuessWhoSamFoo/pandas | pandas/tests/internals/test_internals.py | 1 | 48902 |
# -*- coding: utf-8 -*-
# pylint: disable=W0102
from datetime import date, datetime
from distutils.version import LooseVersion
import itertools
import operator
import re
import sys
import numpy as np
import pytest
from pandas._libs.internals import BlockPlacement
from pandas.compat import OrderedDict, lrange, u, zip
import pandas as pd
from pandas import (
Categorical, DataFrame, DatetimeIndex, Index, MultiIndex, Series,
SparseArray)
import pandas.core.algorithms as algos
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.internals import BlockManager, SingleBlockManager, make_block
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal, randn)
# in 3.6.1 a c-api slicing function changed, see src/compat_helper.h
PY361 = LooseVersion(sys.version) >= LooseVersion('3.6.1')
@pytest.fixture
def mgr():
return create_mgr(
'a: f8; b: object; c: f8; d: object; e: f8;'
'f: bool; g: i8; h: complex; i: datetime-1; j: datetime-2;'
'k: M8[ns, US/Eastern]; l: M8[ns, CET];')
def assert_block_equal(left, right):
tm.assert_numpy_array_equal(left.values, right.values)
assert left.dtype == right.dtype
assert isinstance(left.mgr_locs, BlockPlacement)
assert isinstance(right.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(left.mgr_locs.as_array,
right.mgr_locs.as_array)
def get_numeric_mat(shape):
arr = np.arange(shape[0])
return np.lib.stride_tricks.as_strided(x=arr, shape=shape, strides=(
arr.itemsize, ) + (0, ) * (len(shape) - 1)).copy()
N = 10
def create_block(typestr, placement, item_shape=None, num_offset=0):
"""
Supported typestr:
* float, f8, f4, f2
* int, i8, i4, i2, i1
* uint, u8, u4, u2, u1
* complex, c16, c8
* bool
* object, string, O
* datetime, dt, M8[ns], M8[ns, tz]
* timedelta, td, m8[ns]
* sparse (SparseArray with fill_value=0.0)
* sparse_na (SparseArray with fill_value=np.nan)
* category, category2
"""
placement = BlockPlacement(placement)
num_items = len(placement)
if item_shape is None:
item_shape = (N, )
shape = (num_items, ) + item_shape
mat = get_numeric_mat(shape)
if typestr in ('float', 'f8', 'f4', 'f2', 'int', 'i8', 'i4', 'i2', 'i1',
'uint', 'u8', 'u4', 'u2', 'u1'):
values = mat.astype(typestr) + num_offset
elif typestr in ('complex', 'c16', 'c8'):
values = 1.j * (mat.astype(typestr) + num_offset)
elif typestr in ('object', 'string', 'O'):
values = np.reshape(['A%d' % i for i in mat.ravel() + num_offset],
shape)
elif typestr in ('b', 'bool', ):
values = np.ones(shape, dtype=np.bool_)
elif typestr in ('datetime', 'dt', 'M8[ns]'):
values = (mat * 1e9).astype('M8[ns]')
elif typestr.startswith('M8[ns'):
# datetime with tz
m = re.search(r'M8\[ns,\s*(\w+\/?\w*)\]', typestr)
assert m is not None, "incompatible typestr -> {0}".format(typestr)
tz = m.groups()[0]
assert num_items == 1, "must have only 1 num items for a tz-aware"
values = DatetimeIndex(np.arange(N) * 1e9, tz=tz)
elif typestr in ('timedelta', 'td', 'm8[ns]'):
values = (mat * 1).astype('m8[ns]')
elif typestr in ('category', ):
values = Categorical([1, 1, 2, 2, 3, 3, 3, 3, 4, 4])
elif typestr in ('category2', ):
values = Categorical(['a', 'a', 'a', 'a', 'b', 'b', 'c', 'c', 'c', 'd'
])
elif typestr in ('sparse', 'sparse_na'):
# FIXME: doesn't support num_rows != 10
assert shape[-1] == 10
assert all(s == 1 for s in shape[:-1])
if typestr.endswith('_na'):
fill_value = np.nan
else:
fill_value = 0.0
values = SparseArray([fill_value, fill_value, 1, 2, 3, fill_value,
4, 5, fill_value, 6], fill_value=fill_value)
arr = values.sp_values.view()
arr += (num_offset - 1)
else:
raise ValueError('Unsupported typestr: "%s"' % typestr)
return make_block(values, placement=placement, ndim=len(shape))
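# Illustrative calls (a sketch, mirroring the fixtures used further down):
#   create_block('float', [0, 2, 4])         # 3 x N float64 block at manager rows 0, 2, 4
#   create_block('M8[ns, US/Eastern]', [3])  # single tz-aware datetime row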
def create_single_mgr(typestr, num_rows=None):
if num_rows is None:
num_rows = N
return SingleBlockManager(
create_block(typestr, placement=slice(0, num_rows), item_shape=()),
np.arange(num_rows))
def create_mgr(descr, item_shape=None):
"""
Construct BlockManager from string description.
String description syntax looks similar to np.matrix initializer. It looks
like this::
a,b,c: f8; d,e,f: i8
Rules are rather simple:
* see list of supported datatypes in `create_block` method
* components are semicolon-separated
* each component is `NAME,NAME,NAME: DTYPE_ID`
* whitespace around colons & semicolons are removed
* components with same DTYPE_ID are combined into single block
* to force multiple blocks with same dtype, use '-SUFFIX'::
'a:f8-1; b:f8-2; c:f8-foobar'
"""
if item_shape is None:
item_shape = (N, )
offset = 0
mgr_items = []
block_placements = OrderedDict()
for d in descr.split(';'):
d = d.strip()
if not len(d):
continue
names, blockstr = d.partition(':')[::2]
blockstr = blockstr.strip()
names = names.strip().split(',')
mgr_items.extend(names)
placement = list(np.arange(len(names)) + offset)
try:
block_placements[blockstr].extend(placement)
except KeyError:
block_placements[blockstr] = placement
offset += len(names)
mgr_items = Index(mgr_items)
blocks = []
num_offset = 0
for blockstr, placement in block_placements.items():
typestr = blockstr.split('-')[0]
blocks.append(create_block(typestr,
placement,
item_shape=item_shape,
num_offset=num_offset, ))
num_offset += len(placement)
return BlockManager(sorted(blocks, key=lambda b: b.mgr_locs[0]),
[mgr_items] + [np.arange(n) for n in item_shape])
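# Example of the description syntax (a sketch): two float64 columns sharing one
# block plus a separate int64 column, wrapped in a DataFrame for inspection.
#   mgr = create_mgr('a,b: f8; c: i8')
#   DataFrame(mgr).dtypes   # a, b -> float64; c -> int64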
class TestBlock(object):
def setup_method(self, method):
# self.fblock = get_float_ex() # a,c,e
# self.cblock = get_complex_ex() #
# self.oblock = get_obj_ex()
# self.bool_block = get_bool_ex()
# self.int_block = get_int_ex()
self.fblock = create_block('float', [0, 2, 4])
self.cblock = create_block('complex', [7])
self.oblock = create_block('object', [1, 3])
self.bool_block = create_block('bool', [5])
self.int_block = create_block('int', [6])
def test_constructor(self):
int32block = create_block('i4', [0])
assert int32block.dtype == np.int32
def test_pickle(self):
def _check(blk):
assert_block_equal(tm.round_trip_pickle(blk), blk)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_mgr_locs(self):
assert isinstance(self.fblock.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array,
np.array([0, 2, 4], dtype=np.int64))
def test_attrs(self):
assert self.fblock.shape == self.fblock.values.shape
assert self.fblock.dtype == self.fblock.values.dtype
assert len(self.fblock) == len(self.fblock.values)
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = Index(['e', 'a', 'b', 'd', 'f'])
ablock = make_block(avals, ref_cols.get_indexer(['e', 'b']))
bblock = make_block(bvals, ref_cols.get_indexer(['a', 'd']))
merged = ablock.merge(bblock)
tm.assert_numpy_array_equal(merged.mgr_locs.as_array,
np.array([0, 1, 2, 3], dtype=np.int64))
tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals))
tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals))
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
assert cop is not self.fblock
assert_block_equal(self.fblock, cop)
def test_reindex_index(self):
pass
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.copy()
newb.delete(0)
assert isinstance(newb.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([2, 4], dtype=np.int64))
assert (newb.values[0] == 1).all()
newb = self.fblock.copy()
newb.delete(1)
assert isinstance(newb.mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 4], dtype=np.int64))
assert (newb.values[1] == 2).all()
newb = self.fblock.copy()
newb.delete(2)
tm.assert_numpy_array_equal(newb.mgr_locs.as_array,
np.array([0, 2], dtype=np.int64))
assert (newb.values[1] == 1).all()
newb = self.fblock.copy()
with pytest.raises(Exception):
newb.delete(3)
def test_make_block_same_class(self):
# issue 19431
block = create_block('M8[ns, US/Eastern]', [3])
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
block.make_block_same_class(block.values,
dtype=block.values.dtype)
class TestDatetimeBlock(object):
def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
none_coerced = block._try_coerce_args(block.values, None)[1]
assert pd.Timestamp(none_coerced) is pd.NaT
# coerce different types of date objects
vals = (np.datetime64('2010-10-10'), datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
coerced = block._try_coerce_args(block.values, val)[1]
assert np.int64 == type(coerced)
assert pd.Timestamp('2010-10-10') == pd.Timestamp(coerced)
class TestBlockManager(object):
def test_constructor_corner(self):
pass
def test_attrs(self):
mgr = create_mgr('a,b,c: f8-1; d,e,f: f8-2')
assert mgr.nblocks == 2
assert len(mgr) == 6
def test_is_mixed_dtype(self):
assert not create_mgr('a,b:f8').is_mixed_type
assert not create_mgr('a:f8-1; b:f8-2').is_mixed_type
assert create_mgr('a,b:f8; c,d: f4').is_mixed_type
assert create_mgr('a,b:f8; c,d: object').is_mixed_type
def test_duplicate_ref_loc_failure(self):
tmp_mgr = create_mgr('a:bool; a: f8')
axes, blocks = tmp_mgr.axes, tmp_mgr.blocks
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([0])
# test trying to create block manager with overlapping ref locs
with pytest.raises(AssertionError):
BlockManager(blocks, axes)
blocks[0].mgr_locs = np.array([0])
blocks[1].mgr_locs = np.array([1])
mgr = BlockManager(blocks, axes)
mgr.iget(1)
def test_contains(self, mgr):
assert 'a' in mgr
assert 'baz' not in mgr
def test_pickle(self, mgr):
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
# share ref_items
# assert mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items
# GH2431
assert hasattr(mgr2, "_is_consolidated")
assert hasattr(mgr2, "_known_consolidated")
# reset to False on load
assert not mgr2._is_consolidated
assert not mgr2._known_consolidated
def test_non_unique_pickle(self):
mgr = create_mgr('a,a,a:f8')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
mgr = create_mgr('a: f8; a: i8')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
def test_categorical_block_pickle(self):
mgr = create_mgr('a: category')
mgr2 = tm.round_trip_pickle(mgr)
assert_frame_equal(DataFrame(mgr), DataFrame(mgr2))
smgr = create_single_mgr('category')
smgr2 = tm.round_trip_pickle(smgr)
assert_series_equal(Series(smgr), Series(smgr2))
def test_get(self):
cols = Index(list('abc'))
values = np.random.rand(3, 3)
block = make_block(values=values.copy(), placement=np.arange(3))
mgr = BlockManager(blocks=[block], axes=[cols, np.arange(3)])
assert_almost_equal(mgr.get('a', fastpath=False), values[0])
assert_almost_equal(mgr.get('b', fastpath=False), values[1])
assert_almost_equal(mgr.get('c', fastpath=False), values[2])
assert_almost_equal(mgr.get('a').internal_values(), values[0])
assert_almost_equal(mgr.get('b').internal_values(), values[1])
assert_almost_equal(mgr.get('c').internal_values(), values[2])
def test_set(self):
mgr = create_mgr('a,b,c: int', item_shape=(3, ))
mgr.set('d', np.array(['foo'] * 3))
mgr.set('b', np.array(['bar'] * 3))
tm.assert_numpy_array_equal(mgr.get('a').internal_values(),
np.array([0] * 3))
tm.assert_numpy_array_equal(mgr.get('b').internal_values(),
np.array(['bar'] * 3, dtype=np.object_))
tm.assert_numpy_array_equal(mgr.get('c').internal_values(),
np.array([2] * 3))
tm.assert_numpy_array_equal(mgr.get('d').internal_values(),
np.array(['foo'] * 3, dtype=np.object_))
def test_set_change_dtype(self, mgr):
mgr.set('baz', np.zeros(N, dtype=bool))
mgr.set('baz', np.repeat('foo', N))
assert mgr.get('baz').dtype == np.object_
mgr2 = mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
assert mgr2.get('baz').dtype == np.object_
mgr2.set('quux', randn(N).astype(int))
assert mgr2.get('quux').dtype == np.int_
mgr2.set('quux', randn(N))
assert mgr2.get('quux').dtype == np.float_
def test_set_change_dtype_slice(self): # GH8850
cols = MultiIndex.from_tuples([('1st', 'a'), ('2nd', 'b'), ('3rd', 'c')
])
df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols)
df['2nd'] = df['2nd'] * 2.0
blocks = df._to_dict_of_blocks()
assert sorted(blocks.keys()) == ['float64', 'int64']
assert_frame_equal(blocks['float64'], DataFrame(
[[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]))
assert_frame_equal(blocks['int64'], DataFrame(
[[3], [6]], columns=cols[2:]))
def test_copy(self, mgr):
cp = mgr.copy(deep=False)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# view assertion
assert cp_blk.equals(blk)
if isinstance(blk.values, np.ndarray):
assert cp_blk.values.base is blk.values.base
else:
# DatetimeTZBlock has DatetimeIndex values
assert cp_blk.values._data.base is blk.values._data.base
cp = mgr.copy(deep=True)
for blk, cp_blk in zip(mgr.blocks, cp.blocks):
# copy assertion we either have a None for a base or in case of
# some blocks it is an array (e.g. datetimetz), but was copied
assert cp_blk.equals(blk)
if not isinstance(cp_blk.values, np.ndarray):
assert cp_blk.values._data.base is not blk.values._data.base
else:
assert cp_blk.values.base is None and blk.values.base is None
def test_sparse(self):
mgr = create_mgr('a: sparse-1; b: sparse-2')
# what to test here?
assert mgr.as_array().dtype == np.float64
def test_sparse_mixed(self):
mgr = create_mgr('a: sparse-1; b: sparse-2; c: f8')
assert len(mgr.blocks) == 3
assert isinstance(mgr, BlockManager)
# what to test here?
def test_as_array_float(self):
mgr = create_mgr('c: f4; d: f2; e: f8')
assert mgr.as_array().dtype == np.float64
mgr = create_mgr('c: f4; d: f2')
assert mgr.as_array().dtype == np.float32
def test_as_array_int_bool(self):
mgr = create_mgr('a: bool-1; b: bool-2')
assert mgr.as_array().dtype == np.bool_
mgr = create_mgr('a: i8-1; b: i8-2; c: i4; d: i2; e: u1')
assert mgr.as_array().dtype == np.int64
mgr = create_mgr('c: i4; d: i2; e: u1')
assert mgr.as_array().dtype == np.int32
def test_as_array_datetime(self):
mgr = create_mgr('h: datetime-1; g: datetime-2')
assert mgr.as_array().dtype == 'M8[ns]'
def test_as_array_datetime_tz(self):
mgr = create_mgr('h: M8[ns, US/Eastern]; g: M8[ns, CET]')
assert mgr.get('h').dtype == 'datetime64[ns, US/Eastern]'
assert mgr.get('g').dtype == 'datetime64[ns, CET]'
assert mgr.as_array().dtype == 'object'
def test_astype(self):
# coerce all
mgr = create_mgr('c: f4; d: f2; e: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t)
assert tmgr.get('c').dtype.type == t
assert tmgr.get('d').dtype.type == t
assert tmgr.get('e').dtype.type == t
# mixed
mgr = create_mgr('a,b: object; c: bool; d: datetime;'
'e: f4; f: f2; g: f8')
for t in ['float16', 'float32', 'float64', 'int32', 'int64']:
t = np.dtype(t)
tmgr = mgr.astype(t, errors='ignore')
assert tmgr.get('c').dtype.type == t
assert tmgr.get('e').dtype.type == t
assert tmgr.get('f').dtype.type == t
assert tmgr.get('g').dtype.type == t
assert tmgr.get('a').dtype.type == np.object_
assert tmgr.get('b').dtype.type == np.object_
if t != np.int64:
assert tmgr.get('d').dtype.type == np.datetime64
else:
assert tmgr.get('d').dtype.type == t
def test_convert(self):
def _compare(old_mgr, new_mgr):
""" compare the blocks, numeric compare ==, object don't """
old_blocks = set(old_mgr.blocks)
new_blocks = set(new_mgr.blocks)
assert len(old_blocks) == len(new_blocks)
# compare non-numeric
for b in old_blocks:
found = False
for nb in new_blocks:
if (b.values == nb.values).all():
found = True
break
assert found
for b in new_blocks:
found = False
for ob in old_blocks:
if (b.values == ob.values).all():
found = True
break
assert found
# noops
mgr = create_mgr('f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
mgr = create_mgr('a, b: object; f: i8; g: f8')
new_mgr = mgr.convert()
_compare(mgr, new_mgr)
# convert
mgr = create_mgr('a,b,foo: object; f: i8; g: f8')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
assert new_mgr.get('a').dtype == np.int64
assert new_mgr.get('b').dtype == np.float64
assert new_mgr.get('foo').dtype == np.object_
assert new_mgr.get('f').dtype == np.int64
assert new_mgr.get('g').dtype == np.float64
mgr = create_mgr('a,b,foo: object; f: i4; bool: bool; dt: datetime;'
'i: i8; g: f8; h: f2')
mgr.set('a', np.array(['1'] * N, dtype=np.object_))
mgr.set('b', np.array(['2.'] * N, dtype=np.object_))
mgr.set('foo', np.array(['foo.'] * N, dtype=np.object_))
new_mgr = mgr.convert(numeric=True)
assert new_mgr.get('a').dtype == np.int64
assert new_mgr.get('b').dtype == np.float64
assert new_mgr.get('foo').dtype == np.object_
assert new_mgr.get('f').dtype == np.int32
assert new_mgr.get('bool').dtype == np.bool_
assert new_mgr.get('dt').dtype.type == np.datetime64
assert new_mgr.get('i').dtype == np.int64
assert new_mgr.get('g').dtype == np.float64
assert new_mgr.get('h').dtype == np.float16
def test_interleave(self):
# self
for dtype in ['f8', 'i8', 'object', 'bool', 'complex', 'M8[ns]',
'm8[ns]']:
mgr = create_mgr('a: {0}'.format(dtype))
assert mgr.as_array().dtype == dtype
mgr = create_mgr('a: {0}; b: {0}'.format(dtype))
assert mgr.as_array().dtype == dtype
# will be converted according the actual dtype of the underlying
mgr = create_mgr('a: category')
assert mgr.as_array().dtype == 'i8'
mgr = create_mgr('a: category; b: category')
assert mgr.as_array().dtype == 'i8'
mgr = create_mgr('a: category; b: category2')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: category2')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: category2; b: category2')
assert mgr.as_array().dtype == 'object'
# combinations
mgr = create_mgr('a: f8')
assert mgr.as_array().dtype == 'f8'
mgr = create_mgr('a: f8; b: i8')
assert mgr.as_array().dtype == 'f8'
mgr = create_mgr('a: f4; b: i8')
assert mgr.as_array().dtype == 'f8'
mgr = create_mgr('a: f4; b: i8; d: object')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: bool; b: i8')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: complex')
assert mgr.as_array().dtype == 'complex'
mgr = create_mgr('a: f8; b: category')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: category')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: bool')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: i8')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: m8[ns]; b: bool')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: m8[ns]; b: i8')
assert mgr.as_array().dtype == 'object'
mgr = create_mgr('a: M8[ns]; b: m8[ns]')
assert mgr.as_array().dtype == 'object'
def test_interleave_non_unique_cols(self):
df = DataFrame([
[pd.Timestamp('20130101'), 3.5],
[pd.Timestamp('20130102'), 4.5]],
columns=['x', 'x'],
index=[1, 2])
df_unique = df.copy()
df_unique.columns = ['x', 'y']
assert df_unique.values.shape == df.values.shape
tm.assert_numpy_array_equal(df_unique.values[0], df.values[0])
tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
def test_consolidate(self):
pass
def test_consolidate_ordering_issues(self, mgr):
mgr.set('f', randn(N))
mgr.set('d', randn(N))
mgr.set('b', randn(N))
mgr.set('g', randn(N))
mgr.set('h', randn(N))
# we have datetime/tz blocks in mgr
cons = mgr.consolidate()
assert cons.nblocks == 4
cons = mgr.consolidate().get_numeric_data()
assert cons.nblocks == 1
assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement)
tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array,
np.arange(len(cons.items), dtype=np.int64))
def test_reindex_index(self):
pass
def test_reindex_items(self):
# mgr is not consolidated, f8 & f8-2 blocks
mgr = create_mgr('a: f8; b: i8; c: f8; d: i8; e: f8;'
'f: bool; g: f8-2')
reindexed = mgr.reindex_axis(['g', 'c', 'a', 'd'], axis=0)
assert reindexed.nblocks == 2
tm.assert_index_equal(reindexed.items, pd.Index(['g', 'c', 'a', 'd']))
assert_almost_equal(
mgr.get('g', fastpath=False), reindexed.get('g', fastpath=False))
assert_almost_equal(
mgr.get('c', fastpath=False), reindexed.get('c', fastpath=False))
assert_almost_equal(
mgr.get('a', fastpath=False), reindexed.get('a', fastpath=False))
assert_almost_equal(
mgr.get('d', fastpath=False), reindexed.get('d', fastpath=False))
assert_almost_equal(
mgr.get('g').internal_values(),
reindexed.get('g').internal_values())
assert_almost_equal(
mgr.get('c').internal_values(),
reindexed.get('c').internal_values())
assert_almost_equal(
mgr.get('a').internal_values(),
reindexed.get('a').internal_values())
assert_almost_equal(
mgr.get('d').internal_values(),
reindexed.get('d').internal_values())
def test_multiindex_xs(self):
mgr = create_mgr('a,b,c: f8; d,e,f: i8')
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
mgr.set_axis(1, index)
result = mgr.xs('bar', axis=1)
assert result.shape == (6, 2)
assert result.axes[1][0] == ('bar', 'one')
assert result.axes[1][1] == ('bar', 'two')
def test_get_numeric_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([1, 2, 3], dtype=np.object_))
numeric = mgr.get_numeric_data()
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
assert_almost_equal(
mgr.get('float', fastpath=False), numeric.get('float',
fastpath=False))
assert_almost_equal(
mgr.get('float').internal_values(),
numeric.get('float').internal_values())
# Check sharing
numeric.set('float', np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
numeric2 = mgr.get_numeric_data(copy=True)
tm.assert_index_equal(numeric.items,
pd.Index(['int', 'float', 'complex', 'bool']))
numeric2.set('float', np.array([1000., 2000., 3000.]))
assert_almost_equal(
mgr.get('float', fastpath=False), np.array([100., 200., 300.]))
assert_almost_equal(
mgr.get('float').internal_values(), np.array([100., 200., 300.]))
def test_get_bool_data(self):
mgr = create_mgr('int: int; float: float; complex: complex;'
'str: object; bool: bool; obj: object; dt: datetime',
item_shape=(3, ))
mgr.set('obj', np.array([True, False, True], dtype=np.object_))
bools = mgr.get_bool_data()
tm.assert_index_equal(bools.items, pd.Index(['bool']))
assert_almost_equal(mgr.get('bool', fastpath=False),
bools.get('bool', fastpath=False))
assert_almost_equal(
mgr.get('bool').internal_values(),
bools.get('bool').internal_values())
bools.set('bool', np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
# Check sharing
bools2 = mgr.get_bool_data(copy=True)
bools2.set('bool', np.array([False, True, False]))
tm.assert_numpy_array_equal(mgr.get('bool', fastpath=False),
np.array([True, False, True]))
tm.assert_numpy_array_equal(mgr.get('bool').internal_values(),
np.array([True, False, True]))
def test_unicode_repr_doesnt_raise(self):
repr(create_mgr(u('b,\u05d0: object')))
def test_missing_unicode_key(self):
df = DataFrame({"a": [1]})
try:
df.loc[:, u("\u05d0")] # should not raise UnicodeEncodeError
except KeyError:
pass # this is the expected exception
def test_equals(self):
# unique items
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
bm1 = create_mgr('a,a,a: i8-1; b,b,b: i8-2')
bm2 = BlockManager(bm1.blocks[::-1], bm1.axes)
assert bm1.equals(bm2)
def test_equals_block_order_different_dtypes(self):
# GH 9330
mgr_strings = [
"a:i8;b:f8", # basic case
"a:i8;b:f8;c:c8;d:b", # many types
"a:i8;e:dt;f:td;g:string", # more types
"a:i8;b:category;c:category2;d:category2", # categories
"c:sparse;d:sparse_na;b:f8", # sparse
]
for mgr_string in mgr_strings:
bm = create_mgr(mgr_string)
block_perms = itertools.permutations(bm.blocks)
for bm_perm in block_perms:
bm_this = BlockManager(bm_perm, bm.axes)
assert bm.equals(bm_this)
assert bm_this.equals(bm)
def test_single_mgr_ctor(self):
mgr = create_single_mgr('f8', num_rows=5)
assert mgr.as_array().tolist() == [0., 1., 2., 3., 4.]
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
bm1 = create_mgr('a,b,c: i8-1; d,e,f: i8-2')
for value in invalid_values:
with pytest.raises(ValueError):
bm1.replace_list([1], [2], inplace=value)
class TestIndexing(object):
# Nosetests-style data-driven tests.
#
# This test applies different indexing routines to block managers and
# compares the outcome to the result of same operations on np.ndarray.
#
# NOTE: sparse (SparseBlock with fill_value != np.nan) fail a lot of tests
# and are disabled.
MANAGERS = [
create_single_mgr('f8', N),
create_single_mgr('i8', N),
# 2-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N,)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N,)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N,)),
# 3-dim
create_mgr('a,b,c,d,e,f: f8', item_shape=(N, N)),
create_mgr('a,b,c,d,e,f: i8', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: string', item_shape=(N, N)),
create_mgr('a,b: f8; c,d: i8; e,f: f8', item_shape=(N, N)),
]
# MANAGERS = [MANAGERS[6]]
def test_get_slice(self):
def assert_slice_ok(mgr, axis, slobj):
# import pudb; pudb.set_trace()
mat = mgr.as_array()
# we may be using an ndarray to test slicing and
# it might not be the full length of the axis
if isinstance(slobj, np.ndarray):
ax = mgr.axes[axis]
if len(ax) and len(slobj) and len(slobj) != len(ax):
slobj = np.concatenate([slobj, np.zeros(
len(ax) - len(slobj), dtype=bool)])
sliced = mgr.get_slice(slobj, axis=axis)
mat_slobj = (slice(None), ) * axis + (slobj, )
tm.assert_numpy_array_equal(mat[mat_slobj], sliced.as_array(),
check_dtype=False)
tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# slice
assert_slice_ok(mgr, ax, slice(None))
assert_slice_ok(mgr, ax, slice(3))
assert_slice_ok(mgr, ax, slice(100))
assert_slice_ok(mgr, ax, slice(1, 4))
assert_slice_ok(mgr, ax, slice(3, 0, -2))
# boolean mask
assert_slice_ok(
mgr, ax, np.array([], dtype=np.bool_))
assert_slice_ok(
mgr, ax,
np.ones(mgr.shape[ax], dtype=np.bool_))
assert_slice_ok(
mgr, ax,
np.zeros(mgr.shape[ax], dtype=np.bool_))
if mgr.shape[ax] >= 3:
assert_slice_ok(
mgr, ax,
np.arange(mgr.shape[ax]) % 3 == 0)
assert_slice_ok(
mgr, ax, np.array(
[True, True, False], dtype=np.bool_))
# fancy indexer
assert_slice_ok(mgr, ax, [])
assert_slice_ok(mgr, ax, lrange(mgr.shape[ax]))
if mgr.shape[ax] >= 3:
assert_slice_ok(mgr, ax, [0, 1, 2])
assert_slice_ok(mgr, ax, [-1, -2, -3])
def test_take(self):
def assert_take_ok(mgr, axis, indexer):
mat = mgr.as_array()
taken = mgr.take(indexer, axis)
tm.assert_numpy_array_equal(np.take(mat, indexer, axis),
taken.as_array(), check_dtype=False)
tm.assert_index_equal(mgr.axes[axis].take(indexer),
taken.axes[axis])
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
# take/fancy indexer
assert_take_ok(mgr, ax, [])
assert_take_ok(mgr, ax, [0, 0, 0])
assert_take_ok(mgr, ax, lrange(mgr.shape[ax]))
if mgr.shape[ax] >= 3:
assert_take_ok(mgr, ax, [0, 1, 2])
assert_take_ok(mgr, ax, [-1, -2, -3])
def test_reindex_axis(self):
def assert_reindex_axis_is_ok(mgr, axis, new_labels, fill_value):
mat = mgr.as_array()
indexer = mgr.axes[axis].get_indexer_for(new_labels)
reindexed = mgr.reindex_axis(new_labels, axis,
fill_value=fill_value)
tm.assert_numpy_array_equal(algos.take_nd(mat, indexer, axis,
fill_value=fill_value),
reindexed.as_array(),
check_dtype=False)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
assert_reindex_axis_is_ok(
mgr, ax,
pd.Index([]), fill_value)
assert_reindex_axis_is_ok(
mgr, ax, mgr.axes[ax],
fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
mgr.axes[ax][[0, 0, 0]], fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
pd.Index(['foo', 'bar', 'baz']), fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
pd.Index(['foo', mgr.axes[ax][0], 'baz']),
fill_value)
if mgr.shape[ax] >= 3:
assert_reindex_axis_is_ok(
mgr, ax,
mgr.axes[ax][:-3], fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
mgr.axes[ax][-3::-1], fill_value)
assert_reindex_axis_is_ok(
mgr, ax,
mgr.axes[ax][[0, 1, 2, 0, 1, 2]], fill_value)
def test_reindex_indexer(self):
def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer,
fill_value):
mat = mgr.as_array()
reindexed_mat = algos.take_nd(mat, indexer, axis,
fill_value=fill_value)
reindexed = mgr.reindex_indexer(new_labels, indexer, axis,
fill_value=fill_value)
tm.assert_numpy_array_equal(reindexed_mat,
reindexed.as_array(),
check_dtype=False)
tm.assert_index_equal(reindexed.axes[axis], new_labels)
for mgr in self.MANAGERS:
for ax in range(mgr.ndim):
for fill_value in (None, np.nan, 100.):
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index([]), [], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
mgr.axes[ax], np.arange(mgr.shape[ax]), fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo'] * mgr.shape[ax]),
np.arange(mgr.shape[ax]), fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
mgr.axes[ax][::-1], np.arange(mgr.shape[ax]),
fill_value)
assert_reindex_indexer_is_ok(
mgr, ax, mgr.axes[ax],
np.arange(mgr.shape[ax])[::-1], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[0, 0, 0], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[-1, 0, -1], fill_value)
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo', mgr.axes[ax][0], 'baz']),
[-1, -1, -1], fill_value)
if mgr.shape[ax] >= 3:
assert_reindex_indexer_is_ok(
mgr, ax,
pd.Index(['foo', 'bar', 'baz']),
[0, 1, 2], fill_value)
# test_get_slice(slice_like, axis)
# take(indexer, axis)
# reindex_axis(new_labels, axis)
# reindex_indexer(new_labels, indexer, axis)
class TestBlockPlacement(object):
def test_slice_len(self):
assert len(BlockPlacement(slice(0, 4))) == 4
assert len(BlockPlacement(slice(0, 4, 2))) == 2
assert len(BlockPlacement(slice(0, 3, 2))) == 2
assert len(BlockPlacement(slice(0, 1, 2))) == 1
assert len(BlockPlacement(slice(1, 0, -1))) == 1
def test_zero_step_raises(self):
with pytest.raises(ValueError):
BlockPlacement(slice(1, 1, 0))
with pytest.raises(ValueError):
BlockPlacement(slice(1, 2, 0))
def test_unbounded_slice_raises(self):
def assert_unbounded_slice_error(slc):
with pytest.raises(ValueError, match="unbounded slice"):
BlockPlacement(slc)
assert_unbounded_slice_error(slice(None, None))
assert_unbounded_slice_error(slice(10, None))
assert_unbounded_slice_error(slice(None, None, -1))
assert_unbounded_slice_error(slice(None, 10, -1))
# These are "unbounded" because negative index will change depending on
# container shape.
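# For example, a placement built from slice(-1, None) would have to mean
# position 4 for a length-5 container but position 9 for a length-10 one,
# so it cannot be normalized up front and BlockPlacement rejects it.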
assert_unbounded_slice_error(slice(-1, None))
assert_unbounded_slice_error(slice(None, -1))
assert_unbounded_slice_error(slice(-1, -1))
assert_unbounded_slice_error(slice(-1, None, -1))
assert_unbounded_slice_error(slice(None, -1, -1))
assert_unbounded_slice_error(slice(-1, -1, -1))
def test_not_slice_like_slices(self):
def assert_not_slice_like(slc):
assert not BlockPlacement(slc).is_slice_like
assert_not_slice_like(slice(0, 0))
assert_not_slice_like(slice(100, 0))
assert_not_slice_like(slice(100, 100, -1))
assert_not_slice_like(slice(0, 100, -1))
assert not BlockPlacement(slice(0, 0)).is_slice_like
assert not BlockPlacement(slice(100, 100)).is_slice_like
def test_array_to_slice_conversion(self):
def assert_as_slice_equals(arr, slc):
assert BlockPlacement(arr).as_slice == slc
assert_as_slice_equals([0], slice(0, 1, 1))
assert_as_slice_equals([100], slice(100, 101, 1))
assert_as_slice_equals([0, 1, 2], slice(0, 3, 1))
assert_as_slice_equals([0, 5, 10], slice(0, 15, 5))
assert_as_slice_equals([0, 100], slice(0, 200, 100))
assert_as_slice_equals([2, 1], slice(2, 0, -1))
if not PY361:
assert_as_slice_equals([2, 1, 0], slice(2, None, -1))
assert_as_slice_equals([100, 0], slice(100, None, -100))
def test_not_slice_like_arrays(self):
def assert_not_slice_like(arr):
assert not BlockPlacement(arr).is_slice_like
assert_not_slice_like([])
assert_not_slice_like([-1])
assert_not_slice_like([-1, -2, -3])
assert_not_slice_like([-10])
assert_not_slice_like([-1])
assert_not_slice_like([-1, 0, 1, 2])
assert_not_slice_like([-2, 0, 2, 4])
assert_not_slice_like([1, 0, -1])
assert_not_slice_like([1, 1, 1])
def test_slice_iter(self):
assert list(BlockPlacement(slice(0, 3))) == [0, 1, 2]
assert list(BlockPlacement(slice(0, 0))) == []
assert list(BlockPlacement(slice(3, 0))) == []
if not PY361:
assert list(BlockPlacement(slice(3, 0, -1))) == [3, 2, 1]
assert list(BlockPlacement(slice(3, None, -1))) == [3, 2, 1, 0]
def test_slice_to_array_conversion(self):
def assert_as_array_equals(slc, asarray):
tm.assert_numpy_array_equal(
BlockPlacement(slc).as_array,
np.asarray(asarray, dtype=np.int64))
assert_as_array_equals(slice(0, 3), [0, 1, 2])
assert_as_array_equals(slice(0, 0), [])
assert_as_array_equals(slice(3, 0), [])
assert_as_array_equals(slice(3, 0, -1), [3, 2, 1])
if not PY361:
assert_as_array_equals(slice(3, None, -1), [3, 2, 1, 0])
assert_as_array_equals(slice(31, None, -10), [31, 21, 11, 1])
def test_blockplacement_add(self):
bpl = BlockPlacement(slice(0, 5))
assert bpl.add(1).as_slice == slice(1, 6, 1)
assert bpl.add(np.arange(5)).as_slice == slice(0, 10, 2)
assert list(bpl.add(np.arange(5, 0, -1))) == [5, 5, 5, 5, 5]
def test_blockplacement_add_int(self):
def assert_add_equals(val, inc, result):
assert list(BlockPlacement(val).add(inc)) == result
assert_add_equals(slice(0, 0), 0, [])
assert_add_equals(slice(1, 4), 0, [1, 2, 3])
assert_add_equals(slice(3, 0, -1), 0, [3, 2, 1])
assert_add_equals([1, 2, 4], 0, [1, 2, 4])
assert_add_equals(slice(0, 0), 10, [])
assert_add_equals(slice(1, 4), 10, [11, 12, 13])
assert_add_equals(slice(3, 0, -1), 10, [13, 12, 11])
assert_add_equals([1, 2, 4], 10, [11, 12, 14])
assert_add_equals(slice(0, 0), -1, [])
assert_add_equals(slice(1, 4), -1, [0, 1, 2])
assert_add_equals([1, 2, 4], -1, [0, 1, 3])
with pytest.raises(ValueError):
BlockPlacement(slice(1, 4)).add(-10)
with pytest.raises(ValueError):
BlockPlacement([1, 2, 4]).add(-10)
if not PY361:
assert_add_equals(slice(3, 0, -1), -1, [2, 1, 0])
assert_add_equals(slice(2, None, -1), 0, [2, 1, 0])
assert_add_equals(slice(2, None, -1), 10, [12, 11, 10])
with pytest.raises(ValueError):
BlockPlacement(slice(2, None, -1)).add(-1)
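# Note: DummyElement below is a minimal scalar-like helper used by
# TestCanHoldElement.test_binop_other; it implements just enough of the
# NumPy scalar interface (__array__, astype, view, any) to exercise the
# dtype handling of block arithmetic.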
class DummyElement(object):
def __init__(self, value, dtype):
self.value = value
self.dtype = np.dtype(dtype)
def __array__(self):
return np.array(self.value, dtype=self.dtype)
def __str__(self):
return "DummyElement({}, {})".format(self.value, self.dtype)
def __repr__(self):
return str(self)
def astype(self, dtype, copy=False):
self.dtype = dtype
return self
def view(self, dtype):
return type(self)(self.value.view(dtype), dtype)
def any(self, axis=None):
return bool(self.value)
class TestCanHoldElement(object):
@pytest.mark.parametrize('value, dtype', [
(1, 'i8'),
(1.0, 'f8'),
(2**63, 'f8'),
(1j, 'complex128'),
(2**63, 'complex128'),
(True, 'bool'),
(np.timedelta64(20, 'ns'), '<m8[ns]'),
(np.datetime64(20, 'ns'), '<M8[ns]'),
])
@pytest.mark.parametrize('op', [
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.mod,
operator.pow,
], ids=lambda x: x.__name__)
def test_binop_other(self, op, value, dtype):
skip = {(operator.add, 'bool'),
(operator.sub, 'bool'),
(operator.mul, 'bool'),
(operator.truediv, 'bool'),
(operator.mod, 'i8'),
(operator.mod, 'complex128'),
(operator.pow, 'bool')}
if (op, dtype) in skip:
pytest.skip("Invalid combination {},{}".format(op, dtype))
e = DummyElement(value, dtype)
s = pd.DataFrame({"A": [e.value, e.value]}, dtype=e.dtype)
invalid = {(operator.pow, '<M8[ns]'),
(operator.mod, '<M8[ns]'),
(operator.truediv, '<M8[ns]'),
(operator.mul, '<M8[ns]'),
(operator.add, '<M8[ns]'),
(operator.pow, '<m8[ns]'),
(operator.mul, '<m8[ns]')}
if (op, dtype) in invalid:
with pytest.raises(TypeError):
op(s, e.value)
else:
# FIXME: Since dispatching to Series, this test no longer
# asserts anything meaningful
result = op(s, e.value).dtypes
expected = op(s, value).dtypes
assert_series_equal(result, expected)
@pytest.mark.parametrize('typestr, holder', [
('category', Categorical),
('M8[ns]', DatetimeArray),
('M8[ns, US/Central]', DatetimeArray),
('m8[ns]', TimedeltaArray),
('sparse', SparseArray),
])
def test_holder(typestr, holder):
blk = create_block(typestr, [1])
assert blk._holder is holder
def test_deprecated_fastpath():
# GH#19265
values = np.random.rand(3, 3)
with tm.assert_produces_warning(DeprecationWarning,
check_stacklevel=False):
make_block(values, placement=np.arange(3), fastpath=True)
def test_validate_ndim():
values = np.array([1.0, 2.0])
placement = slice(2)
msg = r"Wrong number of dimensions. values.ndim != ndim \[1 != 2\]"
with pytest.raises(ValueError, match=msg):
make_block(values, placement, ndim=2)
def test_block_shape():
idx = pd.Index([0, 1, 2, 3, 4])
a = pd.Series([1, 2, 3]).reindex(idx)
b = pd.Series(pd.Categorical([1, 2, 3])).reindex(idx)
assert (a._data.blocks[0].mgr_locs.indexer ==
b._data.blocks[0].mgr_locs.indexer)
| bsd-3-clause |
fierval/retina | DiabeticRetinopathy/Learning/learning.py | 1 | 1369 | import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.cross_validation import train_test_split
from kobra import SKSupervisedLearning
from kobra.tr_utils import time_now_str
import numpy as np
sample_file = '/kaggle/retina/reduced/features/sample/features.csv'
df = pd.read_csv(sample_file)
n_bins = 100
feats = df.ix[:, :n_bins * 2].values.astype(np.float)
levels = df['level'].values
names = df['name'].values
X_train, X_test, Y_train, Y_test = train_test_split(feats, levels, test_size = 0.2)
print "Read, train: {:d}, test: {:d}".format(X_train.shape[0], X_test.shape[0])
rf = SKSupervisedLearning(SVC, X_train, Y_train, X_test, Y_test)
#rf.estimation_params = {'max_depth' : [4, 10, 100], 'min_samples_leaf': [3, 5, 20],
# 'max_features': [1.0, 0.3, 0.1]}
# parameters tuned from the above
#rf.train_params = {'n_estimators' : 1000, 'max_features': 'sqrt'}
rf.train_params = {'C': 100, 'gamma' : 0.001, 'probability' : True, 'class_weight': 'auto'}
rf.scoring = "accuracy"
print "Instantiated classifier"
rf.fit_standard_scaler()
#rf.grid_search_classifier()
print "Starting: ", time_now_str()
a_train, a_test = rf.fit_and_validate()
print "Finished: ", time_now_str()
print "Accuracy: \n\tTrain: {:2.5f}\n\tTest: {:2.5f}".format(a_train, a_test)
rf.plot_confusion() | mit |
apbard/scipy | scipy/signal/signaltools.py | 1 | 115688 | # Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
import threading
import sys
import timeit
from . import sigtools, dlti
from ._upfirdn import upfirdn, _output_len
from scipy._lib.six import callable
from scipy._lib._version import NumpyVersion
from scipy import fftpack, linalg
from numpy import (allclose, angle, arange, argsort, array, asarray,
atleast_1d, atleast_2d, cast, dot, exp, expand_dims,
iscomplexobj, mean, ndarray, newaxis, ones, pi,
poly, polyadd, polyder, polydiv, polymul, polysub, polyval,
product, r_, ravel, real_if_close, reshape,
roots, sort, take, transpose, unique, where, zeros,
zeros_like)
import numpy as np
import math
from scipy.special import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
from .filter_design import cheby1, _validate_sos
from .fir_filter_design import firwin
if sys.version_info.major >= 3 and sys.version_info.minor >= 5:
from math import gcd
else:
from fractions import gcd
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'sosfilt', 'deconvolve', 'hilbert', 'hilbert2',
'cmplx_sort', 'unique_roots', 'invres', 'invresz', 'residue',
'residuez', 'resample', 'resample_poly', 'detrend',
'lfilter_zi', 'sosfilt_zi', 'sosfiltfilt', 'choose_conv_method',
'filtfilt', 'decimate', 'vectorstrength']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
_rfft_mt_safe = (NumpyVersion(np.__version__) >= '1.9.0.dev-e24486e')
_rfft_lock = threading.Lock()
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm'"
" (or 'symmetric').")
val = boundary << 2
return val
def _inputs_swap_needed(mode, shape1, shape2):
"""
If in 'valid' mode, returns whether or not the input arrays need to be
swapped depending on whether `shape1` is at least as large as `shape2` in
every dimension.
This is important for some of the correlation and convolution
implementations in this module, where the larger array input needs to come
before the smaller array input when operating in this mode.
Note that if the mode provided is not 'valid', False is immediately
returned.
"""
if mode == 'valid':
ok1, ok2 = True, True
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
ok1 = False
if not d2 >= d1:
ok2 = False
if not (ok1 or ok2):
raise ValueError("For 'valid' mode, one must be at least "
"as large as the other in every dimension")
return not ok1
return False
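# A hand-worked illustration of the helper above (not part of the SciPy
# source):
#
#     _inputs_swap_needed('valid', (10, 10), (3, 3))   # False -> keep order
#     _inputs_swap_needed('valid', (3, 3), (10, 10))   # True  -> swap inputs
#     _inputs_swap_needed('same', (3, 3), (10, 10))    # False -> mode ignored
#
# A mixed case such as (10, 2) vs (3, 3) raises ValueError, because neither
# shape is at least as large as the other in every dimension.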
def correlate(in1, in2, mode='full', method='auto'):
r"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] = sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')`` then
.. math::
z[k] = (x * y)(k - N + 1)
= \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
and :math:`y_m` is 0 when m is outside the range of y.
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + np.random.randn(len(sig))
>>> corr = signal.correlate(sig_noise, np.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = np.arange(64, len(sig), 128)
>>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(sig_noise)
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(corr)
>>> ax_corr.plot(clock, corr[clock], 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# Don't use _valfrommode, since correlate should not accept numeric modes
try:
val = _modedict[mode]
except KeyError:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
# this either calls fftconvolve or this function with method=='direct'
if method in ('fft', 'auto'):
return convolve(in1, _reverse_and_conj(in2), mode, method)
# fastpath to faster numpy.correlate for 1d inputs when possible
if _np_conv_ok(in1, in2, mode):
return np.correlate(in1, in2, mode)
# _correlateND is far slower when in2.size > in1.size, so swap them
# and then undo the effect afterward if mode == 'full'. Also, it fails
# with 'valid' mode if in2 is larger than in1, so swap those, too.
# Don't swap inputs for 'same' mode, since shape of in1 matters.
swapped_inputs = ((mode == 'full') and (in2.size > in1.size) or
_inputs_swap_needed(mode, in1.shape, in2.shape))
if swapped_inputs:
in1, in2 = in2, in1
if mode == 'valid':
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
if swapped_inputs:
# Reverse and conjugate to undo the effect of swapping inputs
z = _reverse_and_conj(z)
return z
def _centered(arr, newshape):
# Return the center newshape portion of the array.
newshape = asarray(newshape)
currshape = array(arr.shape)
startind = (currshape - newshape) // 2
endind = startind + newshape
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
As of v0.19, `convolve` automatically chooses this method or the direct
method based on an estimation of which is faster.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Autocorrelation of white noise is an impulse.
>>> from scipy import signal
>>> sig = np.random.randn(1000)
>>> autocorr = signal.fftconvolve(sig, sig[::-1], mode='full')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag) = plt.subplots(2, 1)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('White noise')
>>> ax_mag.plot(np.arange(-len(sig)+1,len(sig)), autocorr)
>>> ax_mag.set_title('Autocorrelation')
>>> fig.tight_layout()
>>> fig.show()
Gaussian blur implemented using FFT convolution. Notice the dark borders
around the image, due to the zero-padding beyond its boundaries.
The `convolve2d` function allows for other types of image boundaries,
but is far slower.
>>> from scipy import misc
>>> face = misc.face(gray=True)
>>> kernel = np.outer(signal.gaussian(70, 8), signal.gaussian(70, 8))
>>> blurred = signal.fftconvolve(face, kernel, mode='same')
>>> fig, (ax_orig, ax_kernel, ax_blurred) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_kernel.imshow(kernel, cmap='gray')
>>> ax_kernel.set_title('Gaussian kernel')
>>> ax_kernel.set_axis_off()
>>> ax_blurred.imshow(blurred, cmap='gray')
>>> ax_blurred.set_title('Blurred')
>>> ax_blurred.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if in1.ndim == in2.ndim == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, complex) or
np.issubdtype(in2.dtype, complex))
shape = s1 + s2 - 1
# Check that input sizes are compatible with 'valid' mode
if _inputs_swap_needed(mode, s1, s2):
# Convolution is commutative; order doesn't have any effect on output
in1, s1, in2, s2 = in2, s2, in1, s1
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [fftpack.helper.next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
# Pre-1.9 NumPy FFT routines are not threadsafe. For older NumPys, make
# sure we only call rfftn/irfftn from one thread at a time.
if not complex_result and (_rfft_mt_safe or _rfft_lock.acquire(False)):
try:
sp1 = np.fft.rfftn(in1, fshape)
sp2 = np.fft.rfftn(in2, fshape)
ret = (np.fft.irfftn(sp1 * sp2, fshape)[fslice].copy())
finally:
if not _rfft_mt_safe:
_rfft_lock.release()
else:
# If we're here, it's either because we need a complex result, or we
# failed to acquire _rfft_lock (meaning rfftn isn't threadsafe and
# is already in use by another thread). In either case, use the
# (threadsafe but slower) SciPy complex-FFT routines instead.
sp1 = fftpack.fftn(in1, fshape)
sp2 = fftpack.fftn(in2, fshape)
ret = fftpack.ifftn(sp1 * sp2)[fslice].copy()
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full'.")
def _numeric_arrays(arrays, kinds='buifc'):
"""
See if a list of arrays are all numeric.
Parameters
----------
arrays : ndarray or list of ndarrays
arrays to check if numeric.
kinds : string-like
The dtypes of the arrays to be checked. If the dtype.kind of
the arrays is not in this string the function returns False;
otherwise it returns True.
"""
if type(arrays) == ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _prod(iterable):
"""
Product of a list of numbers.
Faster than np.prod for short lists like array shapes.
"""
product = 1
for x in iterable:
product *= x
return product
def _fftconv_faster(x, h, mode):
"""
See if using `fftconvolve` or `_correlateND` is faster. The boolean value
returned depends on the sizes and shapes of the input values.
The big O ratios were found to hold across different machines, which makes
sense as it's the ratio that matters (the effective speed of the computer
is found in both big O constants). Regardless, this had been tuned on an
early 2015 MacBook Pro with 8GB RAM and an Intel i5 processor.
"""
if mode == 'full':
out_shape = [n + k - 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 10963.92823819 if x.ndim == 1 else 8899.1104874
elif mode == 'same':
out_shape = x.shape
if x.ndim == 1:
if h.size <= x.size:
big_O_constant = 7183.41306773
else:
big_O_constant = 856.78174111
else:
big_O_constant = 34519.21021589
elif mode == 'valid':
out_shape = [n - k + 1 for n, k in zip(x.shape, h.shape)]
big_O_constant = 41954.28006344 if x.ndim == 1 else 66453.24316434
else:
raise ValueError('mode is invalid')
# see whether the Fourier transform convolution method or the direct
# convolution method is faster (discussed in scikit-image PR #1792)
direct_time = (x.size * h.size * _prod(out_shape))
fft_time = sum(n * math.log(n) for n in (x.shape + h.shape +
tuple(out_shape)))
return big_O_constant * fft_time < direct_time
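# Rough numerical illustration of the heuristic above (hand-worked, not part
# of the SciPy source): for a 1-D 'full' convolution with x.size = 10000 and
# h.size = 1000, out_shape is (10999,), so
#
#     direct_time ~ 10000 * 1000 * 10999                  ~ 1.1e11
#     fft_time    ~ sum(n * log(n) over the three sizes)  ~ 2.0e5
#
# and big_O_constant * fft_time ~ 1.1e4 * 2.0e5 ~ 2.2e9 < 1.1e11, so the FFT
# method is predicted to be faster; for a very small kernel the direct
# method wins instead.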
def _reverse_and_conj(x):
"""
Reverse array `x` in all dimensions and perform the complex conjugate
"""
reverse = [slice(None, None, -1)] * x.ndim
return x[reverse].conj()
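# Reversing and conjugating one operand is what lets `correlate` (above) and
# `convolve` (below) delegate to each other: for the supported modes,
# correlate(a, b) is computed as convolve(a, _reverse_and_conj(b)) and
# vice versa.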
def _np_conv_ok(volume, kernel, mode):
"""
See if numpy supports convolution of `volume` and `kernel` (i.e. both are
1D ndarrays and of the appropriate shape). Numpy's 'same' mode uses the
size of the larger input, while Scipy's uses the size of the first input.
"""
np_conv_ok = volume.ndim == kernel.ndim == 1
return np_conv_ok and (volume.size >= kernel.size or mode != 'same')
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
def choose_conv_method(in1, in2, mode='full', measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`, but can also be used when performing many
convolutions of the same input shapes and dtypes, determining
which method to use for all of them, either to avoid the overhead of the
'auto' option or to use accurate real-world measurements.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
For large n, ``measure=False`` is accurate and can quickly determine the
fastest method to perform the convolution. However, this is not as
accurate for small n (when any dimension in the input or output is small).
In practice, we found that this function estimates the faster method up to
a multiplicative factor of 5 (i.e., the estimated method is *at most* 5
times slower than the fastest method). The estimation values were tuned on
an early 2015 MacBook Pro with 8GB RAM but we found that the prediction
held *fairly* accurately across different machines.
If ``measure=True``, time the convolutions. Because this function uses
`fftconvolve`, an error will be thrown if it does not support the inputs.
There are cases when `fftconvolve` supports the inputs but this function
returns `direct` (e.g., to protect against floating point integer
precision).
.. versionadded:: 0.19
Examples
--------
Estimate the fastest method for a given input:
>>> from scipy import signal
>>> a = np.random.randn(1000)
>>> b = np.random.randn(1000000)
>>> method = signal.choose_conv_method(a, b, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> c = np.random.randn(1000)
>>> d = np.random.randn(1000000)
>>> # `method` works with correlate and convolve
>>> corr1 = signal.correlate(a, b, mode='same', method=method)
>>> corr2 = signal.correlate(c, d, mode='same', method=method)
>>> conv1 = signal.convolve(a, b, mode='same', method=method)
>>> conv2 = signal.convolve(c, d, mode='same', method=method)
"""
volume = asarray(in1)
kernel = asarray(in2)
if measure:
times = {}
for method in ['fft', 'direct']:
times[method] = _timeit_fast(lambda: convolve(volume, kernel,
mode=mode, method=method))
chosen_method = 'fft' if times['fft'] < times['direct'] else 'direct'
return chosen_method, times
# fftconvolve doesn't support complex256
fftconv_unsup = "complex256" if sys.maxsize > 2**32 else "complex192"
if hasattr(np, fftconv_unsup):
if volume.dtype == fftconv_unsup or kernel.dtype == fftconv_unsup:
return 'direct'
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds='ui') for x in [volume, kernel]]):
max_value = int(np.abs(volume).max()) * int(np.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2**np.finfo('float').nmant - 1:
return 'direct'
if _numeric_arrays([volume, kernel], kinds='b'):
return 'direct'
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return 'fft'
return 'direct'
def convolve(in1, in2, mode='full', method='auto'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the convolution.
``direct``
The convolution is determined directly from sums, the definition of
convolution.
``fft``
The Fourier Transform is used to perform the convolution by calling
`fftconvolve`.
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See Notes for more detail.
.. versionadded:: 0.19.0
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
See Also
--------
numpy.polymul : performs polynomial multiplication (same operation, but
also accepts poly1d objects)
choose_conv_method : chooses the fastest appropriate convolution method
fftconvolve
Notes
-----
By default, `convolve` and `correlate` use ``method='auto'``, which calls
`choose_conv_method` to choose the fastest method using pre-computed
values (`choose_conv_method` can also measure real-world timing with a
keyword argument). Because `fftconvolve` relies on floating point numbers,
there are certain constraints that may force `method=direct` (more detail
in `choose_conv_method` docstring).
Examples
--------
Smooth a square pulse using a Hann window:
>>> from scipy import signal
>>> sig = np.repeat([0., 1., 0.], 100)
>>> win = signal.hann(50)
>>> filtered = signal.convolve(sig, win, mode='same') / sum(win)
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.set_title('Original pulse')
>>> ax_orig.margins(0, 0.1)
>>> ax_win.plot(win)
>>> ax_win.set_title('Filter impulse response')
>>> ax_win.margins(0, 0.1)
>>> ax_filt.plot(filtered)
>>> ax_filt.set_title('Filtered signal')
>>> ax_filt.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
volume = asarray(in1)
kernel = asarray(in2)
if volume.ndim == kernel.ndim == 0:
return volume * kernel
if _inputs_swap_needed(mode, volume.shape, kernel.shape):
# Convolution is commutative; order doesn't have any effect on output
volume, kernel = kernel, volume
if method == 'auto':
method = choose_conv_method(volume, kernel, mode=mode)
if method == 'fft':
out = fftconvolve(volume, kernel, mode=mode)
result_type = np.result_type(volume, kernel)
if result_type.kind in {'u', 'i'}:
out = np.around(out)
return out.astype(result_type)
# fastpath to faster numpy.convolve for 1d inputs when possible
if _np_conv_ok(volume, kernel, mode):
return np.convolve(volume, kernel, mode)
return correlate(volume, _reverse_and_conj(kernel), mode, 'direct')
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the input array. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `a`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `a`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * volume.ndim
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(volume.ndim):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
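# Possible usage sketch for `medfilt` (illustrative, not part of the upstream
# docstring), assuming the zero-padded edge handling of `_order_filterND`:
# a 3-point window suppresses the isolated spike, and the first output value
# is the median of the zero-padded window [0, 2, 80], i.e. 2.
#
#     >>> from scipy import signal
#     >>> signal.medfilt([2., 80., 6., 3., 5., 1., 4.], kernel_size=3)
#     array([ 2.,  6.,  6.,  5.,  3.,  4.,  1.])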
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or array_like, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * im.ndim
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') /
product(mysize, axis=0) - lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
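# Possible usage sketch for `wiener` (illustrative, not part of the upstream
# docstring): when `noise` is left as None, the noise power is estimated from
# the mean of the local variance, as in the code above.
#
#     >>> from scipy import signal, misc
#     >>> face = misc.face(gray=True).astype(np.float64)
#     >>> noisy = face + 20 * np.random.standard_normal(face.shape)
#     >>> smoothed = signal.wiener(noisy, mysize=5)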
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
Examples
--------
Compute the gradient of an image by 2D convolution with a complex Scharr
operator. (Horizontal operator is real, vertical is imaginary.) Use
symmetric boundary condition to avoid creating edges at the image
boundaries.
>>> from scipy import signal
>>> from scipy import misc
>>> ascent = misc.ascent()
>>> scharr = np.array([[ -3-3j, 0-10j, +3 -3j],
... [-10+0j, 0+ 0j, +10 +0j],
... [ -3+3j, 0+10j, +3 +3j]]) # Gx + j*Gy
>>> grad = signal.convolve2d(ascent, scharr, boundary='symm', mode='same')
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_mag, ax_ang) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(ascent, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_mag.imshow(np.absolute(grad), cmap='gray')
>>> ax_mag.set_title('Gradient magnitude')
>>> ax_mag.set_axis_off()
>>> ax_ang.imshow(np.angle(grad), cmap='hsv') # hsv is cyclic, like angles
>>> ax_ang.set_title('Gradient orientation')
>>> ax_ang.set_axis_off()
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('convolve2d inputs must both be 2D arrays')
if _inputs_swap_needed(mode, in1.shape, in2.shape):
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
If operating in 'valid' mode, either `in1` or `in2` must be
at least as large as the other in every dimension.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> from scipy import signal
>>> from scipy import misc
>>> face = misc.face(gray=True) - misc.face(gray=True).mean()
>>> template = np.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + np.random.randn(*face.shape) * 50 # add noise
>>> corr = signal.correlate2d(face, template, boundary='symm', mode='same')
>>> y, x = np.unravel_index(np.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
>>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,
... figsize=(6, 15))
>>> ax_orig.imshow(face, cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(template, cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(corr, cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(x, y, 'ro')
>>> fig.show()
"""
in1 = asarray(in1)
in2 = asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError('correlate2d inputs must both be 2D arrays')
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
if swapped_inputs:
out = out[::-1, ::-1]
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
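# Possible usage sketch for `medfilt2d` (illustrative, not part of the
# upstream docstring): a single outlier pixel is replaced by the median of
# its 3x3 neighbourhood.
#
#     >>> from scipy import signal
#     >>> img = np.arange(25, dtype=np.float64).reshape(5, 5)
#     >>> img[2, 2] = 1000.
#     >>> filtered = signal.medfilt2d(img, kernel_size=3)
#     >>> filtered[2, 2]
#     13.0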
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a), len(b)) - 1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
lfiltic : Construct initial conditions for `lfilter`.
lfilter_zi : Compute initial state (steady state of step response) for
`lfilter`.
filtfilt : A forward-backward filter, to obtain a filter with linear phase.
savgol_filter : A Savitzky-Golay filter.
sosfilt: Filter data using cascaded second-order sections.
sosfiltfilt: A forward-backward filter using second-order sections.
Notes
-----
The filter function is implemented as a direct form II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[M]*x[n-M]
            - a[1]*y[n-1] - ... - a[N]*y[n-N]
where `M` is the degree of the numerator, `N` is the degree of the
denominator, and `n` is the sample number. It is implemented using
the following difference equations (assuming M = N)::
a[0]*y[n] = b[0] * x[n] + d[0][n-1]
d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1]
d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1]
...
d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1]
d[N-1][n] = b[N] * x[n] - a[N] * y[n]
where `d` are the state variables.
The rational transfer function describing this filter in the
z-transform domain is::
                    -1              -M
        b[0] + b[1]z  + ... + b[M] z
Y(z) = -------------------------------- X(z)
                    -1              -N
        a[0] + a[1]z  + ... + a[N] z
Examples
--------
Generate a noisy signal to be filtered:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 201)
>>> x = (np.sin(2*np.pi*0.75*t*(1-t) + 2.1) +
... 0.1*np.sin(2*np.pi*1.25*t + 1) +
... 0.18*np.cos(2*np.pi*3.85*t))
>>> xn = x + np.random.randn(len(t)) * 0.08
Create an order 3 lowpass butterworth filter:
>>> b, a = signal.butter(3, 0.05)
Apply the filter to xn. Use lfilter_zi to choose the initial condition of
the filter:
>>> zi = signal.lfilter_zi(b, a)
>>> z, _ = signal.lfilter(b, a, xn, zi=zi*xn[0])
Apply the filter again, to have a result filtered at an order the same as
filtfilt:
>>> z2, _ = signal.lfilter(b, a, z, zi=zi*z[0])
Use filtfilt to apply the filter:
>>> y = signal.filtfilt(b, a, xn)
Plot the original signal and the various filtered versions:
>>> plt.figure()
>>> plt.plot(t, xn, 'b', alpha=0.75)
>>> plt.plot(t, z, 'r--', t, z2, 'r', t, y, 'k')
>>> plt.legend(('noisy signal', 'lfilter, once', 'lfilter, twice',
... 'filtfilt'), loc='best')
>>> plt.grid(True)
>>> plt.show()
"""
a = np.atleast_1d(a)
if len(a) == 1:
# This path only supports types fdgFDGO to mirror _linear_filter below.
# Any of b, a, x, or zi can set the dtype, but there is no default
# casting of other types; instead a NotImplementedError is raised.
b = np.asarray(b)
a = np.asarray(a)
if b.ndim != 1 and a.ndim != 1:
raise ValueError('object of too small depth for desired array')
x = np.asarray(x)
inputs = [b, a, x]
if zi is not None:
# _linear_filter does not broadcast zi, but does do expansion of
# singleton dims.
zi = np.asarray(zi)
if zi.ndim != x.ndim:
raise ValueError('object of too small depth for desired array')
expected_shape = list(x.shape)
expected_shape[axis] = b.shape[0] - 1
expected_shape = tuple(expected_shape)
# check the trivial case where zi is the right shape first
if zi.shape != expected_shape:
strides = zi.ndim * [None]
if axis < 0:
axis += zi.ndim
for k in range(zi.ndim):
if k == axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == expected_shape[k]:
strides[k] = zi.strides[k]
elif k != axis and zi.shape[k] == 1:
strides[k] = 0
else:
raise ValueError('Unexpected shape for zi: expected '
'%s, found %s.' %
(expected_shape, zi.shape))
zi = np.lib.stride_tricks.as_strided(zi, expected_shape,
strides)
inputs.append(zi)
dtype = np.result_type(*inputs)
if dtype.char not in 'fdgFDGO':
raise NotImplementedError("input type '%s' not supported" % dtype)
b = np.array(b, dtype=dtype)
a = np.array(a, dtype=dtype, copy=False)
b /= a[0]
x = np.array(x, dtype=dtype, copy=False)
out_full = np.apply_along_axis(lambda y: np.convolve(b, y), axis, x)
ind = out_full.ndim * [slice(None)]
if zi is not None:
ind[axis] = slice(zi.shape[axis])
out_full[ind] += zi
ind[axis] = slice(out_full.shape[axis] - len(b) + 1)
out = out_full[ind]
if zi is None:
return out
else:
ind[axis] = slice(out_full.shape[axis] - len(b) + 1, None)
zf = out_full[ind]
return out, zf
else:
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter given input and output vectors.
Given a linear filter (b, a) and initial conditions on the output `y`
and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N = len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M = len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``,
where ``K = max(M, N)``.
See Also
--------
lfilter, lfilter_zi
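Examples
--------
A minimal sketch (illustrative): use the tail of a previous `lfilter` run to
build initial conditions so that filtering the remainder of the signal
continues without a transient.
>>> from scipy import signal
>>> b, a = signal.butter(3, 0.25)
>>> x = np.sin(np.arange(50.0) / 5.0)
>>> y_full = signal.lfilter(b, a, x)
>>> y1 = signal.lfilter(b, a, x[:25])
>>> zi = signal.lfiltic(b, a, y1[::-1], x[24::-1])
>>> y2, _ = signal.lfilter(b, a, x[25:], zi=zi)
>>> np.allclose(y_full, np.concatenate([y1, y2]))
True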
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
if y.dtype.kind in 'bui':
# ensure calculations are floating point
y = y.astype(np.float64)
zi = zeros(K, y.dtype)
if x is None:
x = zeros(M, y.dtype)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = np.sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= np.sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves ``divisor`` out of ``signal`` using inverse filtering.
Returns the quotient and remainder such that
``signal = convolve(divisor, quotient) + remainder``
Parameters
----------
signal : array_like
Signal data, typically a recorded signal
divisor : array_like
Divisor data, typically an impulse response or filter that was
applied to the original signal
Returns
-------
quotient : ndarray
Quotient, typically the recovered original signal
remainder : ndarray
Remainder
Examples
--------
Deconvolve a signal that's been filtered:
>>> from scipy import signal
>>> original = [0, 1, 0, 0, 1, 1, 0, 0]
>>> impulse_response = [2, 1]
>>> recorded = signal.convolve(impulse_response, original)
>>> recorded
array([0, 2, 1, 0, 2, 3, 1, 0, 0])
>>> recovered, remainder = signal.deconvolve(recorded, impulse_response)
>>> recovered
array([ 0., 1., 0., 0., 1., 1., 0., 0.])
See Also
--------
numpy.polydiv : performs polynomial division (same operation, but
also accepts poly1d objects)
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = zeros(N - D + 1, float)
input[0] = 1
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
See Also
--------
scipy.fftpack.hilbert : Return Hilbert transform of a periodic sequence x.
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
Examples
--------
In this example we use the Hilbert transform to determine the amplitude
envelope and instantaneous frequency of an amplitude-modulated signal.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.signal import hilbert, chirp
>>> duration = 1.0
>>> fs = 400.0
>>> samples = int(fs*duration)
>>> t = np.arange(samples) / fs
We create a chirp whose frequency increases from 20 Hz to 100 Hz and
apply an amplitude modulation.
>>> signal = chirp(t, 20.0, t[-1], 100.0)
>>> signal *= (1.0 + 0.5 * np.sin(2.0*np.pi*3.0*t) )
The amplitude envelope is given by the magnitude of the analytic signal. The
instantaneous frequency can be obtained by differentiating the
instantaneous phase with respect to time. The instantaneous phase corresponds
to the phase angle of the analytic signal.
>>> analytic_signal = hilbert(signal)
>>> amplitude_envelope = np.abs(analytic_signal)
>>> instantaneous_phase = np.unwrap(np.angle(analytic_signal))
>>> instantaneous_frequency = (np.diff(instantaneous_phase) /
... (2.0*np.pi) * fs)
>>> fig = plt.figure()
>>> ax0 = fig.add_subplot(211)
>>> ax0.plot(t, signal, label='signal')
>>> ax0.plot(t, amplitude_envelope, label='envelope')
>>> ax0.set_xlabel("time in seconds")
>>> ax0.legend()
>>> ax1 = fig.add_subplot(212)
>>> ax1.plot(t[1:], instantaneous_frequency)
>>> ax1.set_xlabel("time in seconds")
>>> ax1.set_ylim(0.0, 120.0)
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
.. [2] Leon Cohen, "Time-Frequency Analysis", 1995. Chapter 2.
.. [3] Alan V. Oppenheim, Ronald W. Schafer. Discrete-Time Signal
Processing, Third Edition, 2009. Chapter 12.
ISBN 13: 978-1292-02572-8
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fftpack.fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if x.ndim > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = fftpack.ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
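Examples
--------
A minimal usage sketch (the analytic signal has the same shape as the input):
>>> from scipy import signal
>>> x = np.random.randn(8, 8)
>>> signal.hilbert2(x).shape
(8, 8)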
"""
x = atleast_2d(x)
if x.ndim > 2:
raise ValueError("x must be 2-D.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fftpack.fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
# Build the 1-D spectral masks h1 and h2 in place.
for h, N1 in zip((h1, h2), N):
    if N1 % 2 == 0:
        h[0] = h[N1 // 2] = 1
        h[1:N1 // 2] = 2
    else:
        h[0] = 1
        h[1:(N1 + 1) // 2] = 2
h = h1[:, newaxis] * h2[newaxis, :]
k = x.ndim
while k > 2:
h = h[:, newaxis]
k -= 1
x = fftpack.ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"""Sort roots based on magnitude.
Parameters
----------
p : array_like
The roots to sort, as a 1-D array.
Returns
-------
p_sorted : ndarray
Sorted roots.
indx : ndarray
Array of indices needed to sort the input `p`.
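Examples
--------
An illustrative sketch: sort a small set of roots by magnitude.
>>> from scipy import signal
>>> p_sorted, indx = signal.cmplx_sort([3, 1 + 1j, 4, 1])
>>> indx
array([3, 1, 0, 2])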
"""
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
         b(s)     b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
 H(s) = ------ = ------------------------------------------
         a(s)     a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
         r[0]       r[1]             r[-1]
     = -------- + -------- + ... + --------- + k(s)
       (s-p[0])   (s-p[1])         (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
       r[i]      r[i+1]              r[i+n-1]
   -------- + ----------- + ... + -----------
   (s-p[i])   (s-p[i])**2         (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `invresz`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residue, invresz, unique_roots
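Examples
--------
An illustrative sketch: reconstruct ``H(s) = 1/(s + 1) + 2/(s + 3)``, which
equals ``(3 s + 5) / (s**2 + 4 s + 3)``.
>>> from scipy import signal
>>> b, a = signal.invres([1.0, 2.0], [-1.0, -3.0], [])
>>> b.tolist(), a.tolist()
([3.0, 5.0], [1.0, 4.0, 3.0])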
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * atleast_1d(poly(t2)))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
         b(s)     b[0] s**(M) + b[1] s**(M-1) + ... + b[M]
 H(s) = ------ = ------------------------------------------
         a(s)     a[0] s**(N) + a[1] s**(N-1) + ... + a[N]
then the partial-fraction expansion H(s) is defined as::
         r[0]       r[1]             r[-1]
     = -------- + -------- + ... + --------- + k(s)
       (s-p[0])   (s-p[1])         (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
       r[i]      r[i+1]              r[i+n-1]
   -------- + ----------- + ... + -----------
   (s-p[i])   (s-p[i])**2         (s-p[i])**n
This function is used for polynomials in positive powers of s or z,
such as analog filters or digital filters in controls engineering. For
negative powers of z (typical for digital filters in DSP), use `residuez`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, residuez, numpy.poly, unique_roots
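Examples
--------
A minimal usage sketch (output omitted because the pole ordering and exact
floating-point values come from a numerical root-finding step). Analytically,
``H(s) = (3 s + 5) / (s**2 + 4 s + 3) = 2/(s + 3) + 1/(s + 1)``, so the
residues are 2 and 1 at the poles -3 and -1, with no direct polynomial term.
>>> from scipy import signal
>>> r, p, k = signal.residue([3, 5], [1, 4, 3])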
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, pout[n]) / polyval(an, pout[n]) /
factorial(sig - m))
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
         b(z)     b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
 H(z) = ------ = ------------------------------------------
         a(z)     a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
         r[0]                   r[-1]
 = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
   (1-p[0]z**(-1))         (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
        r[i]              r[i+1]                    r[i+n-1]
   -------------- + ------------------ + ... + ------------------
   (1-p[i]z**(-1))   (1-p[i]z**(-1))**2        (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `residue`.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invresz, residue, unique_roots
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
if krev == []:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion.
If `M` is the degree of numerator `b` and `N` the degree of denominator
`a`::
         b(z)     b[0] + b[1] z**(-1) + ... + b[M] z**(-M)
 H(z) = ------ = ------------------------------------------
         a(z)     a[0] + a[1] z**(-1) + ... + a[N] z**(-N)
then the partial-fraction expansion H(z) is defined as::
         r[0]                   r[-1]
 = --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
   (1-p[0]z**(-1))         (1-p[-1]z**(-1))
If there are any repeated roots (closer than `tol`), then the partial
fraction expansion has terms like::
        r[i]              r[i+1]                    r[i+n-1]
   -------------- + ------------------ + ... + ------------------
   (1-p[i]z**(-1))   (1-p[i]z**(-1))**2        (1-p[i]z**(-1))**n
This function is used for polynomials in negative powers of z,
such as digital filters in DSP. For positive powers, use `invres`.
Parameters
----------
r : array_like
Residues.
p : array_like
Poles.
k : array_like
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
See Also
--------
residuez, unique_roots, invres
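Examples
--------
An illustrative sketch: a single first-order term
``H(z) = 1 / (1 - 0.5 z**(-1))`` has ``b = [1]`` and ``a = [1, -0.5]``.
>>> from scipy import signal
>>> b, a = signal.invresz([1.0], [0.5], [])
>>> b.tolist(), a.tolist()
([1.0], [1.0, -0.5])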
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * atleast_1d(poly(t2)))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The argument `window` controls a Fourier-domain window that tapers
the Fourier spectrum before zero-padding to alleviate ringing in
the resampled values for sampled signals you didn't intend to be
interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * len(x) / num``.
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
As noted, `resample` uses FFT transformations, which can be very
slow if the number of input or output samples is large and prime;
see `scipy.fftpack.fft`.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f = signal.resample(y, 100)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'go-', xnew, f, '.-', 10, y[0], 'ro')
>>> plt.legend(['data', 'resampled'], loc='best')
>>> plt.show()
"""
x = asarray(x)
X = fftpack.fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftpack.fftfreq(Nx))
elif isinstance(window, ndarray):
if window.shape != (Nx,):
raise ValueError('window must have the same length as data')
W = window
else:
W = fftpack.ifftshift(get_window(window, Nx))
newshape = [1] * x.ndim
newshape[axis] = len(W)
W.shape = newshape
X = X * W
W.shape = (Nx,)
sl = [slice(None)] * x.ndim
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
sl[axis] = slice(0, (N + 1) // 2)
Y[sl] = X[sl]
sl[axis] = slice(-(N - 1) // 2, None)
Y[sl] = X[sl]
if N % 2 == 0: # special treatment if low number of points is even. So far we have set Y[-N/2]=X[-N/2]
if N < Nx: # if downsampling
sl[axis] = slice(N//2,N//2+1,None) # select the component at frequency N/2
Y[sl] += X[sl] # add the component of X at N/2
elif N < num: # if upsampling
sl[axis] = slice(num-N//2,num-N//2+1,None) # select the component at frequency -N/2
Y[sl] /= 2 # halve the component at -N/2
temp = Y[sl]
sl[axis] = slice(N//2,N//2+1,None) # select the component at +N/2
Y[sl] = temp # set that equal to the component at -N/2
y = fftpack.ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def resample_poly(x, up, down, axis=0, window=('kaiser', 5.0)):
"""
Resample `x` along the given axis using polyphase filtering.
The signal `x` is upsampled by the factor `up`, a zero-phase low-pass
FIR filter is applied, and then it is downsampled by the factor `down`.
The resulting sample rate is ``up / down`` times the original sample
rate. Values beyond the boundary of the signal are assumed to be zero
during the filtering step.
Parameters
----------
x : array_like
The data to be resampled.
up : int
The upsampling factor.
down : int
The downsampling factor.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : string, tuple, or array_like, optional
Desired window to use to design the low-pass filter, or the FIR filter
coefficients to employ. See below for details.
Returns
-------
resampled_x : array
The resampled array.
See Also
--------
decimate : Downsample the signal after applying an FIR or IIR filter.
resample : Resample up or down using the FFT method.
Notes
-----
This polyphase method will likely be faster than the Fourier method
in `scipy.signal.resample` when the number of samples is large and
prime, or when the number of samples is large and `up` and `down`
share a large greatest common divisor. The length of the FIR
filter used will depend on ``max(up, down) // gcd(up, down)``, and
the number of operations during polyphase filtering will depend on
the filter length and `down` (see `scipy.signal.upfirdn` for details).
The argument `window` specifies the FIR low-pass filter design.
If `window` is an array_like it is assumed to be the FIR filter
coefficients. Note that the FIR filter is applied after the upsampling
step, so it should be designed to operate on a signal at a sampling
frequency higher than the original by a factor of `up//gcd(up, down)`.
This function's output will be centered with respect to this array, so it
is best to pass a symmetric filter with an odd number of samples if, as
is usually the case, a zero-phase filter is desired.
For any other type of `window`, the functions `scipy.signal.get_window`
and `scipy.signal.firwin` are called to generate the appropriate filter
coefficients.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from ``dx`` to ``dx * down / float(up)``.
Examples
--------
Note that the end of the resampled data rises to meet the first
sample of the next cycle for the FFT method, and gets closer to zero
for the polyphase method:
>>> from scipy import signal
>>> x = np.linspace(0, 10, 20, endpoint=False)
>>> y = np.cos(-x**2/6.0)
>>> f_fft = signal.resample(y, 100)
>>> f_poly = signal.resample_poly(y, 100, 20)
>>> xnew = np.linspace(0, 10, 100, endpoint=False)
>>> import matplotlib.pyplot as plt
>>> plt.plot(xnew, f_fft, 'b.-', xnew, f_poly, 'r.-')
>>> plt.plot(x, y, 'ko-')
>>> plt.plot(10, y[0], 'bo', 10, 0., 'ro') # boundaries
>>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best')
>>> plt.show()
"""
x = asarray(x)
up = int(up)
down = int(down)
if up < 1 or down < 1:
raise ValueError('up and down must be >= 1')
# Determine our up and down factors
# Use a rational approximation to save computation time on really long
# signals
g_ = gcd(up, down)
up //= g_
down //= g_
if up == down == 1:
return x.copy()
n_out = x.shape[axis] * up
n_out = n_out // down + bool(n_out % down)
if isinstance(window, (list, np.ndarray)):
window = array(window) # use array to force a copy (we modify it)
if window.ndim > 1:
raise ValueError('window must be 1-D')
half_len = (window.size - 1) // 2
h = window
else:
# Design a linear-phase low-pass FIR filter
max_rate = max(up, down)
f_c = 1. / max_rate # cutoff of FIR filter (rel. to Nyquist)
half_len = 10 * max_rate # reasonable cutoff for our sinc-like function
h = firwin(2 * half_len + 1, f_c, window=window)
h *= up
# Zero-pad our filter to put the output samples at the center
n_pre_pad = (down - half_len % down)
n_post_pad = 0
n_pre_remove = (half_len + n_pre_pad) // down
# We should rarely need to do this given our filter lengths...
while _output_len(len(h) + n_pre_pad + n_post_pad, x.shape[axis],
up, down) < n_out + n_pre_remove:
n_post_pad += 1
h = np.concatenate((np.zeros(n_pre_pad), h, np.zeros(n_post_pad)))
n_pre_remove_end = n_pre_remove + n_out
# filter then remove excess
y = upfirdn(h, x, up, down, axis=axis)
keep = [slice(None), ]*x.ndim
keep[axis] = slice(n_pre_remove, n_pre_remove_end)
return y[keep]
def vectorstrength(events, period):
'''
Determine the vector strength of the events corresponding to the given
period.
The vector strength is a measure of phase synchrony, how well the
timing of the events is synchronized to a single period of a periodic
signal.
If multiple periods are used, calculate the vector strength of each.
This is called the "resonating vector strength".
Parameters
----------
events : 1D array_like
An array of time points containing the timing of the events.
period : float or array_like
The period of the signal that the events should synchronize to.
The period is in the same units as `events`. It can also be an array
of periods, in which case the outputs are arrays of the same length.
Returns
-------
strength : float or 1D array
The strength of the synchronization. 1.0 is perfect synchronization
and 0.0 is no synchronization. If `period` is an array, this is also
an array with each element containing the vector strength at the
corresponding period.
phase : float or array
The phase that the events are most strongly synchronized to in radians.
If `period` is an array, this is also an array with each element
containing the phase for the corresponding period.
References
----------
van Hemmen, JL, Longtin, A, and Vollmayr, AN. Testing resonating vector
strength: Auditory system, electric fish, and noise.
Chaos 21, 047508 (2011);
:doi:`10.1063/1.3670512`.
van Hemmen, JL. Vector strength after Goldberg, Brown, and von Mises:
biological and mathematical perspectives. Biol Cybern.
2013 Aug;107(4):385-96. :doi:`10.1007/s00422-013-0561-7`.
van Hemmen, JL and Vollmayr, AN. Resonating vector strength: what happens
when we vary the "probing" frequency while keeping the spike times
fixed. Biol Cybern. 2013 Aug;107(4):491-94.
:doi:`10.1007/s00422-013-0560-8`.
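Examples
--------
An illustrative sketch: events that always occur at the same phase of the
period are perfectly synchronized, giving a vector strength of 1.
>>> from scipy import signal
>>> events = np.array([0.25, 1.25, 2.25, 3.25])
>>> strength, phase = signal.vectorstrength(events, 1.0)
>>> np.allclose(strength, 1.0)
True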
'''
events = asarray(events)
period = asarray(period)
if events.ndim > 1:
raise ValueError('events cannot have dimensions more than 1')
if period.ndim > 1:
raise ValueError('period cannot have dimensions more than 1')
# we need to know later if period was originally a scalar
scalarperiod = not period.ndim
events = atleast_2d(events)
period = atleast_2d(period)
if (period <= 0).any():
raise ValueError('periods must be positive')
# this converts the times to vectors
vectors = exp(dot(2j*pi/period.T, events))
# the vector strength is just the magnitude of the mean of the vectors
# the vector phase is the angle of the mean of the vectors
vectormean = mean(vectors, axis=1)
strength = abs(vectormean)
phase = angle(vectormean)
# if the original period was a scalar, return scalars
if scalarperiod:
strength = strength[0]
phase = phase[0]
return strength, phase
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
>>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, _prod(dshape) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Construct initial conditions for lfilter for step response steady-state.
Compute an initial state `zi` for the `lfilter` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
See Also
--------
lfilter, lfiltic, filtfilt
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be 1-D.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be 1-D.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
b = b / a[0]
a = a / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def sosfilt_zi(sos):
"""
Construct initial conditions for sosfilt for step response steady-state.
Compute an initial state `zi` for the `sosfilt` function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
zi : ndarray
Initial conditions suitable for use with ``sosfilt``, shape
``(n_sections, 2)``.
See Also
--------
sosfilt, zpk2sos
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
Filter a rectangular pulse that begins at time 0, with and without
the use of the `zi` argument of `scipy.signal.sosfilt`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sos = signal.butter(9, 0.125, output='sos')
>>> zi = signal.sosfilt_zi(sos)
>>> x = (np.arange(250) < 100).astype(int)
>>> f1 = signal.sosfilt(sos, x)
>>> f2, zo = signal.sosfilt(sos, x, zi=zi)
>>> plt.plot(x, 'k--', label='x')
>>> plt.plot(f1, 'b', alpha=0.5, linewidth=2, label='filtered')
>>> plt.plot(f2, 'g', alpha=0.25, linewidth=4, label='filtered with zi')
>>> plt.legend(loc='best')
>>> plt.show()
"""
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def _filtfilt_gust(b, a, x, axis=-1, irlen=None):
"""Forward-backward IIR filter that uses Gustafsson's method.
Apply the IIR filter defined by `(b,a)` to `x` twice, first forward
then backward, using Gustafsson's initial conditions [1]_.
Let ``y_fb`` be the result of filtering first forward and then backward,
and let ``y_bf`` be the result of filtering first backward then forward.
Gustafsson's method is to compute initial conditions for the forward
pass and the backward pass such that ``y_fb == y_bf``.
Parameters
----------
b : scalar or 1-D ndarray
Numerator coefficients of the filter.
a : scalar or 1-D ndarray
Denominator coefficients of the filter.
x : ndarray
Data to be filtered.
axis : int, optional
Axis of `x` to be filtered. Default is -1.
irlen : int or None, optional
The length of the nonnegligible part of the impulse response.
If `irlen` is None, or if the length of the signal is less than
``2 * irlen``, then no part of the impulse response is ignored.
Returns
-------
y : ndarray
The filtered data.
x0 : ndarray
Initial condition for the forward filter.
x1 : ndarray
Initial condition for the backward filter.
Notes
-----
Typically the return values `x0` and `x1` are not needed by the
caller. The intended use of these return values is in unit tests.
References
----------
.. [1] F. Gustafsson. Determining the initial states in forward-backward
filtering. Transactions on Signal Processing, 46(4):988-992, 1996.
"""
# In the comments, "Gustafsson's paper" and [1] refer to the
# paper referenced in the docstring.
b = np.atleast_1d(b)
a = np.atleast_1d(a)
order = max(len(b), len(a)) - 1
if order == 0:
# The filter is just scalar multiplication, with no state.
scale = (b[0] / a[0])**2
y = scale * x
return y, np.array([]), np.array([])
if axis != -1 and axis != x.ndim - 1:
# Move the axis containing the data to the end.
x = np.swapaxes(x, axis, x.ndim - 1)
# n is the number of samples in the data to be filtered.
n = x.shape[-1]
if irlen is None or n <= 2*irlen:
m = n
else:
m = irlen
# Create Obs, the observability matrix (called O in the paper).
# This matrix can be interpreted as the operator that propagates
# an arbitrary initial state to the output, assuming the input is
# zero.
# In Gustafsson's paper, the forward and backward filters are not
# necessarily the same, so he has both O_f and O_b. We use the same
# filter in both directions, so we only need O. The same comment
# applies to S below.
Obs = np.zeros((m, order))
zi = np.zeros(order)
zi[0] = 1
Obs[:, 0] = lfilter(b, a, np.zeros(m), zi=zi)[0]
for k in range(1, order):
Obs[k:, k] = Obs[:-k, 0]
# Obsr is O^R (Gustafsson's notation for row-reversed O)
Obsr = Obs[::-1]
# Create S. S is the matrix that applies the filter to the reversed
# propagated initial conditions. That is,
# out = S.dot(zi)
# is the same as
# tmp, _ = lfilter(b, a, zeros(), zi=zi) # Propagate ICs.
# out = lfilter(b, a, tmp[::-1]) # Reverse and filter.
# Equations (5) & (6) of [1]
S = lfilter(b, a, Obs[::-1], axis=0)
# Sr is S^R (row-reversed S)
Sr = S[::-1]
# M is [(S^R - O), (O^R - S)]
if m == n:
M = np.hstack((Sr - Obs, Obsr - S))
else:
# Matrix described in section IV of [1].
M = np.zeros((2*m, 2*order))
M[:m, :order] = Sr - Obs
M[m:, order:] = Obsr - S
# Naive forward-backward and backward-forward filters.
# These have large transients because the filters use zero initial
# conditions.
y_f = lfilter(b, a, x)
y_fb = lfilter(b, a, y_f[..., ::-1])[..., ::-1]
y_b = lfilter(b, a, x[..., ::-1])[..., ::-1]
y_bf = lfilter(b, a, y_b)
delta_y_bf_fb = y_bf - y_fb
if m == n:
delta = delta_y_bf_fb
else:
start_m = delta_y_bf_fb[..., :m]
end_m = delta_y_bf_fb[..., -m:]
delta = np.concatenate((start_m, end_m), axis=-1)
# ic_opt holds the "optimal" initial conditions.
# The following code computes the result shown in the formula
# of the paper between equations (6) and (7).
if delta.ndim == 1:
ic_opt = linalg.lstsq(M, delta)[0]
else:
# Reshape delta so it can be used as an array of multiple
# right-hand-sides in linalg.lstsq.
delta2d = delta.reshape(-1, delta.shape[-1]).T
ic_opt0 = linalg.lstsq(M, delta2d)[0].T
ic_opt = ic_opt0.reshape(delta.shape[:-1] + (M.shape[-1],))
# Now compute the filtered signal using equation (7) of [1].
# First, form [S^R, O^R] and call it W.
if m == n:
W = np.hstack((Sr, Obsr))
else:
W = np.zeros((2*m, 2*order))
W[:m, :order] = Sr
W[m:, order:] = Obsr
# Equation (7) of [1] says
# Y_fb^opt = Y_fb^0 + W * [x_0^opt; x_{N-1}^opt]
# `wic` is (almost) the product on the right.
# W has shape (m, 2*order), and ic_opt has shape (..., 2*order),
# so we can't use W.dot(ic_opt). Instead, we dot ic_opt with W.T,
# so wic has shape (..., m).
wic = ic_opt.dot(W.T)
# `wic` is "almost" the product of W and the optimal ICs in equation
# (7)--if we're using a truncated impulse response (m < n), `wic`
# contains only the adjustments required for the ends of the signal.
# Here we form y_opt, taking this into account if necessary.
y_opt = y_fb
if m == n:
y_opt += wic
else:
y_opt[..., :m] += wic[..., :m]
y_opt[..., -m:] += wic[..., -m:]
x0 = ic_opt[..., :order]
x1 = ic_opt[..., -order:]
if axis != -1 and axis != x.ndim - 1:
# Restore the data axis to its original position.
x0 = np.swapaxes(x0, axis, x.ndim - 1)
x1 = np.swapaxes(x1, axis, x.ndim - 1)
y_opt = np.swapaxes(y_opt, axis, x.ndim - 1)
return y_opt, x0, x1
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None, method='pad',
irlen=None):
"""
Apply a digital filter forward and backward to a signal.
This function applies a linear digital filter twice, once forward and
once backwards. The combined filter has zero phase and a filter order
twice that of the original.
The function provides options for handling the edges of the signal.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is ``3 * max(len(a), len(b))``.
method : str, optional
Determines the method for handling the edges of the signal, either
"pad" or "gust". When `method` is "pad", the signal is padded; the
type of padding is determined by `padtype` and `padlen`, and `irlen`
is ignored. When `method` is "gust", Gustafsson's method is used,
and `padtype` and `padlen` are ignored.
irlen : int or None, optional
When `method` is "gust", `irlen` specifies the length of the
impulse response of the filter. If `irlen` is None, no part
of the impulse response is ignored. For a long signal, specifying
`irlen` can significantly improve the performance of the filter.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
sosfiltfilt, lfilter_zi, lfilter, lfiltic, savgol_filter, sosfilt
Notes
-----
When `method` is "pad", the function pads the data along the given axis
in one of three ways: odd, even or constant. The odd and even extensions
have the corresponding symmetry about the end point of the data. The
constant extension extends the data with the values at the end points. On
both the forward and backward passes, the initial condition of the
filter is found by using `lfilter_zi` and scaling it by the end point of
the extended data.
When `method` is "gust", Gustafsson's method [1]_ is used. Initial
conditions are chosen for the forward and backward passes so that the
forward-backward filter gives the same result as the backward-forward
filter.
The option to use Gustafsson's method was added in scipy version 0.16.0.
References
----------
.. [1] F. Gustafsson, "Determining the initial states in forward-backward
filtering", Transactions on Signal Processing, Vol. 46, pp. 988-992,
1996.
Examples
--------
The examples will use several functions from `scipy.signal`.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to ``x`` with `filtfilt`.
The result should be approximately ``xlow``, with no phase shift.
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
The following example demonstrates the option ``method="gust"``.
First, create a filter.
>>> b, a = signal.ellip(4, 0.01, 120, 0.125) # Filter to be applied.
>>> np.random.seed(123456)
`sig` is a random input signal to be filtered.
>>> n = 60
>>> sig = np.random.randn(n)**3 + 3*np.random.randn(n).cumsum()
Apply `filtfilt` to `sig`, once using the Gustafsson method, and
once using padding, and plot the results for comparison.
>>> fgust = signal.filtfilt(b, a, sig, method="gust")
>>> fpad = signal.filtfilt(b, a, sig, padlen=50)
>>> plt.plot(sig, 'k-', label='input')
>>> plt.plot(fgust, 'b-', linewidth=4, label='gust')
>>> plt.plot(fpad, 'c-', linewidth=1.5, label='pad')
>>> plt.legend(loc='best')
>>> plt.show()
The `irlen` argument can be used to improve the performance
of Gustafsson's method.
Estimate the impulse response length of the filter.
>>> z, p, k = signal.tf2zpk(b, a)
>>> eps = 1e-9
>>> r = np.max(np.abs(p))
>>> approx_impulse_len = int(np.ceil(np.log(eps) / np.log(r)))
>>> approx_impulse_len
137
Apply the filter to a longer signal, with and without the `irlen`
argument. The difference between `y1` and `y2` is small. For long
signals, using `irlen` gives a significant performance improvement.
>>> x = np.random.randn(5000)
>>> y1 = signal.filtfilt(b, a, x, method='gust')
>>> y2 = signal.filtfilt(b, a, x, method='gust', irlen=approx_impulse_len)
>>> print(np.max(np.abs(y1 - y2)))
1.80056858312e-10
"""
b = np.atleast_1d(b)
a = np.atleast_1d(a)
x = np.asarray(x)
if method not in ["pad", "gust"]:
raise ValueError("method must be 'pad' or 'gust'.")
if method == "gust":
y, z1, z2 = _filtfilt_gust(b, a, x, axis=axis, irlen=irlen)
return y
# method == "pad"
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=max(len(a), len(b)))
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def sosfilt(sos, x, axis=-1, zi=None):
"""
Filter data along one dimension using cascaded second-order sections.
Filter a data sequence, `x`, using a digital IIR filter defined by
`sos`. This is implemented by performing `lfilter` for each
second-order section. See `lfilter` for details.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
An N-dimensional input array.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the cascaded filter delays. It is a (at
least 2D) vector of shape ``(n_sections, ..., 2, ...)``, where
``..., 2, ...`` denotes the shape of `x`, but with ``x.shape[axis]``
replaced by 2. If `zi` is None or is not given then initial rest
(i.e. all zeros) is assumed.
Note that these initial conditions are *not* the same as the initial
conditions given by `lfiltic` or `lfilter_zi`.
Returns
-------
y : ndarray
The output of the digital filter.
zf : ndarray, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
See Also
--------
zpk2sos, sos2zpk, sosfilt_zi, sosfiltfilt, sosfreqz
Notes
-----
The filter function is implemented as a series of second-order filters
with direct-form II transposed structure. It is designed to minimize
numerical precision errors for high-order filters.
.. versionadded:: 0.16.0
Examples
--------
Plot a 13th-order filter's impulse response using both `lfilter` and
`sosfilt`, showing the instability that results from trying to do a
13th-order filter in a single stage (the numerical error pushes some poles
outside of the unit circle):
>>> import matplotlib.pyplot as plt
>>> from scipy import signal
>>> b, a = signal.ellip(13, 0.009, 80, 0.05, output='ba')
>>> sos = signal.ellip(13, 0.009, 80, 0.05, output='sos')
>>> x = signal.unit_impulse(700)
>>> y_tf = signal.lfilter(b, a, x)
>>> y_sos = signal.sosfilt(sos, x)
>>> plt.plot(y_tf, 'r', label='TF')
>>> plt.plot(y_sos, 'k', label='SOS')
>>> plt.legend(loc='best')
>>> plt.show()
"""
x = np.asarray(x)
sos, n_sections = _validate_sos(sos)
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r, got %r.' %
(axis, x.shape, n_sections, x_zi_shape, zi.shape))
zf = zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
def sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward digital filter using cascaded second-order sections.
See `filtfilt` for more complete information about this method.
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. Each row corresponds to a second-order
section, with the first three columns providing the numerator
coefficients and the last three providing the denominator
coefficients.
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
``x.shape[axis] - 1``. ``padlen=0`` implies no padding.
The default value is::
3 * (2 * len(sos) + 1 - min((sos[:, 2] == 0).sum(),
(sos[:, 5] == 0).sum()))
The extra subtraction at the end attempts to compensate for poles
and zeros at the origin (e.g. for odd-order filters) to yield
equivalent estimates of `padlen` to those of `filtfilt` for
second-order section filters built with `scipy.signal` functions.
Returns
-------
y : ndarray
The filtered output with the same shape as `x`.
See Also
--------
filtfilt, sosfilt, sosfilt_zi, sosfreqz
Notes
-----
.. versionadded:: 0.18.0
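    Examples
    --------
    A minimal usage sketch; the 4th-order Butterworth design below is only
    illustrative and is not required by this function:
    >>> import numpy as np
    >>> from scipy import signal
    >>> rng = np.random.RandomState(0)
    >>> x = rng.randn(201)
    >>> sos = signal.butter(4, 0.125, output='sos')
    >>> y = signal.sosfiltfilt(sos, x)  # zero-phase output, same length as x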
"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def decimate(x, q, n=None, ftype='iir', axis=-1, zero_phase=True):
"""
Downsample the signal after applying an anti-aliasing filter.
By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
filter with Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor. For downsampling factors higher than 13, it is
recommended to call `decimate` multiple times.
n : int, optional
The order of the filter (1 less than the length for 'fir'). Defaults to
8 for 'iir' and 30 for 'fir'.
ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
If 'iir' or 'fir', specifies the type of lowpass filter. If an instance
        of a `dlti` object, uses that object to filter before downsampling.
axis : int, optional
The axis along which to decimate.
zero_phase : bool, optional
Prevent phase shift by filtering with `filtfilt` instead of `lfilter`
when using an IIR filter, and shifting the outputs back by the filter's
group delay when using an FIR filter. The default value of ``True`` is
recommended, since a phase shift is generally not desired.
.. versionadded:: 0.18.0
Returns
-------
y : ndarray
The down-sampled signal.
See Also
--------
resample : Resample up or down using the FFT method.
resample_poly : Resample using polyphase filtering and an FIR filter.
Notes
-----
The ``zero_phase`` keyword was added in 0.18.0.
The possibility to use instances of ``dlti`` as ``ftype`` was added in
0.18.0.
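    Examples
    --------
    A minimal sketch of downsampling an illustrative sine wave by a factor
    of 4 with the default IIR filter:
    >>> import numpy as np
    >>> from scipy import signal
    >>> t = np.linspace(0, 1, 1000, endpoint=False)
    >>> x = np.sin(2 * np.pi * 5 * t)
    >>> y = signal.decimate(x, 4)  # y contains 250 samples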
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is not None and not isinstance(n, int):
raise TypeError("n must be an integer")
if ftype == 'fir':
if n is None:
n = 30
system = dlti(firwin(n+1, 1. / q, window='hamming'), 1.)
elif ftype == 'iir':
if n is None:
n = 8
system = dlti(*cheby1(n, 0.05, 0.8 / q))
elif isinstance(ftype, dlti):
system = ftype._as_tf() # Avoids copying if already in TF form
n = np.max((system.num.size, system.den.size)) - 1
else:
raise ValueError('invalid ftype')
sl = [slice(None)] * x.ndim
if len(system.den) == 1: # FIR case
if zero_phase:
y = resample_poly(x, 1, q, axis=axis, window=system.num)
else:
# upfirdn is generally faster than lfilter by a factor equal to the
# downsampling factor, since it only calculates the needed outputs
n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
y = upfirdn(system.num, x, up=1, down=q, axis=axis)
sl[axis] = slice(None, n_out, None)
else: # IIR case
if zero_phase:
y = filtfilt(system.num, system.den, x, axis=axis)
else:
y = lfilter(system.num, system.den, x, axis=axis)
sl[axis] = slice(None, None, q)
    return y[tuple(sl)]
| bsd-3-clause |
MartinDelzant/scikit-learn | sklearn/cluster/tests/test_hierarchical.py | 230 | 19795 | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
assert_raises(ValueError, linkage_tree, X, linkage='foo')
assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
    # test hierarchical clustering on a precomputed distance matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
    # test hierarchical clustering with a callable affinity (manhattan_distances)
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
assert_raises(ValueError,
tree_builder, X.T, np.ones((4, 4)))
# Check that fitting with no samples raises an error
assert_raises(ValueError,
tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # With a specified number of clusters, just for the sake of
# raising a warning and testing the warning code
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, ward_tree, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
children, n_nodes, n_leaves, parent = assert_warns(
UserWarning, tree_builder, this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert_equal(len(children) + n_leaves, n_nodes)
def test_height_linkage_tree():
# Check that the height of the results of linkage tree is sorted.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
n_nodes = 2 * X.shape[1] - 1
assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert_true(np.size(np.unique(labels)) == 10)
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that a ValueError is raised when the connectivity matrix
        # does not match the number of samples
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
assert_raises(ValueError, clustering.fit, X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
assert_raises(ValueError, clustering.fit, X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
        # Compare the structured implementation (full connectivity) to the
        # unstructured one
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=np.bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert_true(np.size(np.unique(agglo.labels_)) == 5)
X_red = agglo.transform(X)
assert_true(X_red.shape[1] == 5)
X_full = agglo.inverse_transform(X_red)
assert_true(np.unique(X_full[0]).size == 5)
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert_true((co_clust[0] == co_clust[1]).all())
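# A quick illustration of what assess_same_labelling checks: cuts that are mere
# relabelings of each other, e.g. np.array([0, 0, 1]) and np.array([1, 1, 0]),
# yield identical co-clustering matrices and therefore pass the assertion.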
def test_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
# Not using a lil_matrix here, just to check that non sparse
# matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(np.int)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
    # test that return_distance, when set to True, gives the same
    # output for both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression check for a bug when a connectivity matrix that does not
    # support item assignment is provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
assert_warns(UserWarning, w.fit, x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50).astype(np.intp)[::2]
other_values = 0.5 * np.ones(50)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is small, the full tree should be built,
    # that is, the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - 1)
    # When n_clusters is large (greater than the max of 100 and 0.02 * n_samples),
    # tree building should stop once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert_equal(n_nodes, n_samples - n_clusters)
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert_equal(ignore_warnings(linkage_func)(X, connectivity)[1], 5)
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
assert_raise_message(ValueError, msg, agc.fit, X)
| bsd-3-clause |
pprett/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
kevin-intel/scikit-learn | examples/decomposition/plot_kernel_pca.py | 27 | 2290 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes it linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.scatter(X[reds, 0], X[reds, 1], c="red",
s=20, edgecolor='k')
plt.scatter(X[blues, 0], X[blues, 1], c="blue",
s=20, edgecolor='k')
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.scatter(X_pca[reds, 0], X_pca[reds, 1], c="red",
s=20, edgecolor='k')
plt.scatter(X_pca[blues, 0], X_pca[blues, 1], c="blue",
s=20, edgecolor='k')
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.scatter(X_kpca[reds, 0], X_kpca[reds, 1], c="red",
s=20, edgecolor='k')
plt.scatter(X_kpca[blues, 0], X_kpca[blues, 1], c="blue",
s=20, edgecolor='k')
plt.title("Projection by KPCA")
plt.xlabel(r"1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.scatter(X_back[reds, 0], X_back[reds, 1], c="red",
s=20, edgecolor='k')
plt.scatter(X_back[blues, 0], X_back[blues, 1], c="blue",
s=20, edgecolor='k')
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.tight_layout()
plt.show()
| bsd-3-clause |
lazywei/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
HealthCatalyst/healthcareai-py | healthcareai/tests/test_dataframe_transformers.py | 4 | 13176 | import pandas as pd
import numpy as np
import unittest
import healthcareai.common.transformers as transformers
class TestDataframeImputer(unittest.TestCase):
def test_imputation_false_returns_unmodified(self):
df = pd.DataFrame([
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
['a', None, None]
])
expected = pd.DataFrame([
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
['a', None, None]
])
result = transformers.DataFrameImputer(impute=False).fit_transform(df)
self.assertEqual(len(result), 4)
# Assert column types remain identical
self.assertTrue(list(result.dtypes) == list(df.dtypes))
self.assertTrue(expected.equals(result))
def test_imputation_removes_nans(self):
df = pd.DataFrame([
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
[np.nan, np.nan, np.nan]
])
expected = pd.DataFrame([
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
['b', 4 / 3.0, 5 / 3.0]
])
result = transformers.DataFrameImputer().fit_transform(df)
self.assertEqual(len(result), 4)
# Assert no NANs
self.assertFalse(result.isnull().values.any())
# Assert column types remain identical
self.assertTrue(list(result.dtypes) == list(df.dtypes))
self.assertTrue(expected.equals(result))
def test_imputation_removes_nones(self):
df = pd.DataFrame([
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
[None, None, None]
])
expected = pd.DataFrame([
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
['b', 4 / 3.0, 5 / 3.0]
])
result = transformers.DataFrameImputer().fit_transform(df)
self.assertEqual(len(result), 4)
self.assertFalse(result.isnull().values.any())
# Assert column types remain identical
self.assertTrue(list(result.dtypes) == list(df.dtypes))
self.assertTrue(expected.equals(result))
def test_imputation_for_mean_of_numeric_and_mode_for_categorical(self):
df = pd.DataFrame([
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
[None, None, None]
])
result = transformers.DataFrameImputer().fit_transform(df)
expected = pd.DataFrame([
['a', 1, 2],
['b', 1, 1],
['b', 2, 2],
['b', 4. / 3, 5. / 3]
])
self.assertEqual(len(result), 4)
# Assert imputed values
self.assertTrue(expected.equals(result))
# Assert column types remain identical
self.assertTrue(list(result.dtypes) == list(df.dtypes))
class TestDataFrameConvertTargetToBinary(unittest.TestCase):
def test_does_nothing_on_regression(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'outcome': [1, 5, 4],
'string_outcome': ['Y', 'N', 'Y']
})
result = transformers.DataFrameConvertTargetToBinary('regression', 'string_outcome').fit_transform(df)
self.assertTrue(df.equals(result))
def test_converts_y_n_for_classification(self):
df = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'outcome': [1, 5, 4],
'string_outcome': ['Y', 'N', 'Y']
})
expected = pd.DataFrame({
'category': ['a', 'b', 'c'],
'gender': ['F', 'M', 'F'],
'outcome': [1, 5, 4],
'string_outcome': [1, 0, 1]
})
result = transformers.DataFrameConvertTargetToBinary('classification', 'string_outcome').fit_transform(df)
self.assertTrue(expected.equals(result))
class TestDataFrameCreateDummyVariables(unittest.TestCase):
def test_dummies_for_binary_categorical(self):
df = pd.DataFrame({
'aa_outcome': [1, 5, 4],
'binary_category': ['a', 'b', 'a'],
'numeric': [1, 2, 1],
})
expected = pd.DataFrame({
'aa_outcome': [1, 5, 4],
'binary_category.b': [0, 1, 0],
'numeric': [1, 2, 1],
})
# cast as uint8 which the pandas.get_dummies() outputs
expected = expected.astype({'binary_category.b': 'uint8'})
result = transformers.DataFrameCreateDummyVariables('aa_outcome').fit_transform(df)
# Sort each because column order matters for equality checks
expected = expected.sort_index(axis=1)
result = result.sort_index(axis=1)
self.assertTrue(result.equals(expected))
def test_dummies_for_trinary_categorical(self):
df = pd.DataFrame({
'binary_category': ['a', 'b', 'c'],
'aa_outcome': [1, 5, 4]
})
expected = pd.DataFrame({
'aa_outcome': [1, 5, 4],
'binary_category.b': [0, 1, 0],
'binary_category.c': [0, 0, 1]
})
# cast as uint8 which the pandas.get_dummies() outputs
expected = expected.astype({'binary_category.b': 'uint8', 'binary_category.c': 'uint8'})
result = transformers.DataFrameCreateDummyVariables('aa_outcome').fit_transform(df)
# Sort each because column order matters for equality checks
expected = expected.sort_index(axis=1)
result = result.sort_index(axis=1)
self.assertTrue(result.equals(expected))
class TestDataFrameConvertColumnToNumeric(unittest.TestCase):
def test_integer_strings(self):
df = pd.DataFrame({
'integer_strings': ['1', '2', '3'],
'binary_category': ['a', 'b', 'a'],
'numeric': [1, 2, 1],
})
expected = pd.DataFrame({
'integer_strings': [1, 2, 3],
'binary_category': ['a', 'b', 'a'],
'numeric': [1, 2, 1],
})
result = transformers.DataFrameConvertColumnToNumeric('integer_strings').fit_transform(df)
# Sort each because column order matters for equality checks
expected = expected.sort_index(axis=1)
result = result.sort_index(axis=1)
self.assertTrue(result.equals(expected))
def test_integer(self):
df = pd.DataFrame({
'binary_category': ['a', 'b', 'a'],
'numeric': [1, 2, 1],
})
expected = pd.DataFrame({
'binary_category': ['a', 'b', 'a'],
'numeric': [1, 2, 1],
})
result = transformers.DataFrameConvertColumnToNumeric('numeric').fit_transform(df)
# Sort each because column order matters for equality checks
expected = expected.sort_index(axis=1)
result = result.sort_index(axis=1)
self.assertTrue(result.equals(expected))
class TestDataframeUnderSampler(unittest.TestCase):
def setUp(self):
        # Build an imbalanced dataframe (25% True at_risk)
self.df = pd.DataFrame({'id': [1, 2, 3, 4, 5, 6, 7, 8],
'is_male': [1, 0, 1, 0, 0, 0, 1, 1],
'height': [100, 80, 70, 85, 100, 80, 70, 85],
'weight': [99, 46, 33, 44, 99, 46, 33, 44],
'at_risk': [True, False, False, False, True, False, False, False],
})
self.result = transformers.DataFrameUnderSampling('at_risk', random_seed=42).fit_transform(self.df)
print(self.result.head())
def test_returns_dataframe(self):
self.assertTrue(isinstance(self.result, pd.DataFrame))
def test_returns_smaller_dataframe(self):
self.assertLess(len(self.result), len(self.df))
def test_returns_balanced_classes(self):
# For sanity, verify that the original classes were imbalanced
original_value_counts = self.df['at_risk'].value_counts()
original_true_count = original_value_counts[1]
original_false_count = original_value_counts[0]
self.assertNotEqual(original_true_count, original_false_count)
# Verify that the new classes are balanced
value_counts = self.result['at_risk'].value_counts()
true_count = value_counts[1]
false_count = value_counts[0]
self.assertEqual(true_count, false_count)
class TestDataframeOverSampler(unittest.TestCase):
def setUp(self):
        # Build an imbalanced dataframe (25% True at_risk)
self.df = pd.DataFrame({'id': [1, 2, 3, 4, 5, 6, 7, 8],
'is_male': [1, 0, 1, 0, 0, 0, 1, 1],
'height': [100, 80, 70, 85, 100, 80, 70, 85],
'weight': [99, 46, 33, 44, 99, 46, 33, 44],
'at_risk': [True, False, False, False, True, False, False, False],
})
self.result = transformers.DataFrameOverSampling('at_risk', random_seed=42).fit_transform(self.df)
# print(self.df.head(10))
# print(self.result.head(12))
def test_returns_dataframe(self):
self.assertTrue(isinstance(self.result, pd.DataFrame))
def test_returns_larger_dataframe(self):
self.assertGreater(len(self.result), len(self.df))
def test_returns_balanced_classes(self):
# For sanity, verify that the original classes were imbalanced
original_value_counts = self.df['at_risk'].value_counts()
original_true_count = original_value_counts[1]
original_false_count = original_value_counts[0]
self.assertNotEqual(original_true_count, original_false_count)
# Verify that the new classes are balanced
value_counts = self.result['at_risk'].value_counts()
true_count = value_counts[1]
false_count = value_counts[0]
# print('True Counts: {} --> {}, False Counts: {} --> {}'.format(original_true_count, true_count,
# original_false_count, false_count))
self.assertEqual(true_count, false_count)
class TestRemovesNANs(unittest.TestCase):
def setUp(self):
self.df = pd.DataFrame({'a': [1, None, 2, 3, None],
'b': ['m', 'f', None, 'f', None],
'c': [3, 4, 5, None, None],
'd': [None, 8, 1, 3, None],
'e': [None, None, None, None, None],
'label': ['Y', 'N', 'Y', 'N', None]})
def runTest(self):
df_final = transformers.DataFrameDropNaN().fit_transform(self.df)
self.assertTrue(df_final.equals(pd.DataFrame({'a': [1, None, 2, 3, None],
'b': ['m', 'f', None, 'f', None],
'c': [3, 4, 5, None, None],
'd': [None, 8, 1, 3, None],
'label': ['Y', 'N', 'Y', 'N', None]})))
def tearDown(self):
del self.df
class TestFeatureScaling(unittest.TestCase):
def setUp(self):
self.df = pd.DataFrame({'a': [1, 3, 2, 3],
'b': ['m', 'f', 'b', 'f'],
'c': [3, 4, 5, 5],
'd': [6, 8, 1, 3],
'label': ['Y', 'N', 'Y', 'N']})
self.df_repeat = pd.DataFrame({'a': [1, 3, 2, 3],
'b': ['m', 'f', 'b', 'f'],
'c': [3, 4, 5, 5],
'd': [6, 8, 1, 3],
'label': ['Y', 'N', 'Y', 'N']})
def runTest(self):
feature_scaling = transformers.DataFrameFeatureScaling()
df_final = feature_scaling.fit_transform(self.df).round(5)
self.assertTrue(df_final.equals(pd.DataFrame({'a': [-1.507557, 0.904534, -0.301511, 0.904534],
'b': ['m', 'f', 'b', 'f'],
'c': [-1.507557, -0.301511, 0.904534, 0.904534],
'd': [0.557086, 1.299867, -1.299867, -0.557086],
'label': ['Y', 'N', 'Y', 'N']}).round(5)))
df_reused = transformers.DataFrameFeatureScaling(reuse=feature_scaling).fit_transform(self.df_repeat).round(5)
self.assertTrue(df_reused.equals(pd.DataFrame({'a': [-1.507557, 0.904534, -0.301511, 0.904534],
'b': ['m', 'f', 'b', 'f'],
'c': [-1.507557, -0.301511, 0.904534, 0.904534],
'd': [0.557086, 1.299867, -1.299867, -0.557086],
'label': ['Y', 'N', 'Y', 'N']}).round(5)))
if __name__ == '__main__':
unittest.main()
| mit |
spallavolu/scikit-learn | sklearn/preprocessing/data.py | 68 | 57385 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Eric Martin <[email protected]>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
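# A small sanity check of the helper above (easy to verify by hand):
# _mean_and_std(np.array([[1., 2.], [3., 2.]])) gives means [2., 2.] and stds
# [1., 1.]; the zero std of the constant second column is reset to 1.0 below.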
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
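# For example, _handle_zeros_in_scale(np.array([2., 0.])) returns array([2., 1.]),
# so dividing by the returned scale never produces inf or NaN for constant features.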
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
    feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
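    Examples
    --------
    A minimal usage sketch (the toy data below is illustrative only):
    >>> import numpy as np
    >>> from sklearn.preprocessing import MinMaxScaler
    >>> X = np.array([[1., 2.], [3., 6.]])
    >>> X_scaled = MinMaxScaler().fit_transform(X)  # each column now spans [0, 1]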
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
    feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
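    Examples
    --------
    A minimal usage sketch (the toy data below is illustrative only):
    >>> import numpy as np
    >>> from sklearn.preprocessing import StandardScaler
    >>> X = np.array([[0., 0.], [1., 1.], [2., 2.]])
    >>> scaler = StandardScaler().fit(X)
    >>> X_std = scaler.transform(X)  # zero mean and unit variance per column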
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
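    Examples
    --------
    A minimal usage sketch (the toy data below is illustrative only):
    >>> import numpy as np
    >>> from sklearn.preprocessing import MaxAbsScaler
    >>> X = np.array([[-2., 1.], [4., -1.]])
    >>> X_scaled = MaxAbsScaler().fit_transform(X)  # max absolute value per column is 1.0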
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
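# Worked sketch of the median/IQR statistics used above (hand-computed, not part
# of the original source). For the single feature [1, 2, 3, 4, 100] the median is
# 3 and the 25th/75th percentiles are 2 and 4, so the outlier barely affects the
# scaling (a mean/std based scaler would be dominated by the value 100):
#
#   >>> X = np.array([[1.], [2.], [3.], [4.], [100.]])
#   >>> rs = RobustScaler().fit(X)
#   >>> rs.center_, rs.scale_                  # median and IQR (75th - 25th)
#   (array([ 3.]), array([ 2.]))
#   >>> rs.transform(X).ravel()                # (x - 3) / 2
#   array([ -1. ,  -0.5,   0. ,   0.5,  48.5])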
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
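# Sketch of how _combinations / powers_ enumerate the output columns (hand-worked,
# not part of the original source). For 2 input features and degree=2 the
# combinations-with-replacement are (), (0,), (1,), (0, 0), (0, 1), (1, 1); each
# is turned into an exponent vector via bincount, matching [1, a, b, a^2, ab, b^2]:
#
#   >>> poly = PolynomialFeatures(degree=2).fit(np.ones((1, 2)))
#   >>> poly.powers_
#   array([[0, 0],
#          [1, 0],
#          [0, 1],
#          [2, 0],
#          [1, 1],
#          [0, 2]])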
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
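# Hand-computed sketch of the three supported norms on a single sample (not part
# of the original source): for [3, 4] the l1 norm is 7, the l2 norm is 5 and the
# max norm is 4.
#
#   >>> v = np.array([[3., 4.]])
#   >>> normalize(v, norm='l1')
#   array([[ 0.42857143,  0.57142857]])
#   >>> normalize(v, norm='l2')
#   array([[ 0.6,  0.8]])
#   >>> normalize(v, norm='max')
#   array([[ 0.75,  1.  ]])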
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering, for instance. For example, the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
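# Sketch of the cosine-similarity remark in the class docstring (hand-computed,
# not part of the original source): after l2 normalization, the dot product of
# two rows is their cosine similarity.
#
#   >>> A = np.array([[1., 0., 1.],
#   ...               [1., 1., 0.]])
#   >>> An = Normalizer(norm='l2').fit_transform(A)
#   >>> np.allclose(An[0].dot(An[1]), 0.5)     # 1 / (sqrt(2) * sqrt(2))
#   True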
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
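# Small sketch of the thresholding rule above (values <= threshold become 0,
# values strictly above it become 1; hand-computed, not part of the original
# source):
#
#   >>> X = np.array([[0.5, -1., 2.]])
#   >>> binarize(X)                    # default threshold=0.0
#   array([[ 1.,  0.,  1.]])
#   >>> binarize(X, threshold=1.0)
#   array([[ 0.,  0.,  1.]])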
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
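# Sketch of what the centering above computes (not part of the original source).
# In matrix form, K_centered = K - 1_n K - K 1_n + 1_n K 1_n, where 1_n is the
# n x n matrix filled with 1/n; for a linear kernel this equals the kernel of the
# column-centered features:
#
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.rand(5, 3)
#   >>> K = X.dot(X.T)                              # linear kernel
#   >>> Kc = KernelCenterer().fit(K).transform(K)
#   >>> Xc = X - X.mean(axis=0)
#   >>> np.allclose(Kc, Xc.dot(Xc.T))
#   True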
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
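# Tiny illustration of _transform_selected (hand-worked, not part of the original
# source): the transformed selected columns come first and the untouched columns
# are stacked to their right, which is why OneHotEncoder keeps non-categorical
# features at the end of the output matrix.
#
#   >>> X = np.array([[1, 2, 3],
#   ...               [4, 5, 6]])
#   >>> _transform_selected(X, lambda Xs: Xs * 10, selected=[0, 2])
#   array([[10, 30,  2],
#          [40, 60,  5]])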
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit,
# i.e. those that are less than n_values_, using the mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
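# Worked index arithmetic behind the docstring example above (not part of the
# original source): with feature_indices_ = [0, 2, 5, 9], the sample [0, 1, 1]
# maps to active columns 0 + 0, 1 + 2 and 1 + 5, i.e. columns 0, 3 and 6 of the
# 9-column indicator matrix, which is exactly the row
# [1, 0, 0, 1, 0, 0, 1, 0, 0] shown in the class docstring.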
| bsd-3-clause |
RomelTorres/alpha_vantage | test_alpha_vantage/test_alphavantage_async.py | 1 | 10226 | #!/usr/bin/env python
from ..alpha_vantage.async_support.alphavantage import AlphaVantage
from ..alpha_vantage.async_support.timeseries import TimeSeries
from ..alpha_vantage.async_support.techindicators import TechIndicators
from ..alpha_vantage.async_support.sectorperformance import SectorPerformances
from ..alpha_vantage.async_support.foreignexchange import ForeignExchange
from pandas import DataFrame as df, Timestamp
import asyncio
from aioresponses import aioresponses
from functools import wraps
import json
from os import path
import unittest
def make_async(f):
@wraps(f)
def test_wrapper(*args, **kwargs):
coro = asyncio.coroutine(f)
future = coro(*args, **kwargs)
asyncio.get_event_loop().run_until_complete(future)
return test_wrapper
class TestAlphaVantageAsync(unittest.TestCase):
"""
Async local tests for AlphaVantage components
"""
_API_KEY_TEST = "test"
_API_EQ_NAME_TEST = 'MSFT'
@staticmethod
def get_file_from_url(url):
"""
Return the file name used for testing, found in the test data folder
formed using the original url
"""
tmp = url
for ch in [':', '/', '.', '?', '=', '&', ',']:
if ch in tmp:
tmp = tmp.replace(ch, '_')
path_dir = path.join(path.dirname(
path.abspath(__file__)), 'test_data/')
return path.join(path.join(path_dir, tmp))
def test_key_none(self):
"""
Raise an error when a key has not been given
"""
try:
AlphaVantage()
self.fail(msg='A None api key must raise an error')
except ValueError:
self.assertTrue(True)
@make_async
async def test_handle_api_call(self):
"""
Test that api call returns a json file as requested
"""
av = AlphaVantage(key=TestAlphaVantageAsync._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&apikey=test"
path_file = self.get_file_from_url("mock_time_series")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data = await av._handle_api_call(url)
self.assertIsInstance(
data, dict, 'Result Data must be a dictionary')
await av.close()
@make_async
async def test_rapidapi_key(self):
"""
Test that the rapidAPI key calls the rapidAPI endpoint
"""
ts = TimeSeries(key=TestAlphaVantageAsync._API_KEY_TEST, rapidapi=True)
url = "https://alpha-vantage.p.rapidapi.com/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&datatype=json"
path_file = self.get_file_from_url("mock_time_series")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await ts.get_intraday(
"MSFT", interval='1min', outputsize='full')
self.assertIsInstance(
data, dict, 'Result Data must be a dictionary')
await ts.close()
@make_async
async def test_time_series_intraday(self):
"""
Test that api call returns a json file as requested
"""
ts = TimeSeries(key=TestAlphaVantageAsync._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json"
path_file = self.get_file_from_url("mock_time_series")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await ts.get_intraday(
"MSFT", interval='1min', outputsize='full')
self.assertIsInstance(
data, dict, 'Result Data must be a dictionary')
await ts.close()
@make_async
async def test_time_series_intraday_pandas(self):
"""
Test that api call returns a pandas data frame as requested
"""
ts = TimeSeries(key=TestAlphaVantageAsync._API_KEY_TEST,
output_format='pandas')
url = "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json"
path_file = self.get_file_from_url("mock_time_series")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await ts.get_intraday(
"MSFT", interval='1min', outputsize='full')
self.assertIsInstance(
data, df, 'Result Data must be a pandas data frame')
await ts.close()
@make_async
async def test_time_series_intraday_date_indexing(self):
"""
Test that api call returns a pandas data frame with a date as index
"""
ts = TimeSeries(key=TestAlphaVantageAsync._API_KEY_TEST,
output_format='pandas', indexing_type='date')
url = "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json"
path_file = self.get_file_from_url("mock_time_series")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await ts.get_intraday(
"MSFT", interval='1min', outputsize='full')
if ts.indexing_type == 'date':
assert isinstance(data.index[0], Timestamp)
else:
assert isinstance(data.index[0], str)
await ts.close()
@make_async
async def test_time_series_intraday_date_integer(self):
"""
Test that api call returns a pandas data frame with an integer as index
"""
ts = TimeSeries(key=TestAlphaVantageAsync._API_KEY_TEST,
output_format='pandas', indexing_type='integer')
url = "https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol=MSFT&interval=1min&outputsize=full&apikey=test&datatype=json"
path_file = self.get_file_from_url("mock_time_series")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await ts.get_intraday(
"MSFT", interval='1min', outputsize='full')
assert type(data.index[0]) == int
await ts.close()
@make_async
async def test_technical_indicator_sma_python3(self):
"""
Test that api call returns a json file as requested
"""
ti = TechIndicators(key=TestAlphaVantageAsync._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=SMA&symbol=MSFT&interval=15min&time_period=10&series_type=close&apikey=test"
path_file = self.get_file_from_url("mock_technical_indicator")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await ti.get_sma("MSFT", interval='15min',
time_period=10, series_type='close')
self.assertIsInstance(
data, dict, 'Result Data must be a dictionary')
await ti.close()
@make_async
async def test_technical_indicator_sma_pandas(self):
"""
Test that api call returns a pandas data frame as requested
"""
ti = TechIndicators(
key=TestAlphaVantageAsync._API_KEY_TEST, output_format='pandas')
url = "https://www.alphavantage.co/query?function=SMA&symbol=MSFT&interval=15min&time_period=10&series_type=close&apikey=test"
path_file = self.get_file_from_url("mock_technical_indicator")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await ti.get_sma("MSFT", interval='15min',
time_period=10, series_type='close')
self.assertIsInstance(
data, df, 'Result Data must be a pandas data frame')
await ti.close()
@make_async
async def test_sector_perfomance_python3(self):
"""
Test that api call returns a json file as requested
"""
sp = SectorPerformances(key=TestAlphaVantageAsync._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=SECTOR&apikey=test"
path_file = self.get_file_from_url("mock_sector")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await sp.get_sector()
self.assertIsInstance(
data, dict, 'Result Data must be a dictionary')
await sp.close()
@make_async
async def test_sector_perfomance_pandas(self):
"""
Test that api call returns a pandas data frame as requested
"""
sp = SectorPerformances(
key=TestAlphaVantageAsync._API_KEY_TEST, output_format='pandas')
url = "https://www.alphavantage.co/query?function=SECTOR&apikey=test"
path_file = self.get_file_from_url("mock_sector")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await sp.get_sector()
self.assertIsInstance(
data, df, 'Result Data must be a pandas data frame')
await sp.close()
@make_async
async def test_foreign_exchange(self):
"""
Test that api call returns a json file as requested
"""
fe = ForeignExchange(key=TestAlphaVantageAsync._API_KEY_TEST)
url = "https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=BTC&to_currency=CNY&apikey=test"
path_file = self.get_file_from_url("mock_foreign_exchange")
with open(path_file) as f, aioresponses() as m:
m.get(url, payload=json.loads(f.read()))
data, _ = await fe.get_currency_exchange_rate(
from_currency='BTC', to_currency='CNY')
self.assertIsInstance(
data, dict, 'Result Data must be a dictionary')
await fe.close()
| mit |
rbalda/neural_ocr | env/lib/python2.7/site-packages/numpy/core/tests/test_multiarray.py | 12 | 221093 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
# true for ieee floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
# test big arrays as they might be allocated differently by the system
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
# In Fortran order the subarray shape is appended, as in all other
# cases, rather than prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that broadcasting Fortran-style arrays with a subarray work
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
# Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
# check that the broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
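# 0xff bytes are truthy but not the canonical True representation;
# sum() must still count every element exactly once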
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
av = a.view(np.uint8)
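# scale the underlying bytes so the booleans are no longer stored as 0/1;
# count_nonzero must still report the same answer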
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes such as gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the less-than comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quicksort and mergesort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
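# every string shares the 8-character prefix, so comparisons have to
# reach the final character to decide the order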
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
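# strtype is deliberately the non-native byte order, so sorting on
# 'col2' exercises the byte-swapped comparison path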
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quicksort and mergesort fall back to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
for endianess in '<>':
for dt in np.typecodes['Complex']:
arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianess + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
# using axis=None is known to fail at this point
# assert_equal(a.copy().argsort(axis=None), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The searchsorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
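# a is laid out in the documented complex sort order (all-finite values
# first, then finite real with nan imaginary, then nan real, then
# nan+nanj), so searching a against itself should give arange(9)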
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
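# viewing the byte buffer at a 1-byte offset guarantees that
# `unaligned` is misaligned for a.dtype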
unaligned[:] = a
# Test searching unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
a = np.arange(5)
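# keys are queried in decreasing order, which presumably forces any
# search-window state carried over from the previous key to be reset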
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
np.random.shuffle(a)  # shuffle the data so the sorter below is non-trivial
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
# Test searching unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp on all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp on all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
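# s is a strided view of srt, i.e. a non-contiguous sorter array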
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
assert_equal(np.argpartition(a, 0, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
assert_equal(np.argpartition(a, 0, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
# median-of-3 killer input, O(n^2) for quickselect with a pure
# median-of-3 pivot; exercises the median-of-medians-of-5 code used
# to keep the runtime O(n)
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
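# rolling the sorted range by half its length produces two ascending
# runs, the adversarial pattern the comment above refers to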
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
for row in d1: np.random.shuffle(row)  # plain map() is lazy on Python 3 and would not shuffle
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
for row in d1: np.random.shuffle(row)  # plain map() is lazy on Python 3 and would not shuffle
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
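# reducing modulo a small number introduces ties, so the partition is
# also exercised on arrays with many equal elements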
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
d = [6, 7, 3, 2, 9, 0]
p = np.argpartition(d, 1)
self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
assert_equal(a.trace(0), 15)
assert_equal(a.trace(1), 18)
assert_equal(a.trace(-1), 13)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.trace(), [6, 8])
assert_equal(b.trace(0), [6, 8])
assert_equal(b.trace(1), [2, 3])
assert_equal(b.trace(-1), [4, 5])
assert_equal(b.trace(0, 0, 1), [6, 8])
assert_equal(b.trace(0, 0, 2), [5, 9])
assert_equal(b.trace(0, 1, 2), [3, 11])
assert_equal(b.trace(offset=1, axis1=0, axis2=2), [1, 3])
def test_trace_subclass(self):
# The class would need to overwrite trace to ensure single-element
# output also has the right subclass.
class MyArray(np.ndarray):
pass
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
assert isinstance(t, MyArray)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
# check that put requires the array to be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
# Test simple 1-d copy behaviour:
a = np.arange(10)[::2]
assert_(a.ravel('K').flags.owndata)
assert_(a.ravel('C').flags.owndata)
assert_(a.ravel('F').flags.owndata)
# Non-contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
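# axis 1 has length 1, so its stride can be set to an arbitrary value
# without changing which bytes the array addresses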
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# contiguous and 1-sized axis with non-matching stride works:
a = np.arange(2**3)
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel(order='K'), np.arange(2**3))
# Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
assert_(a.ravel(order='C').flags.owndata)
assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
# Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_ravel_subclass(self):
class ArraySubclass(np.ndarray):
pass
a = np.arange(10).view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
a = np.arange(10)[::2].view(ArraySubclass)
assert_(isinstance(a.ravel('C'), ArraySubclass))
assert_(isinstance(a.ravel('F'), ArraySubclass))
assert_(isinstance(a.ravel('A'), ArraySubclass))
assert_(isinstance(a.ravel('K'), ArraySubclass))
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
# the returned original should not have been changed by an in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# Scanning whether the refcount == 1 object is on the Python stack, to
# check that we are called directly from Python, is flawed: the object
# may still be above the stack pointer and we have no access to its top
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
# the returned original should not have been changed by an in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and a
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
# inplace pow requires the other object to be
# integer-like?
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
# Check that ufunc call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5864
return
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
# obj is first, so should get to define outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
# obj2 is an ndarray subclass, so CPython takes care of the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
        # obj3 is a subclass that defines __rsub__. CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
# Since obj3 is a subclass, it should have precedence, like CPython
# would give, even though obj2 has __numpy_ufunc__ and __radd__.
# See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
# version 0 pickles, using protocol=2 to pickle
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
# version 1 pickles, using protocol=2 to pickle
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
        x[:, :, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
        x[:, :, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
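    # each entry is (array, expected argmax index); for float/complex inputs
    # the position of the first nan is expected, while the datetime64 and
    # timedelta64 entries document how this numpy version orders NaT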
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (older versions allowed even the
        # previous shapes)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmax, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
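    # same layout as TestArgmax.nan_arr, but with the expected argmin index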
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (older versions allowed even the
        # previous shapes)
        out = np.ones((1, 10), dtype=np.int_)
        assert_raises(ValueError, a.argmin, -1, out)
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
#check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
#check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
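                # clip should hand back data in native byte order
                # ('|' for single-byte dtypes), regardless of the input order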
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
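    # nose-style generator tests: the tst_* helpers are yielded with
    # parameters, which is why this is a plain object and not a TestCase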
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
class TestLexsort(TestCase):
def test_basic(self):
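        # lexsort treats the last key as the primary sort key, so `a` is the
        # primary key here and `b` only breaks ties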
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() keeps less precision than repr(), so compare approximately
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
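        # after reading 2 bytes through the buffered file object and one
        # float64 via np.fromfile, the underlying file position must be
        # exactly 10, i.e. fromfile has to stay in sync with the buffering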
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
f.seek(0, 1) # seek between read&write required by ANSI C
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
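        # parse `s` twice, once with np.fromstring and once with np.fromfile
        # on a temporary file, and compare both results with `value`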
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
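        # a (contiguous) and b (strided view) are read-only; a0 and b0 are
        # writable counterparts used to check how writeability and
        # UPDATEIFCOPY propagate through .flat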
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
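        # a second reference to the array exists, so resize must refuse to
        # reallocate the buffer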
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(ValueError, b[0].__setitem__, fnn, 1)
assert_raises(ValueError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
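        # rmat: random real matrix, cmat: complex counterpart,
        # omat: the same values as Decimal objects (object dtype)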
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
# FIXME:
        # this needs definition as there are lots of places along the line
# where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        # var * (dim - ddof) and std * sqrt(dim - ddof) should not depend on
        # the ddof that was used
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
def test_vdot_uncontiguous(self):
for size in [2, 1000]:
# Different sizes match different branches in vdot.
a = np.zeros((size, 2, 2))
b = np.zeros((size, 2, 2))
a[:, 0, 0] = np.arange(size)
b[:, 0, 0] = np.arange(size) + 1
# Make a and b uncontiguous:
a = a[..., 0]
b = b[..., 0]
assert_equal(np.vdot(a, b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy()),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy(), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a.copy('F'), b),
np.vdot(a.flatten(), b.flatten()))
assert_equal(np.vdot(a, b.copy('F')),
np.vdot(a.flatten(), b.flatten()))
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
        tgt = np.array([[0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
        tgt = np.array([[0.00256425], [0.00131359], [0.00200324], [0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
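        # dout holds the expected result shape for each (dim1, dim2) pair,
        # in the order generated by itertools.product(dims, dims)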
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
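        # r should be referenced only by the local name and the getrefcount
        # argument, i.e. passing it repeatedly as the out array leaked nothing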
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
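        # regression test for OS X Accelerate's sgemv giving wrong results or
        # crashing on unaligned float32 data: build deliberately misaligned
        # single precision arrays and compare against a float64 reference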
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
            ((), (1,)), # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
        res = self.matmul(mat1, vec)
        assert_equal(res, tgt1)
        res = self.matmul(mat2, vec)
        assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
# Temporarily disable __numpy_ufunc__ for 1.10; see gh-5844
return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself segfaults or gives a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
def test_inner_product_with_various_contiguities(self):
# github issue 6532
for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
# check an inner product involving a matrix transpose
A = np.array([[1, 2], [3, 4]], dtype=dt)
B = np.array([[1, 3], [2, 4]], dtype=dt)
C = np.array([1, 1], dtype=dt)
desired = np.array([4, 6], dtype=dt)
assert_equal(np.inner(A.T, C), desired)
assert_equal(np.inner(B, C), desired)
# check an inner product involving an aliased and reversed view
a = np.arange(5).astype(dt)
b = a[::-1]
desired = np.array(10, dtype=dt).item()
assert_equal(np.inner(b, a), desired)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
assert_(str(A) == strA)
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[ 0 1 2 ..., 498 499 500]\n' \
' [ 501 502 503 ..., 999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
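# Maps padding-mode names to the integer codes expected by the
# test_neighborhood_iterator* helpers (these should correspond to the
# NPY_NEIGHBORHOOD_ITER_*_PADDING constants on the C side).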
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_usigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_usigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_usigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
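# np.dtype() does not preserve an empty ('') field name, so when the
# expected spec ends with an unnamed padding field, patch the name back
# before comparing against the PEP 3118 parse below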
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
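# VV(n): size of an n-byte void padding field rounded up to a multiple
# of the int alignment (aligned structs are padded to their alignment)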
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
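# deleting from a flat iterator should raise TypeError; any other
# exception is a test failure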
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
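# test_inplace_increment performs a[index] += vals, accumulating
# repeated indices (e.g. a[1, 0]: 4. + 50 + 50 == 104.)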
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
#All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
#unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
#Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check for unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.mod, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
| mit |
anntzer/scikit-learn | sklearn/metrics/_ranking.py | 7 | 66752 |
"""Metrics to assess performance on classification task given scores.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
import warnings
from functools import partial
import numpy as np
from scipy.sparse import csr_matrix
from scipy.stats import rankdata
from ..utils import assert_all_finite
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.extmath import stable_cumsum
from ..utils.sparsefuncs import count_nonzero
from ..utils.validation import _deprecate_positional_args
from ..exceptions import UndefinedMetricWarning
from ..preprocessing import label_binarize
from ..utils._encode import _encode, _unique
from ._base import (
_average_binary_score,
_average_multiclass_ovo_score,
_check_pos_label_consistency,
)
def auc(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule.
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`. For an alternative
way to summarize a precision-recall curve, see
:func:`average_precision_score`.
Parameters
----------
x : ndarray of shape (n,)
x coordinates. These must be either monotonic increasing or monotonic
decreasing.
y : ndarray of shape (n,)
y coordinates.
Returns
-------
auc : float
See Also
--------
roc_auc_score : Compute the area under the ROC curve.
average_precision_score : Compute average precision from prediction scores.
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
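# np.trapz assumes x is sorted in increasing order; a monotonically
# decreasing x only flips the sign of the integral, so detect that case
# and compensate with the sign of `direction`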
direction = 1
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("x is neither increasing nor decreasing "
": {}.".format(x))
area = direction * np.trapz(y, x)
if isinstance(area, np.memmap):
# Reductions such as .sum used internally in np.trapz do not return a
# scalar by default for numpy.memmap instances contrary to
# regular numpy.ndarray instances.
area = area.dtype.type(area)
return area
@_deprecate_positional_args
def average_precision_score(y_true, y_score, *, average="macro", pos_label=1,
sample_weight=None):
"""Compute average precision (AP) from prediction scores.
AP summarizes a precision-recall curve as the weighted mean of precisions
achieved at each threshold, with the increase in recall from the previous
threshold used as the weight:
.. math::
\\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
where :math:`P_n` and :math:`R_n` are the precision and recall at the nth
threshold [1]_. This implementation is not interpolated and is different
from computing the area under the precision-recall curve with the
trapezoidal rule, which uses linear interpolation and can be too
optimistic.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : ndarray of shape (n_samples,) or (n_samples, n_classes)
True binary labels or binary label indicators.
y_score : ndarray of shape (n_samples,) or (n_samples, n_classes)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by :term:`decision_function` on some classifiers).
average : {'micro', 'samples', 'weighted', 'macro'} or None, \
default='macro'
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
pos_label : int or str, default=1
The label of the positive class. Only applied to binary ``y_true``.
For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
average_precision : float
See Also
--------
roc_auc_score : Compute the area under the ROC curve.
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
Notes
-----
.. versionchanged:: 0.19
Instead of linearly interpolating between operating points, precisions
are weighted by the change in recall since the last operating point.
References
----------
.. [1] `Wikipedia entry for the Average precision
<https://en.wikipedia.org/w/index.php?title=Information_retrieval&
oldid=793358396#Average_precision>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores)
0.83...
"""
def _binary_uninterpolated_average_precision(
y_true, y_score, pos_label=1, sample_weight=None):
precision, recall, _ = precision_recall_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve
return -np.sum(np.diff(recall) * np.array(precision)[:-1])
y_type = type_of_target(y_true)
if y_type == "multilabel-indicator" and pos_label != 1:
raise ValueError("Parameter pos_label is fixed to 1 for "
"multilabel-indicator y_true. Do not set "
"pos_label or set pos_label to 1.")
elif y_type == "binary":
# Convert to Python primitive type to avoid NumPy type / Python str
# comparison. See https://github.com/numpy/numpy/issues/6784
present_labels = np.unique(y_true).tolist()
if len(present_labels) == 2 and pos_label not in present_labels:
raise ValueError(
f"pos_label={pos_label} is not a valid label. It should be "
f"one of {present_labels}"
)
average_precision = partial(_binary_uninterpolated_average_precision,
pos_label=pos_label)
return _average_binary_score(average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def det_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute error rates for different probability thresholds.
.. note::
This metric is used for evaluation of ranking and error tradeoffs of
a binary classification task.
Read more in the :ref:`User Guide <det_curve>`.
.. versionadded:: 0.24
Parameters
----------
y_true : ndarray of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : ndarray of shape of (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
The label of the positive class.
When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
fpr : ndarray of shape (n_thresholds,)
False positive rate (FPR) such that element i is the false positive
rate of predictions with score >= thresholds[i]. This is occasionally
referred to as false acceptance probability or fall-out.
fnr : ndarray of shape (n_thresholds,)
False negative rate (FNR) such that element i is the false negative
rate of predictions with score >= thresholds[i]. This is occasionally
referred to as false rejection or miss rate.
thresholds : ndarray of shape (n_thresholds,)
Decreasing score values.
See Also
--------
plot_det_curve : Plot detection error tradeoff (DET) curve.
DetCurveDisplay : DET curve visualization.
roc_curve : Compute Receiver operating characteristic (ROC) curve.
precision_recall_curve : Compute precision-recall curve.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import det_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, fnr, thresholds = det_curve(y_true, y_scores)
>>> fpr
array([0.5, 0.5, 0. ])
>>> fnr
array([0. , 0.5, 0.5])
>>> thresholds
array([0.35, 0.4 , 0.8 ])
"""
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. Detection error "
"tradeoff curve is not defined in that case.")
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
)
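# false negatives at each threshold; tps[-1] and fps[-1] are the total
# (possibly weighted) numbers of positive and negative samples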
fns = tps[-1] - tps
p_count = tps[-1]
n_count = fps[-1]
# start with false positives zero
first_ind = (
fps.searchsorted(fps[0], side='right') - 1
if fps.searchsorted(fps[0], side='right') > 0
else None
)
# stop with false negatives zero
last_ind = tps.searchsorted(tps[-1]) + 1
sl = slice(first_ind, last_ind)
# reverse the output such that list of false positives is decreasing
return (
fps[sl][::-1] / n_count,
fns[sl][::-1] / p_count,
thresholds[sl][::-1]
)
def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):
"""Binary roc auc score."""
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, _ = roc_curve(y_true, y_score,
sample_weight=sample_weight)
if max_fpr is None or max_fpr == 1:
return auc(fpr, tpr)
if max_fpr <= 0 or max_fpr > 1:
raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr)
# Add a single point at max_fpr by linear interpolation
stop = np.searchsorted(fpr, max_fpr, 'right')
x_interp = [fpr[stop - 1], fpr[stop]]
y_interp = [tpr[stop - 1], tpr[stop]]
tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))
fpr = np.append(fpr[:stop], max_fpr)
partial_auc = auc(fpr, tpr)
# McClish correction: standardize result to be 0.5 if non-discriminant
# and 1 if maximal
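# min_area is the partial AUC of the chance diagonal (tpr == fpr) over
# [0, max_fpr]; max_area is that of a perfect classifier (tpr == 1)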
min_area = 0.5 * max_fpr**2
max_area = max_fpr
return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
@_deprecate_positional_args
def roc_auc_score(y_true, y_score, *, average="macro", sample_weight=None,
max_fpr=None, multi_class="raise", labels=None):
"""Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC)
from prediction scores.
Note: this implementation can be used with binary, multiclass and
multilabel classification, but some restrictions apply (see Parameters).
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
True labels or binary label indicators. The binary and multiclass cases
expect labels with shape (n_samples,) while the multilabel case expects
binary label indicators with shape (n_samples, n_classes).
y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
Target scores.
* In the binary case, it corresponds to an array of shape
`(n_samples,)`. Both probability estimates and non-thresholded
decision values can be provided. The probability estimates correspond
to the **probability of the class with the greater label**,
i.e. `estimator.classes_[1]` and thus
`estimator.predict_proba(X, y)[:, 1]`. The decision values
corresponds to the output of `estimator.decision_function(X, y)`.
See more information in the :ref:`User guide <roc_auc_binary>`;
* In the multiclass case, it corresponds to an array of shape
`(n_samples, n_classes)` of probability estimates provided by the
`predict_proba` method. The probability estimates **must**
sum to 1 across the possible classes. In addition, the order of the
class scores must correspond to the order of ``labels``,
if provided, or else to the numerical or lexicographical order of
the labels in ``y_true``. See more information in the
:ref:`User guide <roc_auc_multiclass>`;
* In the multilabel case, it corresponds to an array of shape
`(n_samples, n_classes)`. Probability estimates are provided by the
`predict_proba` method and the non-thresholded decision values by
the `decision_function` method. The probability estimates correspond
to the **probability of the class with the greater label for each
output** of the classifier. See more information in the
:ref:`User guide <roc_auc_multilabel>`.
average : {'micro', 'macro', 'samples', 'weighted'} or None, \
default='macro'
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
Note: multiclass ROC AUC currently only handles the 'macro' and
'weighted' averages.
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
max_fpr : float > 0 and <= 1, default=None
If not ``None``, the standardized partial AUC [2]_ over the range
[0, max_fpr] is returned. For the multiclass case, ``max_fpr``
should be either equal to ``None`` or ``1.0`` as AUC ROC partial
computation currently is not supported for multiclass.
multi_class : {'raise', 'ovr', 'ovo'}, default='raise'
Only used for multiclass targets. Determines the type of configuration
to use. The default value raises an error, so either
``'ovr'`` or ``'ovo'`` must be passed explicitly.
``'ovr'``:
Stands for One-vs-rest. Computes the AUC of each class
against the rest [3]_ [4]_. This
treats the multiclass case in the same way as the multilabel case.
Sensitive to class imbalance even when ``average == 'macro'``,
because class imbalance affects the composition of each of the
'rest' groupings.
``'ovo'``:
Stands for One-vs-one. Computes the average AUC of all
possible pairwise combinations of classes [5]_.
Insensitive to class imbalance when
``average == 'macro'``.
labels : array-like of shape (n_classes,), default=None
Only used for multiclass targets. List of labels that index the
classes in ``y_score``. If ``None``, the numerical or lexicographical
order of the labels in ``y_true`` is used.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. [2] `Analyzing a portion of the ROC curve. McClish, 1989
<https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_
.. [3] Provost, F., Domingos, P. (2000). Well-trained PETs: Improving
probability estimation trees (Section 6.2), CeDER Working Paper
#IS-00-04, Stern School of Business, New York University.
.. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern
Recognition Letters, 27(8), 861-874.
<https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_
.. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area
Under the ROC Curve for Multiple Class Classification Problems.
Machine Learning, 45(2), 171-186.
<http://link.springer.com/article/10.1023/A:1010920819831>`_
See Also
--------
average_precision_score : Area under the precision-recall curve.
roc_curve : Compute Receiver operating characteristic (ROC) curve.
plot_roc_curve : Plot Receiver operating characteristic (ROC) curve.
Examples
--------
Binary case:
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.metrics import roc_auc_score
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y)
>>> roc_auc_score(y, clf.predict_proba(X)[:, 1])
0.99...
>>> roc_auc_score(y, clf.decision_function(X))
0.99...
Multiclass case:
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(solver="liblinear").fit(X, y)
>>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr')
0.99...
Multilabel case:
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> X, y = make_multilabel_classification(random_state=0)
>>> clf = MultiOutputClassifier(clf).fit(X, y)
>>> # get a list of n_output containing probability arrays of shape
>>> # (n_samples, n_classes)
>>> y_pred = clf.predict_proba(X)
>>> # extract the positive columns for each output
>>> y_pred = np.transpose([pred[:, 1] for pred in y_pred])
>>> roc_auc_score(y, y_pred, average=None)
array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...])
>>> from sklearn.linear_model import RidgeClassifierCV
>>> clf = RidgeClassifierCV().fit(X, y)
>>> roc_auc_score(y, clf.decision_function(X), average=None)
array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...])
"""
y_type = type_of_target(y_true)
y_true = check_array(y_true, ensure_2d=False, dtype=None)
y_score = check_array(y_score, ensure_2d=False)
if y_type == "multiclass" or (y_type == "binary" and
y_score.ndim == 2 and
y_score.shape[1] > 2):
# do not support partial ROC computation for multiclass
if max_fpr is not None and max_fpr != 1.:
raise ValueError("Partial AUC computation not available in "
"multiclass setting, 'max_fpr' must be"
" set to `None`, received `max_fpr={0}` "
"instead".format(max_fpr))
if multi_class == 'raise':
raise ValueError("multi_class must be in ('ovo', 'ovr')")
return _multiclass_roc_auc_score(y_true, y_score, labels,
multi_class, average, sample_weight)
elif y_type == "binary":
labels = np.unique(y_true)
y_true = label_binarize(y_true, classes=labels)[:, 0]
return _average_binary_score(partial(_binary_roc_auc_score,
max_fpr=max_fpr),
y_true, y_score, average,
sample_weight=sample_weight)
else: # multilabel-indicator
return _average_binary_score(partial(_binary_roc_auc_score,
max_fpr=max_fpr),
y_true, y_score, average,
sample_weight=sample_weight)
def _multiclass_roc_auc_score(y_true, y_score, labels,
multi_class, average, sample_weight):
"""Multiclass roc auc score.
Parameters
----------
y_true : array-like of shape (n_samples,)
True multiclass labels.
y_score : array-like of shape (n_samples, n_classes)
Target scores corresponding to probability estimates of a sample
belonging to a particular class
labels : array-like of shape (n_classes,) or None
List of labels to index ``y_score`` used for multiclass. If ``None``,
the lexical order of ``y_true`` is used to index ``y_score``.
multi_class : {'ovr', 'ovo'}
Determines the type of multiclass configuration to use.
``'ovr'``:
Calculate metrics for the multiclass case using the one-vs-rest
approach.
``'ovo'``:
Calculate metrics for the multiclass case using the one-vs-one
approach.
average : {'macro', 'weighted'}
Determines the type of averaging performed on the pairwise binary
metric scores
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account. Classes
are assumed to be uniformly distributed.
``'weighted'``:
Calculate metrics for each label, taking into account the
prevalence of the classes.
sample_weight : array-like of shape (n_samples,) or None
Sample weights.
"""
# validation of the input y_score
if not np.allclose(1, y_score.sum(axis=1)):
raise ValueError(
"Target scores need to be probabilities for multiclass "
"roc_auc, i.e. they should sum up to 1.0 over classes")
# validation for multiclass parameter specifications
average_options = ("macro", "weighted")
if average not in average_options:
raise ValueError("average must be one of {0} for "
"multiclass problems".format(average_options))
multiclass_options = ("ovo", "ovr")
if multi_class not in multiclass_options:
raise ValueError("multi_class='{0}' is not supported "
"for multiclass ROC AUC, multi_class must be "
"in {1}".format(
multi_class, multiclass_options))
if labels is not None:
labels = column_or_1d(labels)
classes = _unique(labels)
if len(classes) != len(labels):
raise ValueError("Parameter 'labels' must be unique")
if not np.array_equal(classes, labels):
raise ValueError("Parameter 'labels' must be ordered")
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of given labels, {0}, not equal to the number "
"of columns in 'y_score', {1}".format(
len(classes), y_score.shape[1]))
if len(np.setdiff1d(y_true, classes)):
raise ValueError(
"'y_true' contains labels not in parameter 'labels'")
else:
classes = _unique(y_true)
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of classes in y_true not equal to the number of "
"columns in 'y_score'")
if multi_class == "ovo":
if sample_weight is not None:
raise ValueError("sample_weight is not supported "
"for multiclass one-vs-one ROC AUC, "
"'sample_weight' must be None in this case.")
y_true_encoded = _encode(y_true, uniques=classes)
# Hand & Till (2001) implementation (ovo)
return _average_multiclass_ovo_score(_binary_roc_auc_score,
y_true_encoded,
y_score, average=average)
else:
# ovr is same as multi-label
y_true_multilabel = label_binarize(y_true, classes=classes)
return _average_binary_score(_binary_roc_auc_score, y_true_multilabel,
y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : ndarray of shape (n_samples,)
True targets of binary classification.
y_score : ndarray of shape (n_samples,)
Estimated probabilities or output of a decision function.
pos_label : int or str, default=None
The label of the positive class.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
fps : ndarray of shape (n_thresholds,)
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : ndarray of shape (n_thresholds,)
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : ndarray of shape (n_thresholds,)
Decreasing score values.
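Notes
-----
Illustrative example (worked out by hand; not part of the original
docstring): with ``y_true = [0, 0, 1, 1]`` and
``y_score = [0.1, 0.4, 0.35, 0.8]``, the scores in decreasing order are
``[0.8, 0.4, 0.35, 0.1]``, so the function returns ``fps = [0, 1, 1, 2]``,
``tps = [1, 1, 2, 2]`` and ``thresholds = [0.8, 0.4, 0.35, 0.1]``.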
"""
# Check to make sure y_true is valid
y_type = type_of_target(y_true)
if not (y_type == "binary" or
(y_type == "multiclass" and pos_label is not None)):
raise ValueError("{0} format is not supported".format(y_type))
check_consistent_length(y_true, y_score, sample_weight)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
assert_all_finite(y_true)
assert_all_finite(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
pos_label = _check_pos_label_consistency(pos_label, y_true)
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = stable_cumsum(y_true * weight)[threshold_idxs]
if sample_weight is not None:
# express fps as a cumsum to ensure fps is increasing even in
# the presence of floating point errors
fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs]
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
@_deprecate_positional_args
def precision_recall_curve(y_true, probas_pred, *, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds.
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : ndarray of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
probas_pred : ndarray of shape (n_samples,)
Estimated probabilities or output of a decision function.
pos_label : int or str, default=None
The label of the positive class.
When ``pos_label=None``, if y_true is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
precision : ndarray of shape (n_thresholds + 1,)
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : ndarray of shape (n_thresholds + 1,)
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : ndarray of shape (n_thresholds,)
Increasing thresholds on the decision function used to compute
precision and recall. n_thresholds <= len(np.unique(probas_pred)).
See Also
--------
plot_precision_recall_curve : Plot Precision Recall Curve for binary
classifiers.
PrecisionRecallDisplay : Precision Recall visualization.
average_precision_score : Compute average precision from prediction scores.
det_curve: Compute error rates for different probability thresholds.
roc_curve : Compute Receiver operating characteristic (ROC) curve.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision
array([0.66666667, 0.5 , 1. , 1. ])
>>> recall
array([1. , 0.5, 0.5, 0. ])
>>> thresholds
array([0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
precision[np.isnan(precision)] = 0
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
@_deprecate_positional_args
def roc_curve(y_true, y_score, *, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC).
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : ndarray of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : ndarray of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
The label of the positive class.
When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : bool, default=True
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : ndarray of shape (>2,)
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= `thresholds[i]`.
tpr : ndarray of shape (>2,)
Increasing true positive rates such that element `i` is the true
positive rate of predictions with score >= `thresholds[i]`.
thresholds : ndarray of shape = (n_thresholds,)
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See Also
--------
plot_roc_curve : Plot Receiver operating characteristic (ROC) curve.
RocCurveDisplay : ROC Curve visualization.
det_curve: Compute error rates for different probability thresholds.
roc_auc_score : Compute the area under the ROC curve.
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition
Letters, 2006, 27(8):861-874.
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([0. , 0. , 0.5, 0.5, 1. ])
>>> tpr
array([0. , 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([1.8 , 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
# Add an extra threshold position
# to make sure that the curve starts at (0, 0)
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
@_deprecate_positional_args
def label_ranking_average_precision_score(y_true, y_score, *,
sample_weight=None):
"""Compute ranking-based average precision.
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : {ndarray, sparse matrix} of shape (n_samples, n_labels)
True binary labels in binary indicator format.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 0.20
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score)
0.416...
"""
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
# If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
aux = 1.
else:
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
aux = (L / rank).mean()
if sample_weight is not None:
aux = aux * sample_weight[i]
out += aux
if sample_weight is None:
out /= n_samples
else:
out /= np.sum(sample_weight)
return out
@_deprecate_positional_args
def coverage_error(y_true, y_score, *, sample_weight=None):
"""Coverage error measure.
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_score`` are broken by giving the maximal rank that would have
been assigned to all tied values.
Note: Our implementation's score is 1 greater than the one given in
Tsoumakas et al., 2010. This extends it to handle the degenerate case
in which an instance has 0 true labels.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True binary labels in binary indicator format.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
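Examples
--------
Illustrative example (worked out by hand): the first sample needs the top
2 scores to cover its single true label, the second needs all 3.
>>> import numpy as np
>>> from sklearn.metrics import coverage_error
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> coverage_error(y_true, y_score)
2.5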
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
@_deprecate_positional_args
def label_ranking_loss(y_true, y_score, *, sample_weight=None):
"""Compute Ranking loss measure.
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : {ndarray, sparse matrix} of shape (n_samples, n_labels)
True binary labels in binary indicator format.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
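Examples
--------
Illustrative example (worked out by hand): in the first sample one false
label outranks the true label (loss 1/2), in the second both false labels
do (loss 2/2), giving an average of 0.75.
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_loss
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_loss(y_true, y_score)
0.75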
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = np.bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = np.bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
# incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
# When there are no positive or no negative labels, those values should
# be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
def _dcg_sample_scores(y_true, y_score, k=None,
log_base=2, ignore_ties=False):
"""Compute Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If None, use all
outputs.
log_base : float, default=2
Base of the logarithm used for the discount. A low value means a
sharper discount (top results are more important).
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
discounted_cumulative_gain : ndarray of shape (n_samples,)
The DCG score for each sample.
See Also
--------
ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted
Cumulative Gain (the DCG obtained for a perfect ranking), in order to
have a score between 0 and 1.
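Notes
-----
Illustrative example: with the default ``log_base=2`` the discounts applied
to ranks 1, 2 and 3 are ``1 / log2(2) = 1``, ``1 / log2(3)`` (about 0.63)
and ``1 / log2(4) = 0.5``, so gains placed lower in the ranking contribute
less to the sum.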
"""
discount = 1 / (np.log(np.arange(y_true.shape[1]) + 2) / np.log(log_base))
if k is not None:
discount[k:] = 0
if ignore_ties:
ranking = np.argsort(y_score)[:, ::-1]
ranked = y_true[np.arange(ranking.shape[0])[:, np.newaxis], ranking]
cumulative_gains = discount.dot(ranked.T)
else:
discount_cumsum = np.cumsum(discount)
cumulative_gains = [_tie_averaged_dcg(y_t, y_s, discount_cumsum)
for y_t, y_s in zip(y_true, y_score)]
cumulative_gains = np.asarray(cumulative_gains)
return cumulative_gains
def _tie_averaged_dcg(y_true, y_score, discount_cumsum):
"""
Compute DCG by averaging over possible permutations of ties.
The gain (`y_true`) of an index falling inside a tied group (in the order
induced by `y_score`) is replaced by the average gain within this group.
The discounted gain for a tied group is then the average `y_true` within
this group times the sum of discounts of the corresponding ranks.
This amounts to averaging scores for all possible orderings of the tied
groups.
(note in the case of dcg@k the discount is 0 after index k)
Parameters
----------
y_true : ndarray
The true relevance scores.
y_score : ndarray
Predicted scores.
discount_cumsum : ndarray
Precomputed cumulative sum of the discounts.
Returns
-------
discounted_cumulative_gain : float
The discounted cumulative gain.
References
----------
McSherry, F., & Najork, M. (2008, March). Computing information retrieval
performance measures efficiently in the presence of tied scores. In
European conference on information retrieval (pp. 414-421). Springer,
Berlin, Heidelberg.
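Illustrative example: with ``y_true = [3, 1, 2]`` and ``y_score = [1, 1, 0]``
the first two entries are tied, so their gains are replaced by the group
mean ``(3 + 1) / 2 = 2`` and multiplied by the summed discounts of ranks 1
and 2, while the remaining entry keeps the rank-3 discount.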
"""
_, inv, counts = np.unique(
- y_score, return_inverse=True, return_counts=True)
ranked = np.zeros(len(counts))
np.add.at(ranked, inv, y_true)
ranked /= counts
groups = np.cumsum(counts) - 1
discount_sums = np.empty(len(counts))
discount_sums[0] = discount_cumsum[groups[0]]
discount_sums[1:] = np.diff(discount_cumsum[groups])
return (ranked * discount_sums).sum()
def _check_dcg_target_type(y_true):
y_type = type_of_target(y_true)
supported_fmt = ("multilabel-indicator", "continuous-multioutput",
"multiclass-multioutput")
if y_type not in supported_fmt:
raise ValueError(
"Only {} formats are supported. Got {} instead".format(
supported_fmt, y_type))
@_deprecate_positional_args
def dcg_score(y_true, y_score, *, k=None,
log_base=2, sample_weight=None, ignore_ties=False):
"""Compute Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Usually the Normalized Discounted Cumulative Gain (NDCG, computed by
ndcg_score) is preferred.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If None, use all
outputs.
log_base : float, default=2
Base of the logarithm used for the discount. A low value means a
sharper discount (top results are more important).
sample_weight : ndarray of shape (n_samples,), default=None
Sample weights. If None, all samples are given the same weight.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
discounted_cumulative_gain : float
The averaged sample DCG scores.
See Also
--------
ndcg_score : The Discounted Cumulative Gain divided by the Ideal Discounted
Cumulative Gain (the DCG obtained for a perfect ranking), in order to
have a score between 0 and 1.
References
----------
`Wikipedia entry for Discounted Cumulative Gain
<https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_.
Jarvelin, K., & Kekalainen, J. (2002).
Cumulated gain-based evaluation of IR techniques. ACM Transactions on
Information Systems (TOIS), 20(4), 422-446.
Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).
A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th
Annual Conference on Learning Theory (COLT 2013).
McSherry, F., & Najork, M. (2008, March). Computing information retrieval
performance measures efficiently in the presence of tied scores. In
European conference on information retrieval (pp. 414-421). Springer,
Berlin, Heidelberg.
Examples
--------
>>> from sklearn.metrics import dcg_score
>>> # we have ground-truth relevance of some answers to a query:
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
>>> # we predict scores for the answers
>>> scores = np.asarray([[.1, .2, .3, 4, 70]])
>>> dcg_score(true_relevance, scores)
9.49...
>>> # we can set k to truncate the sum; only top k answers contribute
>>> dcg_score(true_relevance, scores, k=2)
5.63...
>>> # now we have some ties in our prediction
>>> scores = np.asarray([[1, 0, 0, 0, 1]])
>>> # by default ties are averaged, so here we get the average true
>>> # relevance of our top predictions: (10 + 5) / 2 = 7.5
>>> dcg_score(true_relevance, scores, k=1)
7.5
>>> # we can choose to ignore ties for faster results, but only
>>> # if we know there aren't ties in our scores, otherwise we get
>>> # wrong results:
>>> dcg_score(true_relevance,
... scores, k=1, ignore_ties=True)
5.0
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
_check_dcg_target_type(y_true)
return np.average(
_dcg_sample_scores(
y_true, y_score, k=k, log_base=log_base,
ignore_ties=ignore_ties),
weights=sample_weight)
def _ndcg_sample_scores(y_true, y_score, k=None, ignore_ties=False):
"""Compute Normalized Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount. Then divide by the best possible
score (Ideal DCG, obtained for a perfect ranking) to obtain a score between
0 and 1.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If None, use all
outputs.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
normalized_discounted_cumulative_gain : ndarray of shape (n_samples,)
The NDCG score for each sample (float in [0., 1.]).
See Also
--------
dcg_score : Discounted Cumulative Gain (not normalized).
"""
gain = _dcg_sample_scores(y_true, y_score, k, ignore_ties=ignore_ties)
# Here we use the order induced by y_true so we can ignore ties since
# the gain associated to tied indices is the same (permuting ties doesn't
# change the value of the re-ordered y_true)
normalizing_gain = _dcg_sample_scores(y_true, y_true, k, ignore_ties=True)
all_irrelevant = normalizing_gain == 0
gain[all_irrelevant] = 0
gain[~all_irrelevant] /= normalizing_gain[~all_irrelevant]
return gain
@_deprecate_positional_args
def ndcg_score(y_true, y_score, *, k=None, sample_weight=None,
ignore_ties=False):
"""Compute Normalized Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount. Then divide by the best possible
score (Ideal DCG, obtained for a perfect ranking) to obtain a score between
0 and 1.
This ranking metric yields a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If None, use all
outputs.
sample_weight : ndarray of shape (n_samples,), default=None
Sample weights. If None, all samples are given the same weight.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
normalized_discounted_cumulative_gain : float in [0., 1.]
The averaged NDCG scores for all samples.
See Also
--------
dcg_score : Discounted Cumulative Gain (not normalized).
References
----------
`Wikipedia entry for Discounted Cumulative Gain
<https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
Jarvelin, K., & Kekalainen, J. (2002).
Cumulated gain-based evaluation of IR techniques. ACM Transactions on
Information Systems (TOIS), 20(4), 422-446.
Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).
A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th
Annual Conference on Learning Theory (COLT 2013)
McSherry, F., & Najork, M. (2008, March). Computing information retrieval
performance measures efficiently in the presence of tied scores. In
European conference on information retrieval (pp. 414-421). Springer,
Berlin, Heidelberg.
Examples
--------
>>> from sklearn.metrics import ndcg_score
>>> # we have ground-truth relevance of some answers to a query:
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
>>> # we predict some scores (relevance) for the answers
>>> scores = np.asarray([[.1, .2, .3, 4, 70]])
>>> ndcg_score(true_relevance, scores)
0.69...
>>> scores = np.asarray([[.05, 1.1, 1., .5, .0]])
>>> ndcg_score(true_relevance, scores)
0.49...
>>> # we can set k to truncate the sum; only top k answers contribute.
>>> ndcg_score(true_relevance, scores, k=4)
0.35...
>>> # the normalization takes k into account so a perfect answer
>>> # would still get 1.0
>>> ndcg_score(true_relevance, true_relevance, k=4)
1.0
>>> # now we have some ties in our prediction
>>> scores = np.asarray([[1, 0, 0, 0, 1]])
>>> # by default ties are averaged, so here we get the average (normalized)
>>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75
>>> ndcg_score(true_relevance, scores, k=1)
0.75
>>> # we can choose to ignore ties for faster results, but only
>>> # if we know there aren't ties in our scores, otherwise we get
>>> # wrong results:
>>> ndcg_score(true_relevance,
... scores, k=1, ignore_ties=True)
0.5
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
_check_dcg_target_type(y_true)
gain = _ndcg_sample_scores(y_true, y_score, k=k, ignore_ties=ignore_ties)
return np.average(gain, weights=sample_weight)
def top_k_accuracy_score(y_true, y_score, *, k=2, normalize=True,
sample_weight=None, labels=None):
"""Top-k Accuracy classification score.
This metric computes the number of times where the correct label is among
the top `k` labels predicted (ranked by predicted scores). Note that the
multilabel case isn't covered here.
Read more in the :ref:`User Guide <top_k_accuracy_score>`
Parameters
----------
y_true : array-like of shape (n_samples,)
True labels.
y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
Target scores. These can be either probability estimates or
non-thresholded decision values (as returned by
:term:`decision_function` on some classifiers). The binary case expects
scores with shape (n_samples,) while the multiclass case expects scores
with shape (n_samples, n_classes). In the multiclass case, the order of
the class scores must correspond to the order of ``labels``, if
provided, or else to the numerical or lexicographical order of the
labels in ``y_true``.
k : int, default=2
Number of most likely outcomes considered to find the correct label.
normalize : bool, default=True
If `True`, return the fraction of correctly classified samples.
Otherwise, return the number of correctly classified samples.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If `None`, all samples are given the same weight.
labels : array-like of shape (n_classes,), default=None
Multiclass only. List of labels that index the classes in ``y_score``.
If ``None``, the numerical or lexicographical order of the labels in
``y_true`` is used.
Returns
-------
score : float
The top-k accuracy score. The best performance is 1 with
`normalize == True` and the number of samples with
`normalize == False`.
See also
--------
accuracy_score
Notes
-----
In cases where two or more labels are assigned equal predicted scores,
the labels with the highest indices will be chosen first. This might
impact the result if the correct label falls after the threshold because
of that.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import top_k_accuracy_score
>>> y_true = np.array([0, 1, 2, 2])
>>> y_score = np.array([[0.5, 0.2, 0.2], # 0 is in top 2
... [0.3, 0.4, 0.2], # 1 is in top 2
... [0.2, 0.4, 0.3], # 2 is in top 2
... [0.7, 0.2, 0.1]]) # 2 isn't in top 2
>>> top_k_accuracy_score(y_true, y_score, k=2)
0.75
>>> # Not normalizing gives the number of "correctly" classified samples
>>> top_k_accuracy_score(y_true, y_score, k=2, normalize=False)
3
"""
y_true = check_array(y_true, ensure_2d=False, dtype=None)
y_true = column_or_1d(y_true)
y_type = type_of_target(y_true)
y_score = check_array(y_score, ensure_2d=False)
y_score = column_or_1d(y_score) if y_type == 'binary' else y_score
check_consistent_length(y_true, y_score, sample_weight)
if y_type not in {'binary', 'multiclass'}:
raise ValueError(
f"y type must be 'binary' or 'multiclass', got '{y_type}' instead."
)
y_score_n_classes = y_score.shape[1] if y_score.ndim == 2 else 2
if labels is None:
classes = _unique(y_true)
n_classes = len(classes)
if n_classes != y_score_n_classes:
raise ValueError(
f"Number of classes in 'y_true' ({n_classes}) not equal "
f"to the number of classes in 'y_score' ({y_score_n_classes})."
)
else:
labels = column_or_1d(labels)
classes = _unique(labels)
n_labels = len(labels)
n_classes = len(classes)
if n_classes != n_labels:
raise ValueError("Parameter 'labels' must be unique.")
if not np.array_equal(classes, labels):
raise ValueError("Parameter 'labels' must be ordered.")
if n_classes != y_score_n_classes:
raise ValueError(
f"Number of given labels ({n_classes}) not equal to the "
f"number of classes in 'y_score' ({y_score_n_classes})."
)
if len(np.setdiff1d(y_true, classes)):
raise ValueError(
"'y_true' contains labels not in parameter 'labels'."
)
if k >= n_classes:
warnings.warn(
f"'k' ({k}) greater than or equal to 'n_classes' ({n_classes}) "
"will result in a perfect score and is therefore meaningless.",
UndefinedMetricWarning
)
y_true_encoded = _encode(y_true, uniques=classes)
if y_type == 'binary':
if k == 1:
threshold = .5 if y_score.min() >= 0 and y_score.max() <= 1 else 0
y_pred = (y_score > threshold).astype(np.int64)
hits = y_pred == y_true_encoded
else:
hits = np.ones_like(y_score, dtype=np.bool_)
elif y_type == 'multiclass':
sorted_pred = np.argsort(y_score, axis=1, kind='mergesort')[:, ::-1]
hits = (y_true_encoded == sorted_pred[:, :k].T).any(axis=0)
if normalize:
return np.average(hits, weights=sample_weight)
elif sample_weight is None:
return np.sum(hits)
else:
return np.dot(hits, sample_weight)
| bsd-3-clause |
jiangzhonglian/MachineLearning | src/py3.x/ml/9.RegTrees/sklearn-regressTree-demo.py | 1 | 1681 | #!/usr/bin/python
# coding:utf8
"""
Created on 2017-07-13
Updated on 2017-07-13
RegressionTree: regression trees
Author: 小瑶
GitHub: https://github.com/apachecn/AiLearning
"""
print(__doc__)
# Import the necessary models and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
# Reference: https://docs.scipy.org/doc/numpy-1.6.0/reference/generated/numpy.random.mtrand.RandomState.html
rng = np.random.RandomState(1)
# print 'lalalalala===', rng
# rand() returns random values in the given shape; rng.rand(80, 1) is a matrix with 80 rows and 1 column
# sort()
X = np.sort(5 * rng.rand(80, 1), axis=0)
# print 'X=', X
y = np.sin(X).ravel()
# print 'y=', y
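# add noise to every 5th sample (80 / 5 = 16 points are perturbed)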
y[::5] += 3 * (0.5 - rng.rand(16))
# print 'yyy=', y
# Fit the regression model
# regr_1 = DecisionTreeRegressor(max_depth=2)
# Keeping max_depth=5 unchanged and adding min_samples_leaf=6 improves the result further
regr_2 = DecisionTreeRegressor(max_depth=5, min_samples_leaf=6)
# regr_3 = DecisionTreeRegressor(max_depth=4)
# regr_1.fit(X, y)
regr_2.fit(X, y)
# regr_3.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
# y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
# plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
# plt.plot(X_test, y_3, color="red", label="max_depth=3", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show() | gpl-3.0 |
myuuuuun/ThinkStats2-Notebook | code/density.py | 67 | 2934 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import math
import random
import brfss
import first
import thinkstats2
import thinkplot
def Summarize(data):
"""Prints summary statistics.
data: pandas Series
"""
mean = data.mean()
std = data.std()
median = thinkstats2.Median(data)
print('mean', mean)
print('std', std)
print('median', median)
print('skewness', thinkstats2.Skewness(data))
print('pearson skewness',
thinkstats2.PearsonMedianSkewness(data))
return mean, median
def ComputeSkewnesses():
"""Plots KDE of birthweight and adult weight.
"""
def VertLine(x, y):
thinkplot.Plot([x, x], [0, y], color='0.6', linewidth=1)
live, firsts, others = first.MakeFrames()
data = live.totalwgt_lb.dropna()
print('Birth weight')
mean, median = Summarize(data)
y = 0.35
VertLine(mean, y)
thinkplot.Text(mean-0.15, 0.1*y, 'mean', horizontalalignment='right')
VertLine(median, y)
thinkplot.Text(median+0.1, 0.1*y, 'median', horizontalalignment='left')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='birth weight')
thinkplot.Save(root='density_totalwgt_kde',
xlabel='lbs',
ylabel='PDF')
df = brfss.ReadBrfss(nrows=None)
data = df.wtkg2.dropna()
print('Adult weight')
mean, median = Summarize(data)
y = 0.02499
VertLine(mean, y)
thinkplot.Text(mean+1, 0.1*y, 'mean', horizontalalignment='left')
VertLine(median, y)
thinkplot.Text(median-1.5, 0.1*y, 'median', horizontalalignment='right')
pdf = thinkstats2.EstimatedPdf(data)
thinkplot.Pdf(pdf, label='adult weight')
thinkplot.Save(root='density_wtkg2_kde',
xlabel='kg',
ylabel='PDF',
xlim=[0, 200])
def MakePdfExample(n=500):
"""Plots a normal density function and a KDE estimate.
n: sample size
"""
# mean and var of women's heights in cm, from the BRFSS
mean, var = 163, 52.8
std = math.sqrt(var)
# make a PDF and compute a density, FWIW
pdf = thinkstats2.NormalPdf(mean, std)
print(pdf.Density(mean + std))
# make a PMF and plot it
thinkplot.PrePlot(2)
thinkplot.Pdf(pdf, label='normal')
# make a sample, make an estimated PDF, and plot it
sample = [random.gauss(mean, std) for _ in range(n)]
sample_pdf = thinkstats2.EstimatedPdf(sample)
thinkplot.Pdf(sample_pdf, label='sample KDE')
thinkplot.Save(root='pdf_example',
xlabel='Height (cm)',
ylabel='Density')
def main():
thinkstats2.RandomSeed(17)
MakePdfExample()
ComputeSkewnesses()
if __name__ == '__main__':
main()
| gpl-2.0 |
jereze/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 30 | 1812 | # Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
"""
Test BayesianRidge on diabetes
"""
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
"""
Test BayesianRidge on toy
"""
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
"""
Test BayesianRegression ARD classifier
"""
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
kviebahn/beam-cam | GaussBeamSimulation.py | 1 | 4694 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 30 15:57:43 2015
@author: Michael
This file is part of beam-cam, a camera project to monitor and characterise laser beams.
Copyright (C) 2015 Christian Gross <[email protected]>, Timon Hilker <[email protected]>, Michael Hoese <[email protected]>, and Konrad Viebahn <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Please see the README.md file for a copy of the GNU General Public License, or otherwise find it on <http://www.gnu.org/licenses/>.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def rotmatrix(alpha):
return np.array([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]])
def gaussian2(xy, *p):
'''Return the values of a 2D Gaussian with arbitrary centre and rotation, evaluated at the points xy'''
A, sx, x0, y0, sy,alpha,off = p
# M = np.array([[Bx,Bxy],[Bxy,By]])
R = rotmatrix(alpha)
M = np.dot(R,np.dot(np.array([[1./sx**2,0],[0,1./sy**2]]),R.T))
r = np.array([xy[:,0]-x0,xy[:,1]-y0])
g = A*np.exp(-2*np.sum(np.dot(M,r)*r,axis=0)) + off
# print g
return g
class GaussBeamSimulation:
'''Class that simulates a Gaussian beam profile image as captured by a camera'''
def __init__(self):
self.width = 754
self.height = 480
def NewImage(self):
self.image = np.zeros((self.height,self.width))
def AddWhiteNoise(self,expectation=150):
noise = np.random.poisson(expectation,self.image.shape).astype(int)
self.image += noise
def AddRandomGauss(self,meanamplitude=200,meansigmax=30,meansigmay=30,meanposition=[376,239]):
amplitude = np.random.poisson(meanamplitude)
sigmax = np.random.poisson(meansigmax)
sigmay = np.random.poisson(meansigmay)
position = [0,0]
position[0] = np.random.poisson(meanposition[0])
position[1] = np.random.poisson(meanposition[1])
rotationangle = np.random.choice([0,np.pi/2.])
# rotationangle = 0
offset = 0.
ny,nx = self.image.shape
x = np.arange(self.width)
y = np.arange(self.height)
XY = np.meshgrid(x,y)
XYflat = np.array(XY).reshape(2,nx*ny).T
params = [amplitude,sigmax,position[0],position[1],sigmay,rotationangle,offset]
gaussflat = gaussian2(XYflat,*params)
gauss = np.array(gaussflat).reshape(ny,nx)
self.image +=gauss
def SimulateTotalImage(self,expectation=150,meanamplitude=200,meansigmax=20,meansigmay=20,meanposition=[376,239]):
self.image = np.zeros((self.height,self.width))
noise = np.random.poisson(expectation,self.image.shape).astype(int)
amplitude = np.random.poisson(meanamplitude)
sigmax = np.random.poisson(meansigmax)
sigmay = np.random.poisson(meansigmay)
position = [0,0]
position[0] = np.random.poisson(meanposition[0])
position[1] = np.random.poisson(meanposition[1])
# rotationangle = np.random.uniform(0,np.pi)
rotationangle = np.random.choice([0,np.pi/2.])
offset = 0.
ny,nx = self.image.shape
x = np.arange(self.width)
y = np.arange(self.height)
XY = np.meshgrid(x,y)
XYflat = np.array(XY).reshape(2,nx*ny).T
params = [amplitude,sigmax,position[0],position[1],sigmay,rotationangle,offset]
gaussflat = gaussian2(XYflat,*params)
gauss = np.array(gaussflat).reshape(ny,nx)
self.image = (self.image + noise + gauss).astype(int)
#image saturation
self.image[np.where(self.image>2**14)]=2**14
def CreateImages(self,number=10):
i = 0
self.imageslist = []
for i in range(number):
self.SimulateTotalImage()
self.imageslist.append(self.image)
i += 1
def ChooseImage(self,number=10):
i = np.random.randint(0,number-1)
# print i, 'i'
self.image = self.imageslist[i]
def ShowImage(self):
plt.figure()
plt.imshow(self.image, cmap = cm.Greys_r)
plt.colorbar()
plt.show()
if __name__=="__main__":
test = GaussBeamSimulation()
test.NewImage()
test.AddWhiteNoise()
test.AddRandomGauss()
test.ShowImage()
| gpl-3.0 |
bmmalone/pymisc-utils | pyllars/nlp_utils.py | 1 | 1790 | """
This module contains helpers for performing natural language processing
tasks. Often, it wraps operations from nltk: http://www.nltk.org/
"""
# grab some sample text
import nltk
import nltk.stem
import nltk.corpus
import string
PUNCTUATION_TABLE = str.maketrans('', '', string.punctuation)
ENGLISH_STOP_WORDS = set(nltk.corpus.stopwords.words('english'))
SNOWBALL_STEMMER = nltk.stem.snowball.SnowballStemmer("english")
def clean_doc(doc):
""" Clean the given string using a standard pipeline
In particular, this function performs the following steps:
1. Tokenize the text
2. Convert to lower case
3. Remove `string.punctuation` characters from all words
4. Remove words which contain non-alphanumeric characters
5. Remove stop words (`nltk.corpus.stopwords.words('english')`)
6. Stem all remaining words (`nltk.stem.snowball.SnowballStemmer("english")`)
7. Join the stemmed words back with spaces
After this operation, the text is ready for downstream use with, for example,
the CountVectorizer from sklearn.
Parameters
----------
doc: str
The string to clean up
Returns
-------
cleaned_doc: str
The cleaned up string, using the pipeline described above
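Examples
--------
Illustrative example (assumes the required NLTK data, e.g. 'punkt' and
'stopwords', has already been downloaded; exact stems depend on NLTK's
Snowball stemmer):
>>> clean_doc("The quick brown foxes are running!")
'quick brown fox run'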
"""
# tokenize into words
words = nltk.word_tokenize(doc)
# convert to lower case
words = [w.lower() for w in words]
# remove punctuation from each word
words = [w.translate(PUNCTUATION_TABLE) for w in words]
# remove non-alphabetic words
words = [w for w in words if w.isalpha()]
# filter stopwords
words = [w for w in words if not w in ENGLISH_STOP_WORDS]
# stem
words = [SNOWBALL_STEMMER.stem(w) for w in words]
# join back
words = ' '.join(words)
return words
| mit |
olologin/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/testing/decorators.py | 2 | 10495 | from __future__ import print_function
from matplotlib.testing.noseclasses import KnownFailureTest, \
KnownFailureDidNotFailTest, ImageComparisonFailure
import os, sys, shutil
import nose
import matplotlib
import matplotlib.tests
import matplotlib.units
from matplotlib import ticker
from matplotlib import pyplot as plt
from matplotlib import ft2font
import numpy as np
from matplotlib.testing.compare import comparable_formats, compare_images, \
make_test_filename
import warnings
def knownfailureif(fail_condition, msg=None, known_exception_class=None ):
"""
Assume a test will fail if *fail_condition* is True. *fail_condition*
may also be False or the string 'indeterminate'.
*msg* is the error message displayed for the test.
If *known_exception_class* is not None, the failure is only known
if the exception is an instance of this class. (Default = None)
"""
# based on numpy.testing.dec.knownfailureif
if msg is None:
msg = 'Test known to fail'
def known_fail_decorator(f):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
def failer(*args, **kwargs):
try:
# Always run the test (to generate images).
result = f(*args, **kwargs)
except Exception as err:
if fail_condition:
if known_exception_class is not None:
if not isinstance(err,known_exception_class):
# This is not the expected exception
raise
# (Keep the next ultra-long comment so it shows in the console.)
raise KnownFailureTest(msg) # An error here when running nose means that you don't have the matplotlib.testing.noseclasses:KnownFailure plugin in use.
else:
raise
if fail_condition and fail_condition != 'indeterminate':
raise KnownFailureDidNotFailTest(msg)
return result
return nose.tools.make_decorator(f)(failer)
return known_fail_decorator
class CleanupTest(object):
@classmethod
def setup_class(cls):
cls.original_units_registry = matplotlib.units.registry.copy()
@classmethod
def teardown_class(cls):
plt.close('all')
matplotlib.tests.setup()
matplotlib.units.registry.clear()
matplotlib.units.registry.update(cls.original_units_registry)
warnings.resetwarnings() #reset any warning filters set in tests
def test(self):
self._func()
def cleanup(func):
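"""Wrap a test function in a CleanupTest subclass so that open figures,
the units registry and warning filters are restored after the test runs."""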
name = func.__name__
func = staticmethod(func)
func.__get__(1).__name__ = '_private'
new_class = type(
name,
(CleanupTest,),
{'_func': func})
return new_class
def check_freetype_version(ver):
if ver is None:
return True
from distutils import version
if isinstance(ver, str):
ver = (ver, ver)
ver = [version.StrictVersion(x) for x in ver]
found = version.StrictVersion(ft2font.__freetype_version__)
return found >= ver[0] and found <= ver[1]
class ImageComparisonTest(CleanupTest):
@classmethod
def setup_class(cls):
CleanupTest.setup_class()
cls._func()
@staticmethod
def remove_text(figure):
figure.suptitle("")
for ax in figure.get_axes():
ax.set_title("")
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.NullFormatter())
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
def test(self):
baseline_dir, result_dir = _image_directories(self._func)
for fignum, baseline in zip(plt.get_fignums(), self._baseline_images):
figure = plt.figure(fignum)
for extension in self._extensions:
will_fail = extension not in comparable_formats()
if will_fail:
fail_msg = 'Cannot compare %s files on this system' % extension
else:
fail_msg = 'No failure expected'
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.' + extension
if extension == 'eps' and not os.path.exists(orig_expected_fname):
orig_expected_fname = os.path.join(baseline_dir, baseline) + '.pdf'
expected_fname = make_test_filename(os.path.join(
result_dir, os.path.basename(orig_expected_fname)), 'expected')
actual_fname = os.path.join(result_dir, baseline) + '.' + extension
if os.path.exists(orig_expected_fname):
shutil.copyfile(orig_expected_fname, expected_fname)
else:
will_fail = True
fail_msg = 'Do not have baseline image %s' % expected_fname
@knownfailureif(
will_fail, fail_msg,
known_exception_class=ImageComparisonFailure)
def do_test():
if self._remove_text:
self.remove_text(figure)
figure.savefig(actual_fname)
err = compare_images(expected_fname, actual_fname,
self._tol, in_decorator=True)
try:
if not os.path.exists(expected_fname):
raise ImageComparisonFailure(
'image does not exist: %s' % expected_fname)
if err:
raise ImageComparisonFailure(
'images not close: %(actual)s vs. %(expected)s '
'(RMS %(rms).3f)'%err)
except ImageComparisonFailure:
if not check_freetype_version(self._freetype_version):
raise KnownFailureTest(
"Mismatched version of freetype. Test requires '%s', you have '%s'" %
(self._freetype_version, ft2font.__freetype_version__))
raise
yield (do_test,)
def image_comparison(baseline_images=None, extensions=None, tol=1e-3,
freetype_version=None, remove_text=False):
"""
call signature::
image_comparison(baseline_images=['my_figure'], extensions=None)
Compare images generated by the test with those specified in
*baseline_images*, which must correspond else an
ImageComparisonFailure exception will be raised.
Keyword arguments:
*baseline_images*: list
A list of strings specifying the names of the images generated
by calls to :meth:`matplotlib.figure.savefig`.
*extensions*: [ None | list ]
If *None*, default to all supported extensions.
Otherwise, a list of extensions to test. For example ['png','pdf'].
*tol*: (default 1e-3)
The RMS threshold above which the test is considered failed.
*freetype_version*: str or tuple
The expected freetype version or range of versions for this
test to pass.
*remove_text*: bool
Remove the title and tick text from the figure before
comparison. This does not remove other, more deliberate,
text, such as legends and annotations.
"""
if baseline_images is None:
raise ValueError('baseline_images must be specified')
if extensions is None:
# default extensions to test
extensions = ['png', 'pdf', 'svg']
def compare_images_decorator(func):
# We want to run the setup function (the actual test function
# that generates the figure objects) only once for each type
# of output file. The only way to achieve this with nose
# appears to be to create a test class with "setup_class" and
# "teardown_class" methods. Creating a class instance doesn't
# work, so we use type() to actually create a class and fill
# it with the appropriate methods.
name = func.__name__
# For nose 1.0, we need to rename the test function to
# something without the word "test", or it will be run as
# well, outside of the context of our image comparison test
# generator.
func = staticmethod(func)
func.__get__(1).__name__ = '_private'
new_class = type(
name,
(ImageComparisonTest,),
{'_func': func,
'_baseline_images': baseline_images,
'_extensions': extensions,
'_tol': tol,
'_freetype_version': freetype_version,
'_remove_text': remove_text})
return new_class
return compare_images_decorator
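# Illustrative usage sketch (editor's note), following the call signature
# documented in the docstring above; kept as a comment so import-time behaviour
# of this module is unchanged. 'test_my_figure' and the plotted data are made up.
#
#   @image_comparison(baseline_images=['my_figure'], extensions=['png'])
#   def test_my_figure():
#       fig = plt.figure()
#       fig.gca().plot([1, 2, 3])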
def _image_directories(func):
"""
Compute the baseline and result image directories for testing *func*.
Create the result directory if it doesn't exist.
"""
module_name = func.__module__
if module_name == '__main__':
# FIXME: this won't work for nested packages in matplotlib.tests
warnings.warn('test module run as script. guessing baseline image locations')
script_name = sys.argv[0]
basedir = os.path.abspath(os.path.dirname(script_name))
subdir = os.path.splitext(os.path.split(script_name)[1])[0]
else:
mods = module_name.split('.')
mods.pop(0) # <- will be the name of the package being tested (in
# most cases "matplotlib")
assert mods.pop(0) == 'tests'
subdir = os.path.join(*mods)
import imp
def find_dotted_module(module_name, path=None):
"""A version of imp which can handle dots in the module name"""
res = None
for sub_mod in module_name.split('.'):
res = _, path, _ = imp.find_module(sub_mod, path)
path = [path]
return res
mod_file = find_dotted_module(func.__module__)[1]
basedir = os.path.dirname(mod_file)
baseline_dir = os.path.join(basedir, 'baseline_images', subdir)
result_dir = os.path.abspath(os.path.join('result_images', subdir))
if not os.path.exists(result_dir):
os.makedirs(result_dir)
return baseline_dir, result_dir
| mit |
mattgiguere/scikit-learn | sklearn/utils/fixes.py | 29 | 12072 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
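# For reference (editor's note): _parse_version('1.8.1') -> (1, 8, 1), while a
# development build string such as '1.9.0.dev-1ea1592' yields
# (1, 9, 0, 'dev-1ea1592'), so the tuple comparisons used below still apply.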
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
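# Editor's note, illustrative only: the tanh formulation above keeps the
# fallback numerically stable, e.g. expit(0.0) == 0.5 and expit(1000.0)
# saturates to 1.0 instead of overflowing inside exp().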
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
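# Editor's illustrative sketch (not part of the original module): either branch
# returns per-column (axis=0) or per-row (axis=1) extrema as dense 1-D arrays.
#
#   X = sp.csr_matrix([[0., 2.], [3., 0.]])
#   mins, maxes = sparse_min_max(X, axis=0)  # -> array([0., 0.]), array([3., 2.])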
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
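# Editor's illustrative note on the backport above, with default tolerances:
#
#   isclose(1.0, 1.0 + 1e-9)   # -> True, since |a - b| <= atol + rtol * |b|
#   isclose(np.nan, np.nan)    # treated as unequal unless equal_nan=True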
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
| bsd-3-clause |
jmetzen/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test for missing sorting of sample weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
# Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check that the y_min and y_max boundaries are applied correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
Suraj1006/Pifm | src/generate_waveforms.py | 15 | 2403 | #!/usr/bin/python
# PiFmRds - FM/RDS transmitter for the Raspberry Pi
# Copyright (C) 2014 Christophe Jacquet, F8FTK
#
# See https://github.com/ChristopheJacquet/PiFmRds
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This program generates the waveform of a single biphase symbol
#
# This program uses Pydemod, see https://github.com/ChristopheJacquet/Pydemod
import pydemod.app.rds as rds
import numpy
import scipy.io.wavfile as wavfile
import io
import matplotlib.pyplot as plt
sample_rate = 228000
outc = io.open("waveforms.c", mode="w", encoding="utf8")
outh = io.open("waveforms.h", mode="w", encoding="utf8")
header = u"""
/* This file was automatically generated by "generate_waveforms.py".
(C) 2014 Christophe Jacquet.
Released under the GNU GPL v3 license.
*/
"""
outc.write(header)
outh.write(header)
def generate_bit(name):
offset = 240
l = 96
count = 2
sample = numpy.zeros(3*l)
sample[l] = 1
sample[2*l] = -1
# Apply the data-shaping filter
sf = rds.pulse_shaping_filter(96*8, 228000)
shapedSamples = numpy.convolve(sample, sf)
out = shapedSamples[528-288:528+288] #[offset:offset+l*count]
#plt.plot(sf)
#plt.plot(out)
#plt.show()
iout = (out * 20000./max(abs(out)) ).astype(numpy.dtype('>i2'))
wavfile.write(u"waveform_{}.wav".format(name), sample_rate, iout)
outc.write(u"float waveform_{name}[] = {{{values}}};\n\n".format(
name = name,
values = u", ".join(map(unicode, out/2.5))))
# note: need to limit the amplitude so as not to saturate when the biphase
# waveforms are summed
outh.write(u"extern float waveform_{name}[{size}];\n".format(name=name, size=len(out)))
generate_bit("biphase")
outc.close()
outh.close() | gpl-3.0 |
ric2b/Vivaldi-browser | chromium/tools/perf/cli_tools/flakiness_cli/main.py | 10 | 2182 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This tool provides a command line interface for the flakiness dashboard."""
from __future__ import print_function
import argparse
from cli_tools.flakiness_cli import analysis
from cli_tools.flakiness_cli import cached_api
def Main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--master', help='include results for this master only, can use'
' shell-style wildcards to match multiple masters.')
parser.add_argument(
'--builder', help='include results for this builder only, can use'
' shell-style wildcards to match multiple builders.')
parser.add_argument(
'--test-type', help='include results for this test type only, can use'
' shell-style wildcards to match multiple test types.')
parser.add_argument(
'--test-suite', help='include results for this test suite only, can use'
' shell-style wildcards to match multiple test suites.')
parser.add_argument(
'--half-life', default=7, type=int, help='test failures this many days'
' ago are half as important as failures today.')
parser.add_argument(
'--threshold', default=5.0, type=float, help='only show tests'
' with flakiness above this level.')
args = parser.parse_args()
configs = cached_api.GetBuilders()
configs = analysis.FilterBy(configs, master=args.master,
builder=args.builder, test_type=args.test_type)
if configs.empty:
return 'Your query selected no test configurations'
dfs = []
for row in configs.itertuples():
df = cached_api.GetTestResults(row.master, row.builder, row.test_type)
df = analysis.FilterBy(df, test_suite=args.test_suite)
if df.empty:
continue
df = analysis.AggregateBuilds(df, args.half_life)
df = df[df['flakiness'] > args.threshold]
if df.empty:
continue
dfs.append(df)
if not dfs:
return 'Your query selected no test configurations'
df = analysis.pandas.concat(dfs)
df = df.sort_values('flakiness', ascending=False)
print(df)
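# Illustrative invocation (editor's note): the wrapper command name and the
# master/test-type values below are assumptions; only the flags themselves come
# from the argparse setup above.
#
#   flakiness_cli --master 'chromium.*' --test-type '*perf*' --threshold 10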
| bsd-3-clause |
jtamir/mri-sim-py | TODO/cpmg-prop_2spin.py | 1 | 8206 | #!/usr/bin/python
import numpy as np
from numpy import pi, cos, sin, exp, conj
from warnings import warn
import epgcpmg as epg
import time
import sys
import scipy.io
class PulseTrain:
def __init__(self, state_file, T, TE, TR, loss_fun, loss_fun_prime, angles_rad=None, verbose=False, step=.01, max_iter=100):
self.state_file = state_file
self.T = T
self.TE = TE
self.TR = TR
self.loss_fun = loss_fun
self.loss_fun_prime = loss_fun_prime
self.max_iter = max_iter
self.step = step
self.verbose = verbose
self.reset()
if angles_rad is not None:
self.set_angles_rad(angles_rad)
def set_angles_rad(self, angles_rad):
T = len(angles_rad)
if T < self.T:
self.angles_rad = np.hstack((angles_rad, np.zeros((self.T-T))))
else:
self.angles_rad = angles_rad[:self.T]
def reset(self):
self.angles_rad = DEG2RAD(50 + (120 - 50) * np.random.rand(self.T))
self.loss = []
def save_state(self, filename=None):
state = {
'angles_rad': self.angles_rad,
'loss': self.loss,
'max_iter': self.max_iter,
'step': self.step,
'T': self.T,
'TE': self.TE,
'verbose': self.verbose,
}
if filename is None:
scipy.io.savemat(self.state_file, state, appendmat=False)
else:
scipy.io.savemat(filename, state, appendmat=False)
def load_state(self, filename=None):
if filename is None:
state = scipy.io.loadmat(self.state_file)
else:
state = scipy.io.loadmat(filename)
self.angles_rad = state['angles_rad'].ravel()
self.loss = list(state['loss'].ravel())
self.max_iter = state['max_iter'].ravel()[0]
self.step = state['step'].ravel()[0]
self.T = state['T'].ravel()[0]
self.TE = state['TE'].ravel()[0]
self.verbose = state['verbose'].ravel()[0]
def train(self, theta1, theta2):
for i in range(self.max_iter):
angles_prime = self.loss_fun_prime(theta1, theta2, self.angles_rad, self.TE, self.TR)
self.angles_rad = self.angles_rad + self.step * angles_prime
self.loss.append(self.loss_fun(theta1, theta2, self.angles_rad, self.TE, self.TR))
str = '%d\t%3.3f' % (i, self.loss[-1])
self.print_verbose(str)
def print_verbose(self, str):
if self.verbose:
print str, RAD2DEG(self.angles_rad)
def plot_vals(self, thetas):
plt.subplot(2,1,1)
plt.plot(range(self.T), RAD2DEG(self.angles_rad), 'o-')
plt.xlim((0,self.T))
plt.subplot(2,1,2)
for theta in thetas:
plt.plot(range(self.T), epg.FSE_signal(self.angles_rad, self.TE, theta['T1'], theta['T2']))
plt.xlim((0,self.T))
plt.ylim((0,1))
def forward(self, theta):
return epg.FSE_signal(self.angles_rad, TE, theta['T1'], theta['T2']).ravel()
def loss(theta1, theta2, angles_rad, TE, TR):
T = len(angles_rad)
x1 = epg.FSE_signal(angles_rad, TE, theta1['T1'], theta1['T2']) * (1 - exp(-(TR - T * TE)/theta1['T1']))
x2 = epg.FSE_signal(angles_rad, TE, theta2['T1'], theta2['T2']) * (1 - exp(-(TR - T * TE)/theta2['T1']))
return 0.5 * np.linalg.norm(x1, ord=2)**2 + 0.5 * np.linalg.norm(x2, ord=2)**2 - np.dot(x1.ravel(), x2.ravel())
def normalized_loss(theta1, theta2, angles_rad, TE, TR):
T = len(angles_rad)
x1 = epg.FSE_signal(angles_rad, TE, theta1['T1'], theta1['T2']) * (1 - exp(-(TR - T * TE)/theta1['T1']))
x2 = epg.FSE_signal(angles_rad, TE, theta2['T1'], theta2['T2']) * (1 - exp(-(TR - T * TE)/theta2['T1']))
x1 = x1 / np.linalg.norm(x1, ord=2)
x2 = x2 / np.linalg.norm(x2, ord=2)
return - np.dot(x1.ravel(), x2.ravel())
def loss_prime(theta1, theta2, angles_rad, TE, TR):
T = len(angles_rad)
x1 = epg.FSE_signal(angles_rad, TE, theta1['T1'], theta1['T2']).ravel() * (1 - exp(-(TR - T * TE)/theta1['T1']))
x2 = epg.FSE_signal(angles_rad, TE, theta2['T1'], theta2['T2']).ravel() * (1 - exp(-(TR - T * TE)/theta2['T1']))
T = len(angles_rad)
alpha_prime = np.zeros((T,))
for i in range(T):
x1_prime = epg.FSE_signal_prime_alpha_idx(angles_rad, TE, theta1['T1'], theta1['T2'], i).ravel() * (1 - exp(-(TR - T * TE)/theta1['T1']))
x2_prime = epg.FSE_signal_prime_alpha_idx(angles_rad, TE, theta2['T1'], theta2['T2'], i).ravel() * (1 - exp(-(TR - T * TE)/theta2['T1']))
M1 = np.dot(x1, x1_prime)
M2 = np.dot(x2, x2_prime)
M3 = np.dot(x1, x2_prime)
M4 = np.dot(x2, x1_prime)
alpha_prime[i] = M1 + M2 - M3 - M4
return alpha_prime
def get_params(theta):
return theta['T1'], theta['T2']
def numerical_gradient(theta1, theta2, angles_rad, TE, TR):
initial_params = angles_rad
num_grad = np.zeros(initial_params.shape)
perturb = np.zeros(initial_params.shape)
e = 1e-5
for p in range(len(initial_params)):
perturb[p] = e
loss2 = loss(theta1, theta2, angles_rad + perturb, TE, TR)
loss1 = loss(theta1, theta2, angles_rad - perturb, TE, TR)
num_grad[p] = (loss2 - loss1) / (2 * e)
perturb[p] = 0
return num_grad
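# Editor's note: numerical_gradient is a central-difference check of loss_prime;
# for each flip angle alpha_p it approximates
#   dL/dalpha_p ~= (L(alpha + e*u_p) - L(alpha - e*u_p)) / (2*e), with e = 1e-5
# and u_p the unit vector for entry p. The __main__ block below compares both
# gradients and prints their relative error.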
def DEG2RAD(angle):
return np.pi * angle / 180
def RAD2DEG(angle_rad):
return 180 * angle_rad / np.pi
def read_angles(fliptable):
f = open(fliptable, 'r')
angles = []
for line in f.readlines():
angles.append(float(line))
f.close()
return np.array(angles)
def print_table(P1, P2, P3):
print
print '\tP1\tP2\tP3\nloss\t%3.3f\t%3.3f\t%3.3f\nnloss\t%3.3f\t%3.3f\t%3.3f\n' % (
loss(theta1, theta2, P1.angles_rad, TE, TR),
loss(theta1, theta2, P2.angles_rad, TE, TR),
loss(theta1, theta2, P3.angles_rad, TE, TR),
normalized_loss(theta1, theta2, P1.angles_rad, TE, TR),
normalized_loss(theta1, theta2, P2.angles_rad, TE, TR),
normalized_loss(theta1, theta2, P3.angles_rad, TE, TR)
)
if __name__ == "__main__":
import matplotlib.pyplot as plt
np.set_printoptions(suppress=True, precision=3)
T1 = 1000e-3
T2 = 200e-3
TE = 50e-3
TR = 1.4
if len(sys.argv) > 1:
T = int(sys.argv[1])
else:
T = 10
angles = 150 * np.ones((T,))
angles = read_angles('../data/flipangles.txt.408183520')
TT = len(angles)
if TT < T:
T = TT
else:
angles = angles[:T]
angles_rad = DEG2RAD(angles)
S = epg.FSE_signal(angles_rad, TE, T1, T2)
S2 = abs(S)
theta1 = {'T1': 1000e-3, 'T2': 20e-3}
theta2 = {'T1': 1000e-3, 'T2': 100e-3}
t1 = time.time()
NG = numerical_gradient(theta1, theta2, angles_rad, TE, TR)
t2 = time.time()
LP = loss_prime(theta1, theta2, angles_rad, TE, TR)
t3 = time.time()
NG_time = t2 - t1
LP_time = t3 - t2
print 'Numerical Gradient\t(%03.3f s)\t' % NG_time, NG
print
print 'Analytical Gradient\t(%03.3f s)\t' % LP_time, LP
print
print 'Error:', np.linalg.norm(NG - LP) / np.linalg.norm(NG)
#plt.plot(TE*1000*np.arange(1, T+1), S2)
#plt.xlabel('time (ms)')
#plt.ylabel('signal')
#plt.title('T1 = %.2f ms, T2 = %.2f ms' % (T1 * 1000, T2 * 1000))
#plt.show()
a = angles_rad
#a = np.pi * np.ones((T,))
a = None
P1 = PulseTrain('angles_rand.mat', T, TE, TR, loss, loss_prime, angles_rad=a, verbose=True)
#P1.load_state()
P2 = PulseTrain('angles_180.mat', T, TE, TR, loss, loss_prime, angles_rad=np.pi * np.ones((T,)), verbose=True)
P3 = PulseTrain('angles_vfa.mat', T, TE, TR, loss, loss_prime, angles_rad=angles_rad, verbose=True)
print_table(P1, P2, P3)
P1.train(theta1, theta2)
print_table(P1, P2, P3)
plt.figure(1)
plt.clf()
P1.plot_vals((theta1, theta2))
plt.figure(2)
plt.clf()
P2.plot_vals((theta1, theta2))
plt.figure(3)
plt.clf()
P3.plot_vals((theta1, theta2))
plt.show()
MAX_ANGLE = DEG2RAD(120)
MIN_ANGLE = DEG2RAD(50)
| mit |
blackball/an-test6 | util/sip_plot_distortion.py | 1 | 2423 | import matplotlib
matplotlib.use('Agg')
import sys
from optparse import *
import numpy as np
from pylab import *
from numpy import *
#from astrometry.util.sip import *
from astrometry.util.util import *
def plot_distortions(wcsfn, ex=1, ngridx=10, ngridy=10, stepx=10, stepy=10):
wcs = Sip(wcsfn)
W,H = wcs.wcstan.imagew, wcs.wcstan.imageh
xgrid = np.linspace(0, W, ngridx)
ygrid = np.linspace(0, H, ngridy)
X = np.linspace(0, W, int(ceil(W/stepx)))
Y = np.linspace(0, H, int(ceil(H/stepy)))
xlo,xhi,ylo,yhi = 0,W,0,H
for x in xgrid:
DX,DY = [],[]
xx,yy = [],[]
for y in Y:
dx,dy = wcs.get_distortion(x, y)
xx.append(x)
yy.append(y)
DX.append(dx)
DY.append(dy)
DX = array(DX)
DY = array(DY)
xx = array(xx)
yy = array(yy)
EX = DX + ex * (DX - xx)
EY = DY + ex * (DY - yy)
plot(xx, yy, 'k-', alpha=0.5)
plot(EX, EY, 'r-')
xlo = min(xlo, min(EX))
xhi = max(xhi, max(EX))
ylo = min(ylo, min(EY))
yhi = max(yhi, max(EY))
for y in ygrid:
DX,DY = [],[]
xx,yy = [],[]
for x in X:
dx,dy = wcs.get_distortion(x, y)
DX.append(dx)
DY.append(dy)
xx.append(x)
yy.append(y)
DX = array(DX)
DY = array(DY)
xx = array(xx)
yy = array(yy)
EX = DX + ex * (DX - xx)
EY = DY + ex * (DY - yy)
plot(xx, yy, 'k-', alpha=0.5)
plot(EX, EY, 'r-')
xlo = min(xlo, min(EX))
xhi = max(xhi, max(EX))
ylo = min(ylo, min(EY))
yhi = max(yhi, max(EY))
plot([wcs.wcstan.crpix[0]], [wcs.wcstan.crpix[1]], 'rx')
#axis([0, W, 0, H])
axis('scaled')
axis([xlo,xhi,ylo,yhi])
#axis('tight')
if __name__ == '__main__':
parser = OptionParser(usage='%prog [options] <wcs-filename> <plot-filename>')
parser.add_option('-e', '--ex', '--exaggerate', dest='ex', type='float', help='Exaggerate the distortion by this factor')
#parser.add_option('-s', '--scale', dest='scale', type='float', help='Scale the
parser.add_option('-n', dest='nsteps', type='int', help='Number of grid lines to plot')
parser.set_defaults(ex=1.)
opt,args = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(-1)
wcsfn = args[0]
outfn = args[1]
args = {}
if opt.ex is not None:
args['ex'] = opt.ex
if opt.nsteps is not None:
args['ngridx'] = opt.nsteps
args['ngridy'] = opt.nsteps
clf()
plot_distortions(wcsfn, **args)
tt = 'SIP distortions: %s' % wcsfn
if opt.ex != 1:
tt += ' (exaggerated by %g)' % opt.ex
title(tt)
savefig(outfn)
| gpl-2.0 |
ISS-Mimic/Mimic | Pi/GUI.py | 1 | 164864 | #!/usr/bin/python
from datetime import datetime, timedelta #used for time conversions and logging timestamps
import datetime as dtime #this is different from above for... reasons?
import os # used to remove database on program exit; also used for importing config.json
from subprocess import Popen #, PIPE, STDOUT #used to start/stop Javascript telemetry program and TDRS script and orbitmap
import time #used for time
import math #used for math
import glob #used to parse serial port names
import sqlite3 #used to access ISS telemetry database
import pytz #used for timezone conversion in orbit pass predictions
from bs4 import BeautifulSoup #used to parse webpages for data (EVA stats, ISS TLE)
import numpy as np
import ephem #used for TLE orbit information on orbit screen
import serial #used to send data over serial to arduino
import json # used for serial port config
from pyudev import Context, Devices, Monitor, MonitorObserver # for automatically detecting Arduinos
import argparse
import sys
import os.path as op #use for getting mimic directory
# This is here because Kivy gets upset if you pass in your own non-Kivy args
CONFIG_FILE_PATH = os.path.join(os.path.dirname(__file__), "config.json")
parser = argparse.ArgumentParser(description='ISS Mimic GUI. Arguments listed below are non-Kivy arguments.')
parser.add_argument(
'--config', action='store_true',
help='use config.json to manually specify serial ports to use',
default=False)
args, kivy_args = parser.parse_known_args()
sys.argv[1:] = kivy_args
USE_CONFIG_JSON = args.config
from kivy.app import App
from kivy.lang import Builder
from kivy.network.urlrequest import UrlRequest #using this to request webpages
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.properties import ObjectProperty
from kivy.uix.screenmanager import ScreenManager, Screen, SwapTransition
from kivy.uix.popup import Popup
from kivy.uix.label import Label
import database_initialize # create and populate database script
""" Unused imports
import kivy
from kivy.core.window import Window
import threading #trying to send serial write to other thread
matplotlib for plotting day/night time
import matplotlib.pyplot as plt
from matplotlib import path
from mpl_toolkits.basemap import Basemap
"""
mimic_directory = op.abspath(op.join(__file__, op.pardir, op.pardir, op.pardir))
print("Mimic Directory: " + mimic_directory)
# Constants
SERIAL_SPEED = 9600
os.environ['KIVY_GL_BACKEND'] = 'gl' #need this to fix a kivy segfault that occurs with python3 for some reason
# Create Program Logs
mimiclog = open(mimic_directory + '/Mimic/Pi/Logs/mimiclog.txt', 'w')
def logWrite(*args):
mimiclog.write(str(datetime.utcnow()))
mimiclog.write(' ')
mimiclog.write(str(args[0]))
mimiclog.write('\n')
mimiclog.flush()
logWrite("Initialized Mimic Program Log")
#-------------------------Look for a connected arduino-----------------------------------
def remove_tty_device(name_to_remove):
""" Removes tty device from list of serial ports. """
global SERIAL_PORTS, OPEN_SERIAL_PORTS
try:
SERIAL_PORTS.remove(name_to_remove)
idx_to_remove = -1
for x in range(len(OPEN_SERIAL_PORTS)):
if name_to_remove in str(OPEN_SERIAL_PORTS[x]):
idx_to_remove = x
if idx_to_remove != -1:
del OPEN_SERIAL_PORTS[idx_to_remove]
log_str = "Removed %s." % name_to_remove
logWrite(log_str)
print(log_str)
except ValueError:
# Not printing anything because it sometimes tries too many times and is irrelevant
pass
def add_tty_device(name_to_add):
""" Adds tty device to list of serial ports after it successfully opens. """
global SERIAL_PORTS, OPEN_SERIAL_PORTS
if name_to_add not in SERIAL_PORTS:
try:
SERIAL_PORTS.append(name_to_add)
OPEN_SERIAL_PORTS.append(serial.Serial(SERIAL_PORTS[-1], SERIAL_SPEED, write_timeout=0, timeout=0))
log_str = "Added and opened %s." % name_to_add
logWrite(log_str)
print(log_str)
except IOError as e:
# Not printing anything because sometimes it successfully opens soon after
remove_tty_device(name_to_add) # don't leave it in the list if it didn't open
def detect_device_event(device):
""" Callback for MonitorObserver to detect tty device and add or remove it. """
if 'tty' in device.device_path:
name = '/dev/' + (device.device_path).split('/')[-1:][0]
if device.action == 'remove':
remove_tty_device(name)
if device.action == 'add':
add_tty_device(name)
def is_arduino_id_vendor_string(text):
"""
It's not ideal to have to include FTDI because that's somewhat
generic, but if we want to use something like the Arduino Nano,
that's what it shows up as. If it causes a problem, we can change
it -- or the user can specify to use the config.json file instead.
"""
if "Arduino" in text or "Adafruit" in text or "FTDI" in text:
return True
return False
def parse_tty_name(device, val):
"""
Parses tty name from ID_VENDOR string.
Example of device as a string:
Device('/sys/devices/platform/scb/fd500000.pcie/pci0000:00/0000:00:00.0/0000:01:00.0/usb1/1-1/1-1.1/1-1.1.1/1-1.1.1:1.0/tty/ttyACM0')
"""
if is_arduino_id_vendor_string(val):
name = str(device).split('/')[-1:][0][:-2] # to get ttyACM0, etc.
return '/dev/' + name
logWrite("Skipping serial device:\n%s" % str(device))
def get_tty_dev_names(context):
""" Checks ID_VENDOR string of tty devices to identify Arduinos. """
names = []
devices = context.list_devices(subsystem='tty')
for d in devices:
for k, v in d.items():
if k is not None and k == 'ID_VENDOR':
name = parse_tty_name(d, v)
if name is not None:  # parse_tty_name returns None for non-Arduino devices
    names.append(name)
return names
def get_config_data():
""" Get the JSON config data. """
data = {}
with open (CONFIG_FILE_PATH, 'r') as f:
data = json.load(f)
return data
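# Editor's illustrative note: given the lookup in get_serial_ports() below,
# config.json is expected to look something like (port names are examples):
#
#   {"arduino": {"serial_ports": ["/dev/ttyACM0", "/dev/ttyUSB0"]}}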
def get_serial_ports(context, using_config_file=False):
""" Gets the serial ports either from a config file or pyudev """
serial_ports = []
if using_config_file:
data = get_config_data()
serial_ports = data['arduino']['serial_ports']
else:
serial_ports = get_tty_dev_names(context)
return serial_ports
def open_serial_ports(serial_ports):
""" Open all the serial ports in the list. Used when the GUI is first opened. """
global OPEN_SERIAL_PORTS
try:
for s in serial_ports:
OPEN_SERIAL_PORTS.append(serial.Serial(s, SERIAL_SPEED, write_timeout=0, timeout=0))
except (OSError, serial.SerialException) as e:
if USE_CONFIG_JSON:
print("\nNot all serial ports were detected. Check config.json for accuracy.\n\n%s" % e)
raise Exception(e)
def serialWrite(*args):
""" Writes to serial ports in list. """
logWrite("Function call - serial write: " + str(*args))
for s in OPEN_SERIAL_PORTS:
try:
s.write(str.encode(*args))
except (OSError, serial.SerialException) as e:
logWrite(e)
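# Editor's note: callers send space-terminated KEY=VALUE strings, e.g.
# serialWrite("NULLIFY=1 ") in ManualControlScreen.zeroJoints(); the string is
# broadcast to every port in OPEN_SERIAL_PORTS.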
context = Context()
if not USE_CONFIG_JSON:
MONITOR = Monitor.from_netlink(context)
TTY_OBSERVER = MonitorObserver(MONITOR, callback=detect_device_event, name='monitor-observer')
TTY_OBSERVER.daemon = False
SERIAL_PORTS = get_serial_ports(context, USE_CONFIG_JSON)
OPEN_SERIAL_PORTS = []
open_serial_ports(SERIAL_PORTS)
log_str = "Serial ports opened: %s" % str(SERIAL_PORTS)
logWrite(log_str)
print(log_str)
if not USE_CONFIG_JSON:
TTY_OBSERVER.start()
log_str = "Started monitoring serial ports."
print(log_str)
logWrite(log_str)
#-------------------------TDRS Checking Database-----------------------------------------
TDRSconn = sqlite3.connect('/dev/shm/tdrs.db')
TDRSconn.isolation_level = None
TDRScursor = TDRSconn.cursor()
conn = sqlite3.connect('/dev/shm/iss_telemetry.db')
conn.isolation_level = None
c = conn.cursor()
def staleTelemetry():
c.execute("UPDATE telemetry SET Value = 'Unsubscribed' where Label = 'Lightstreamer'")
#----------------------------------Variables---------------------------------------------
LS_Subscription = False
isslocationsuccess = False
testfactor = -1
crew_mention= False
mimicbutton = False
fakeorbitboolean = False
demoboolean = False
switchtofake = False
manualcontrol = False
startup = True
isscrew = 0
val = ""
tdrs1 = 0
tdrs2 = 0
tdrs_timestamp = 0
lastsignal = 0
testvalue = 0
obtained_EVA_crew = False
unixconvert = time.gmtime(time.time())
EVAstartTime = float(unixconvert[7])*24+unixconvert[3]+float(unixconvert[4])/60+float(unixconvert[5])/3600
alternate = True
Beta4Bcontrol = False
Beta3Bcontrol = False
Beta2Bcontrol = False
Beta1Bcontrol = False
Beta4Acontrol = False
Beta3Acontrol = False
Beta2Acontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
stopAnimation = True
startingAnim = True
oldtdrs = "n/a"
runningDemo = False
Disco = False
logged = False
mt_speed = 0.00
#-----------EPS Variables----------------------
EPSstorageindex = 0
channel1A_voltage = [154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1]
channel1B_voltage = [154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1]
channel2A_voltage = [154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1]
channel2B_voltage = [154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1]
channel3A_voltage = [154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1]
channel3B_voltage = [154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1]
channel4A_voltage = [154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1]
channel4B_voltage = [154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1, 154.1]
sizeX = 0.00
sizeY = 0.00
psarj2 = 1.0
ssarj2 = 1.0
new_x = 0
new_y = 0
new_x2 = 0
new_y2 = 0
aos = 0.00
los = 0.00
sgant_elevation = 0.00
sgant_xelevation = 0.00
sgant_elevation_old = -110.00
seconds2 = 260
oldLOS = 0.00
psarjmc = 0.00
ssarjmc = 0.00
ptrrjmc = 0.00
strrjmc = 0.00
beta1bmc = 0.00
beta1amc = 0.00
beta2bmc = 0.00
beta2amc = 0.00
beta3bmc = 0.00
beta3amc = 0.00
beta4bmc = 0.00
beta4amc = 0.00
US_EVAinProgress = False
leak_hold = False
firstcrossing = True
oldAirlockPump = 0.00
position_x = 0.00
position_y = 0.00
position_z = 0.00
velocity_x = 0.00
velocity_y = 0.00
velocity_z = 0.00
velocity = 0.00
altitude = 0.00
mass = 0.00
crewlockpres = 758
EVA_activities = False
repress = False
depress = False
seconds = 0
minutes = 0
hours = 0
leak_hold = False
EV1 = ""
EV2 = ""
numEVAs1 = ""
EVAtime_hours1 = ""
EVAtime_minutes1 = ""
numEVAs2 = ""
EVAtime_hours2 = ""
EVAtime_minutes2 = ""
holdstartTime = float(unixconvert[7])*24+unixconvert[3]+float(unixconvert[4])/60+float(unixconvert[5])/3600
eva = False
standby = False
prebreath1 = False
prebreath2 = False
depress1 = False
depress2 = False
leakhold = False
repress = False
ISS_TLE_Acquired = False
stationmode = 0.00
tdrs = ""
EVA_picture_urls = []
urlindex = 0
module = ""
internet = False
old_mt_timestamp = 0.00
old_mt_position = 0.00
class MainScreen(Screen):
def changeManualControlBoolean(self, *args):
global manualcontrol
manualcontrol = args[0]
def killproc(*args):
global p,p2
if not USE_CONFIG_JSON:
TTY_OBSERVER.stop()
log_str = "Stopped monitoring serial ports."
logWrite(log_str)
print(log_str)
try:
p.kill()
p2.kill()
except Exception:
pass
os.system('rm /dev/shm/*') #delete sqlite database on exit, db is recreated each time to avoid concurrency issues
staleTelemetry()
logWrite("Successfully stopped ISS telemetry javascript and removed database")
class ManualControlScreen(Screen):
def on_pre_enter(self): #call the callback function when activating this screen, to update all angles
self.callback()
def callback(self):
global psarjmc,ssarjmc,ptrrjmc,strrjmc,beta1amc,beta1bmc,beta2amc,beta2bmc,beta3amc,beta3bmc,beta4amc,beta4bmc
self.ids.Beta4B_Button.text = "4B\n" + str(math.trunc(beta4bmc))
self.ids.Beta4A_Button.text = "4A\n" + str(math.trunc(beta4amc))
self.ids.Beta3B_Button.text = "3B\n" + str(math.trunc(beta3bmc))
self.ids.Beta3A_Button.text = "3A\n" + str(math.trunc(beta3amc))
self.ids.Beta2B_Button.text = "2B\n" + str(math.trunc(beta2bmc))
self.ids.Beta2A_Button.text = "2A\n" + str(math.trunc(beta2amc))
self.ids.Beta1B_Button.text = "1B\n" + str(math.trunc(beta1bmc))
self.ids.Beta1A_Button.text = "1A\n" + str(math.trunc(beta1amc))
self.ids.PSARJ_Button.text = "PSARJ " + str(math.trunc(psarjmc))
self.ids.SSARJ_Button.text = "SSARJ " + str(math.trunc(ssarjmc))
self.ids.PTRRJ_Button.text = "PTRRJ\n" + str(math.trunc(ptrrjmc))
self.ids.STRRJ_Button.text = "STRRJ\n" + str(math.trunc(strrjmc))
def zeroJoints(self):
global psarjmc,ssarjmc,ptrrjmc,strrjmc,beta1amc,beta1bmc,beta2amc,beta2bmc,beta3amc,beta3bmc,beta4amc,beta4bmc
serialWrite("NULLIFY=1 ")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta1a'")
beta1amc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta1b'")
beta1bmc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta2a'")
beta2amc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta2b'")
beta2bmc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta3a'")
beta3amc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta3b'")
beta3bmc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta4a'")
beta4amc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta4b'")
beta4bmc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'psarj'")
psarjmc = 0.00
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'ssarj'")
ssarjmc = 0.00
self.callback()
def setActive(self, *args):
global Beta4Bcontrol, Beta3Bcontrol, Beta2Bcontrol, Beta1Bcontrol, Beta4Acontrol, Beta3Acontrol, Beta2Acontrol, Beta1Acontrol, PSARJcontrol, SSARJcontrol, PTRRJcontrol, STRRJcontrol
if str(args[0])=="Beta4B":
Beta4Bcontrol = True
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (0, 0, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="Beta3B":
Beta3Bcontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (0, 0, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="Beta2B":
Beta2Bcontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (0, 0, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="Beta1B":
Beta1Bcontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (0, 0, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="Beta4A":
Beta4Acontrol = True
Beta4Bcontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (0, 0, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="Beta3A":
Beta3Acontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (0, 0, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="Beta2A":
Beta2Acontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (0, 0, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="Beta1A":
Beta1Acontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (0, 0, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="PTRRJ":
PTRRJcontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (0, 0, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="STRRJ":
STRRJcontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
SSARJcontrol = False
PTRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (0, 0, 1, 1)
if str(args[0])=="PSARJ":
PSARJcontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
SSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (0, 0, 1, 1)
self.ids.SSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
if str(args[0])=="SSARJ":
SSARJcontrol = True
Beta4Bcontrol = False
Beta4Acontrol = False
Beta3Bcontrol = False
Beta3Acontrol = False
Beta2Bcontrol = False
Beta2Acontrol = False
Beta1Bcontrol = False
Beta1Acontrol = False
PSARJcontrol = False
PTRRJcontrol = False
STRRJcontrol = False
self.ids.Beta4B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta4A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta3A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta2A_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1B_Button.background_color = (1, 1, 1, 1)
self.ids.Beta1A_Button.background_color = (1, 1, 1, 1)
self.ids.PSARJ_Button.background_color = (1, 1, 1, 1)
self.ids.SSARJ_Button.background_color = (0, 0, 1, 1)
self.ids.PTRRJ_Button.background_color = (1, 1, 1, 1)
self.ids.STRRJ_Button.background_color = (1, 1, 1, 1)
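#incrementActive applies the jog amount in args[0] to whichever joint is currently selected, then calls self.callback()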
def incrementActive(self, *args):
global Beta4Bcontrol, Beta3Bcontrol, Beta2Bcontrol, Beta1Bcontrol, Beta4Acontrol, Beta3Acontrol, Beta2Acontrol, Beta1Acontrol, PSARJcontrol, SSARJcontrol, PTRRJcontrol, STRRJcontrol
if Beta4Bcontrol:
self.incrementBeta4B(float(args[0]))
if Beta3Bcontrol:
self.incrementBeta3B(float(args[0]))
if Beta2Bcontrol:
self.incrementBeta2B(float(args[0]))
if Beta1Bcontrol:
self.incrementBeta1B(float(args[0]))
if Beta4Acontrol:
self.incrementBeta4A(float(args[0]))
if Beta3Acontrol:
self.incrementBeta3A(float(args[0]))
if Beta2Acontrol:
self.incrementBeta2A(float(args[0]))
if Beta1Acontrol:
self.incrementBeta1A(float(args[0]))
if PTRRJcontrol:
self.incrementPTRRJ(float(args[0]))
if STRRJcontrol:
self.incrementSTRRJ(float(args[0]))
if PSARJcontrol:
self.incrementPSARJ(float(args[0]))
if SSARJcontrol:
self.incrementSSARJ(float(args[0]))
self.callback()
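#each increment* method below adds the jog amount to the tracked angle, sends the new value over serial, mirrors it into the telemetry database, and reports it on the status bar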
def incrementPSARJ(self, *args):
global psarjmc
psarjmc += args[0]
serialWrite("PSARJ=" + str(psarjmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'psarj'",(psarjmc,))
self.ids.statusbar.text = "PSARJ Value Sent: " + str(psarjmc)
def incrementSSARJ(self, *args):
global ssarjmc
ssarjmc += args[0]
serialWrite("SSARJ=" + str(ssarjmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'ssarj'",(ssarjmc,))
self.ids.statusbar.text = "SSARJ Value Sent: " + str(ssarjmc)
def incrementPTRRJ(self, *args):
global ptrrjmc
ptrrjmc += args[0]
serialWrite("PTRRJ=" + str(ptrrjmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'ptrrj'",(ptrrjmc,))
self.ids.statusbar.text = "PTRRJ Value Sent: " + str(ptrrjmc)
def incrementSTRRJ(self, *args):
global strrjmc
strrjmc += args[0]
serialWrite("STRRJ=" + str(strrjmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'strrj'",(strrjmc,))
self.ids.statusbar.text = "STRRJ Value Sent: " + str(strrjmc)
def incrementBeta1B(self, *args):
global beta1bmc
beta1bmc += args[0]
serialWrite("B1B=" + str(beta1bmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta1b'",(beta1bmc,))
self.ids.statusbar.text = "Beta1B Value Sent: " + str(beta1bmc)
def incrementBeta1A(self, *args):
global beta1amc
beta1amc += args[0]
serialWrite("B1A=" + str(beta1amc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta1a'",(beta1amc,))
self.ids.statusbar.text = "Beta1A Value Sent: " + str(beta1amc)
def incrementBeta2B(self, *args):
global beta2bmc
beta2bmc += args[0]
serialWrite("B2B=" + str(beta2bmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta2b'",(beta2bmc,))
self.ids.statusbar.text = "Beta2B Value Sent: " + str(beta2bmc)
def incrementBeta2A(self, *args):
global beta2amc
beta2amc += args[0]
serialWrite("B2A=" + str(beta2amc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta2a'",(beta2amc,))
self.ids.statusbar.text = "Beta2A Value Sent: " + str(beta2amc)
def incrementBeta3B(self, *args):
global beta3bmc
beta3bmc += args[0]
serialWrite("B3B=" + str(beta3bmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta3b'",(beta3bmc,))
self.ids.statusbar.text = "Beta3B Value Sent: " + str(beta3bmc)
def incrementBeta3A(self, *args):
global beta3amc
beta3amc += args[0]
serialWrite("B3A=" + str(beta3amc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta3a'",(beta3amc,))
self.ids.statusbar.text = "Beta3A Value Sent: " + str(beta3amc)
def incrementBeta4B(self, *args):
global beta4bmc
beta4bmc += args[0]
serialWrite("B4B=" + str(beta4bmc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta4b'",(beta4bmc,))
self.ids.statusbar.text = "Beta4B Value Sent: " + str(beta4bmc)
def incrementBeta4A(self, *args):
global beta4amc
beta4amc += args[0]
serialWrite("B4A=" + str(beta4amc) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta4a'",(beta4amc,))
self.ids.statusbar.text = "Beta4A Value Sent: " + str(beta4amc)
def changeBoolean(self, *args):
global manualcontrol
manualcontrol = args[0]
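#sendActive routes an absolute angle command to whichever joint is currently selected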
def sendActive(self, *args):
if Beta4Bcontrol:
self.sendBeta4B(float(args[0]))
if Beta3Bcontrol:
self.sendBeta3B(float(args[0]))
if Beta2Bcontrol:
self.sendBeta2B(float(args[0]))
if Beta1Bcontrol:
self.sendBeta1B(float(args[0]))
if Beta4Acontrol:
self.sendBeta4A(float(args[0]))
if Beta3Acontrol:
self.sendBeta3A(float(args[0]))
if Beta2Acontrol:
self.sendBeta2A(float(args[0]))
if Beta1Acontrol:
self.sendBeta1A(float(args[0]))
if PTRRJcontrol:
self.sendPTRRJ(float(args[0]))
if STRRJcontrol:
self.sendSTRRJ(float(args[0]))
if PSARJcontrol:
self.sendPSARJ(float(args[0]))
if SSARJcontrol:
self.sendSSARJ(float(args[0]))
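#each send* method below sets the tracked angle to an absolute commanded value, then performs the same serial write, database update, and status bar report as the increment methods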
def sendPSARJ(self, *args):
global psarjmc
psarjmc = args[0]
serialWrite("PSARJ=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'psarj'",(args[0],))
self.ids.statusbar.text = "PSARJ Value Sent: " + str(args[0])
def sendSSARJ(self, *args):
global ssarjmc
ssarjmc = args[0]
serialWrite("SSARJ=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'ssarj'",(args[0],))
self.ids.statusbar.text = "SSARJ Value Sent: " + str(args[0])
def sendPTRRJ(self, *args):
global ptrrjmc
ptrrjmc = args[0]
serialWrite("PTRRJ=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'ptrrj'",(args[0],))
self.ids.statusbar.text = "PTRRJ Value Sent: " + str(args[0])
def sendSTRRJ(self, *args):
global strrjmc
strrjmc = args[0]
serialWrite("STRRJ=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'strrj'",(args[0],))
self.ids.statusbar.text = "STRRJ Value Sent: " + str(args[0])
def sendBeta1B(self, *args):
global beta1bmc
beta1bmc = args[0]
serialWrite("B1B=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta1b'",(args[0],))
self.ids.statusbar.text = "Beta1B Value Sent: " + str(args[0])
def sendBeta1A(self, *args):
global beta1amc
beta1amc = args[0]
serialWrite("B1A=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta1a'",(args[0],))
self.ids.statusbar.text = "Beta1A Value Sent: " + str(args[0])
def sendBeta2B(self, *args):
global beta2bmc
beta2bmc = args[0]
serialWrite("B2B=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta2b'",(args[0],))
self.ids.statusbar.text = "Beta2B Value Sent: " + str(args[0])
def sendBeta2A(self, *args):
global beta2amc
beta2amc = args[0]
serialWrite("B2A=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta2a'",(args[0],))
self.ids.statusbar.text = "Beta2A Value Sent: " + str(args[0])
def sendBeta3B(self, *args):
global beta3bmc
beta3bmc = args[0]
serialWrite("B3B=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta3b'",(args[0],))
self.ids.statusbar.text = "Beta3B Value Sent: " + str(args[0])
def sendBeta3A(self, *args):
global beta3amc
beta3amc = args[0]
serialWrite("B3A=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta3a'",(args[0],))
self.ids.statusbar.text = "Beta3A Value Sent: " + str(args[0])
def sendBeta4B(self, *args):
global beta4bmc
beta4bmc = args[0]
serialWrite("B4B=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta4b'",(args[0],))
self.ids.statusbar.text = "Beta4B Value Sent: " + str(args[0])
def sendBeta4A(self, *args):
global beta4amc
beta4amc = args[0]
serialWrite("B4A=" + str(args[0]) + " ")
c.execute("UPDATE telemetry SET Value = ? WHERE Label = 'beta4a'",(args[0],))
self.ids.statusbar.text = "Beta4A Value Sent: " + str(args[0])
def send0(self, *args):
global psarjmc,ssarjmc,ptrrjmc,strrjmc,beta1amc,beta1bmc,beta2amc,beta2bmc,beta3amc,beta3bmc,beta4amc,beta4bmc
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta1a'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta1b'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta2a'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta2b'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta3a'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta3b'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta4a'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'beta4b'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'psarj'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'ssarj'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'ptrrj'")
c.execute("UPDATE telemetry SET Value = '0' WHERE Label = 'strrj'")
strrjmc = 0
ptrrjmc = 0
ssarjmc = 0
psarjmc = 0
beta1bmc = 0
beta1amc = 0
beta2bmc = 0
beta2amc = 0
beta3bmc = 0
beta3amc = 0
beta4bmc = 0
beta4amc = 0
self.ids.statusbar.text = "0 sent to all"
serialWrite("B1A=0 ")
serialWrite("B1B=0 ")
serialWrite("B2A=0 ")
serialWrite("B2B=0 ")
serialWrite("B3A=0 ")
serialWrite("B3B=0 ")
serialWrite("B4A=0 ")
serialWrite("B4B=0 ")
serialWrite("PSARJ=0 ")
serialWrite("SSARJ=0 ")
serialWrite("PTRRJ=0 ")
serialWrite("STRRJ=0 ")
def send90(self, *args):
global psarjmc,ssarjmc,ptrrjmc,strrjmc,beta1amc,beta1bmc,beta2amc,beta2bmc,beta3amc,beta3bmc,beta4amc,beta4bmc
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'beta1a'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'beta1b'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'beta2a'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'beta2b'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'beta3a'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'beta3b'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'beta4a'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'beta4b'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'psarj'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'ssarj'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'ptrrj'")
c.execute("UPDATE telemetry SET Value = '90' WHERE Label = 'strrj'")
strrjmc = 90
ptrrjmc = 90
ssarjmc = 90
psarjmc = 90
beta1bmc = 90
beta1amc = 90
beta2bmc = 90
beta2amc = 90
beta3bmc = 90
beta3amc = 90
beta4bmc = 90
beta4amc = 90
self.ids.statusbar.text = "90 sent to all"
serialWrite("B1A=90 ")
serialWrite("B1B=90 ")
serialWrite("B2A=90 ")
serialWrite("B2B=90 ")
serialWrite("B3A=90 ")
serialWrite("B3B=90 ")
serialWrite("B4A=90 ")
serialWrite("B4B=90 ")
serialWrite("PSARJ=90 ")
serialWrite("SSARJ=90 ")
serialWrite("PTRRJ=90 ")
serialWrite("STRRJ=90 ")
class FakeOrbitScreen(Screen):
def changeDemoBoolean(self, *args):
global demoboolean
demoboolean = args[0]
def HTVpopup(self, *args): #popup describing the HTV berthing playback
popup_label = Label(text='This will playback recorded data from when the Japanese HTV spacecraft berthed to the ISS. During berthing, the SARJs and nadir BGAs lock but the zenith BGAs autotrack', text_size=(self.width * 0.45, None)) #text_size on the Label makes the text wrap; 0.45 of the screen width approximates the popup's 0.5 size_hint (assumption)
HTVpopup = Popup(title='HTV Berthing Orbit', content=popup_label, size_hint=(0.5, 0.3), auto_dismiss=True)
HTVpopup.open()
def startDisco(*args):
global p2, runningDemo, Disco
if not runningDemo:
p2 = Popen(mimic_directory + "/Mimic/Pi/disco.sh")
runningDemo = True
Disco = True
logWrite("Successfully started Disco script")
def startDemo(*args):
global p2, runningDemo
if not runningDemo:
p2 = Popen(mimic_directory + "/Mimic/Pi/demoOrbit.sh")
runningDemo = True
logWrite("Successfully started Demo Orbit script")
def stopDemo(*args):
global p2, runningDemo
try:
p2.kill()
except Exception:
pass
else:
runningDemo = False
def startHTVDemo(*args):
global p2, runningDemo
if not runningDemo:
p2 = Popen(mimic_directory + "/Mimic/Pi/demoHTVOrbit.sh")
runningDemo = True
logWrite("Successfully started Demo HTV Orbit script")
def stopHTVDemo(*args):
global p2, runningDemo
try:
p2.kill()
except Exception:
pass
else:
logWrite("Successfully stopped Demo HTV Orbit script")
runningDemo = False
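#the settings checkbox toggles the SmartRolloverBGA flag on the connected arduinos over serial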
class Settings_Screen(Screen, EventDispatcher):
def checkbox_clicked(*args):
if args[2]:
serialWrite("SmartRolloverBGA=1 ")
else:
serialWrite("SmartRolloverBGA=0 ")
class Orbit_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class Orbit_Pass(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class Orbit_Data(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class ISS_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
def selectModule(*args): #used for choosing a module on screen to light up
global module
module = str(args[1])
class ECLSS_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class EPS_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class CT_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class CT_SASA_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class CT_Camera_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class CT_UHF_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class CT_SGANT_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class GNC_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class EVA_Main_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class EVA_US_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class EVA_RS_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class EVA_Pictures(Screen, EventDispatcher):
pass
class TCS_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class RS_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class Crew_Screen(Screen, EventDispatcher):
pass
class MSS_MT_Screen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
class MimicScreen(Screen, EventDispatcher):
signalcolor = ObjectProperty([1, 1, 1])
def changeMimicBoolean(self, *args):
global mimicbutton
mimicbutton = args[0]
def startproc(*args):
global p,TDRSproc
logWrite("Telemetry Subprocess start")
p = Popen(["node", mimic_directory + "/Mimic/Pi/ISS_Telemetry.js"]) #uncomment if live data comes back :D :D :D :D WE SAVED ISSLIVE
TDRSproc = Popen(["python3", mimic_directory + "/Mimic/Pi/TDRScheck.py"]) #uncomment if live data comes back :D :D :D :D WE SAVED ISSLIVE
#p = Popen([mimic_directory + "/Mimic/Pi/RecordedData/playback.out",mimic_directory + "/Mimic/Pi/RecordedData/Data"])
def killproc(*args):
global p,p2,c
c.execute("INSERT OR IGNORE INTO telemetry VALUES('Lightstreamer', '0', 'Unsubscribed', '0', 0)")
try:
p.kill()
p2.kill()
TDRSproc.kill()
except Exception:
pass
class MainScreenManager(ScreenManager):
pass
class MainApp(App):
def build(self):
global startup, ScreenList, stopAnimation
self.main_screen = MainScreen(name = 'main')
self.mimic_screen = MimicScreen(name = 'mimic')
self.iss_screen = ISS_Screen(name = 'iss')
self.eclss_screen = ECLSS_Screen(name = 'eclss')
self.control_screen = ManualControlScreen(name = 'manualcontrol')
self.orbit_screen = Orbit_Screen(name = 'orbit')
self.orbit_pass = Orbit_Pass(name = 'orbit_pass')
self.orbit_data = Orbit_Data(name = 'orbit_data')
self.fakeorbit_screen = FakeOrbitScreen(name = 'fakeorbit')
self.eps_screen = EPS_Screen(name = 'eps')
self.ct_screen = CT_Screen(name = 'ct')
self.ct_sasa_screen = CT_SASA_Screen(name = 'ct_sasa')
self.ct_uhf_screen = CT_UHF_Screen(name = 'ct_uhf')
self.ct_camera_screen = CT_Camera_Screen(name = 'ct_camera')
self.ct_sgant_screen = CT_SGANT_Screen(name = 'ct_sgant')
self.gnc_screen = GNC_Screen(name = 'gnc')
self.tcs_screen = TCS_Screen(name = 'tcs')
self.crew_screen = Crew_Screen(name = 'crew')
self.settings_screen = Settings_Screen(name = 'settings')
self.us_eva = EVA_US_Screen(name='us_eva')
self.rs_eva = EVA_RS_Screen(name='rs_eva')
self.rs_screen = RS_Screen(name='rs')
self.mss_mt_screen = MSS_MT_Screen(name='mt')
self.eva_main = EVA_Main_Screen(name='eva_main')
self.eva_pictures = EVA_Pictures(name='eva_pictures')
#Add all new telemetry screens to this list, this is used for the signal status icon and telemetry value colors
ScreenList = ['tcs_screen', 'eps_screen', 'iss_screen', 'eclss_screen',
'ct_screen', 'ct_sasa_screen', 'ct_sgant_screen', 'ct_uhf_screen',
'ct_camera_screen', 'gnc_screen', 'orbit_screen', 'us_eva', 'rs_eva',
'eva_main', 'mimic_screen', 'mss_mt_screen','orbit_pass','orbit_data']
root = MainScreenManager(transition=SwapTransition())
root.add_widget(self.main_screen)
root.add_widget(self.control_screen)
root.add_widget(self.mimic_screen)
root.add_widget(self.fakeorbit_screen)
root.add_widget(self.orbit_screen)
root.add_widget(self.orbit_pass)
root.add_widget(self.orbit_data)
root.add_widget(self.iss_screen)
root.add_widget(self.eclss_screen)
root.add_widget(self.eps_screen)
root.add_widget(self.ct_screen)
root.add_widget(self.ct_sasa_screen)
root.add_widget(self.ct_uhf_screen)
root.add_widget(self.ct_camera_screen)
root.add_widget(self.ct_sgant_screen)
root.add_widget(self.gnc_screen)
root.add_widget(self.us_eva)
root.add_widget(self.rs_eva)
root.add_widget(self.rs_screen)
root.add_widget(self.mss_mt_screen)
root.add_widget(self.eva_main)
root.add_widget(self.eva_pictures)
root.add_widget(self.tcs_screen)
root.add_widget(self.crew_screen)
root.add_widget(self.settings_screen)
root.current = 'main' #change this back to main when done with eva setup
Clock.schedule_interval(self.update_labels, 1) #all telemetry will refresh and get pushed to the arduinos every second
Clock.schedule_interval(self.animate3, 0.1)
Clock.schedule_interval(self.orbitUpdate, 1)
Clock.schedule_interval(self.checkCrew, 600)
if startup:
startup = False
Clock.schedule_once(self.checkCrew, 30)
Clock.schedule_once(self.checkBlogforEVA, 30)
Clock.schedule_once(self.getTLE, 15) #initial TLE fetch shortly after startup (requires internet)
Clock.schedule_once(self.TDRSupdate, 30) #initial TDRS map update (requires internet)
Clock.schedule_interval(self.getTLE, 300)
Clock.schedule_interval(self.TDRSupdate, 600)
Clock.schedule_interval(self.check_internet, 1)
#schedule the orbit map night shade overlay to update every 2 minutes
Clock.schedule_interval(self.updateNightShade, 120)
Clock.schedule_interval(self.updateOrbitMap, 10)
Clock.schedule_interval(self.checkTDRS, 5)
return root
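#check_internet fires a quick UrlRequest at google once a second and sets the global internet flag from the callbacks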
def check_internet(self, dt):
global internet
def on_success(req, result):
global internet
internet = True
def on_redirect(req, result):
global internet
internet = True
def on_failure(req, result):
global internet
internet = False
def on_error(req, result):
global internet
internet = False
req = UrlRequest("http://google.com", on_success, on_redirect, on_failure, on_error, timeout=1)
def deleteURLPictures(self, dt):
logWrite("Function call - deleteURLPictures")
global EVA_picture_urls
del EVA_picture_urls[:]
EVA_picture_urls[:] = []
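#changePictures cycles the EVA image widgets through the fetched picture URLs, wrapping the index back to zero at the end of the list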
def changePictures(self, dt):
logWrite("Function call - changeURLPictures")
global EVA_picture_urls
global urlindex
urlsize = len(EVA_picture_urls)
if urlsize > 0:
self.us_eva.ids.EVAimage.source = EVA_picture_urls[urlindex]
self.eva_pictures.ids.EVAimage.source = EVA_picture_urls[urlindex]
urlindex = urlindex + 1
if urlindex > urlsize-1:
urlindex = 0
def updateOrbitMap(self, dt):
self.orbit_screen.ids.OrbitMap.source = mimic_directory + '/Mimic/Pi/imgs/orbit/map.jpg'
self.orbit_screen.ids.OrbitMap.reload()
def updateNightShade(self, dt):
proc = Popen(["python3", mimic_directory + "/Mimic/Pi/NightShade.py"])
def checkTDRS(self, dt):
global activeTDRS1
global activeTDRS2
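#checkTDRS is currently a stub: it declares the active TDRS globals but does not update them yet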
def check_EVA_stats(self, lastname1, firstname1, lastname2, firstname2):
global numEVAs1, EVAtime_hours1, EVAtime_minutes1, numEVAs2, EVAtime_hours2, EVAtime_minutes2
logWrite("Function call - check EVA stats")
eva_url = 'http://www.spacefacts.de/eva/e_eva_az.htm'
def on_success(req, result):
logWrite("Check EVA Stats - Successs")
soup = BeautifulSoup(result, 'html.parser') #using bs4 to parse website
numEVAs1 = 0
EVAtime_hours1 = 0
EVAtime_minutes1 = 0
numEVAs2 = 0
EVAtime_hours2 = 0
EVAtime_minutes2 = 0
tabletags = soup.find_all("td")
for tag in tabletags:
if lastname1 in tag.text:
if firstname1 in tag.find_next_sibling("td").text:
numEVAs1 = tag.find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").text
EVAtime_hours1 = int(tag.find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").text)
EVAtime_minutes1 = int(tag.find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").text)
EVAtime_minutes1 += (EVAtime_hours1 * 60)
for tag in tabletags:
if lastname2 in tag.text:
if firstname2 in tag.find_next_sibling("td").text:
numEVAs2 = tag.find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").text
EVAtime_hours2 = int(tag.find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").text)
EVAtime_minutes2 = int(tag.find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").find_next_sibling("td").text)
EVAtime_minutes2 += (EVAtime_hours2 * 60)
EV1_EVA_number = numEVAs1
EV1_EVA_time = EVAtime_minutes1
EV2_EVA_number = numEVAs2
EV2_EVA_time = EVAtime_minutes2
EV1_minutes = str(EV1_EVA_time%60).zfill(2)
EV2_minutes = str(EV2_EVA_time%60).zfill(2)
EV1_hours = int(EV1_EVA_time/60)
EV2_hours = int(EV2_EVA_time/60)
self.us_eva.ids.EV1.text = " (EV): " + str(firstname1) + " " + str(lastname1)
self.us_eva.ids.EV2.text = " (EV): " + str(firstname2) + " " + str(lastname2)
self.us_eva.ids.EV1_EVAnum.text = "Number of EVAs = " + str(EV1_EVA_number)
self.us_eva.ids.EV2_EVAnum.text = "Number of EVAs = " + str(EV2_EVA_number)
self.us_eva.ids.EV1_EVAtime.text = "Total EVA Time = " + str(EV1_hours) + "h " + str(EV1_minutes) + "m"
self.us_eva.ids.EV2_EVAtime.text = "Total EVA Time = " + str(EV2_hours) + "h " + str(EV2_minutes) + "m"
def on_redirect(req, result):
logWrite("Warning - EVA stats failure (redirect)")
def on_failure(req, result):
logWrite("Warning - EVA stats failure (url failure)")
def on_error(req, result):
logWrite("Warning - EVA stats failure (url error)")
#obtain eva statistics web page for parsing
req = UrlRequest(eva_url, on_success, on_redirect, on_failure, on_error, timeout=1)
def checkBlogforEVA(self, dt):
iss_blog_url = 'https://blogs.nasa.gov/spacestation/tag/spacewalk/'
def on_success(req, data): #if blog data is successfully received, it is processed here
logWrite("Blog Success")
soup = BeautifulSoup(data, "lxml")
blog_entries = soup.find("div", {"class": "entry-content"})
blog_text = blog_entries.get_text()
iss_EVcrew_url = 'https://www.howmanypeopleareinspacerightnow.com/peopleinspace.json'
def on_success2(req2, data2):
logWrite("Successfully fetched EV crew JSON")
number_of_space = int(data2['number'])
names = []
for num in range(0, number_of_space):
names.append(str(data2['people'][num]['name']))
try:
self.checkBlog(names,blog_text)
except Exception as e:
logWrite("Error checking blog: " + str(e))
def on_redirect2(req, result):
logWrite("Warning - Get EVA crew failure (redirect)")
logWrite(result)
def on_failure2(req, result):
logWrite("Warning - Get EVA crew failure (url failure)")
def on_error2(req, result):
logWrite("Warning - Get EVA crew failure (url error)")
req2 = UrlRequest(iss_EVcrew_url, on_success2, on_redirect2, on_failure2, on_error2, timeout=1)
def on_redirect(req, result):
logWrite("Warning - Get nasa blog failure (redirect)")
def on_failure(req, result):
logWrite("Warning - Get nasa blog failure (url failure)")
def on_error(req, result):
logWrite("Warning - Get nasa blog failure (url error)")
req = UrlRequest(iss_blog_url, on_success, on_redirect, on_failure, on_error, timeout=1)
def checkBlog(self, names, blog_text): #takes the nasa blog and compares it to people in space
ev1_surname = ''
ev1_firstname = ''
ev2_surname = ''
ev2_firstname = ''
ev1name = ''
ev2name = ''
name_position = 1000000
for name in names: #search the blog text for names matching the people-in-space list, take the first match as likely EV1
if name in blog_text:
if blog_text.find(name) < name_position:
name_position = blog_text.find(name)
ev1name = name
name_position = 1000000
for name in names: #search the blog text again, taking the first match that is not EV1 as likely EV2
if name in blog_text and name != ev1name:
if blog_text.find(name) < name_position:
name_position = blog_text.find(name)
ev2name = name
logWrite("Likely EV1: "+ev1name)
logWrite("Likely EV2: "+ev2name)
ev1_surname = ev1name.split()[-1]
ev1_firstname = ev1name.split()[0]
ev2_surname = ev2name.split()[-1]
ev2_firstname = ev2name.split()[0]
try:
self.check_EVA_stats(ev1_surname,ev1_firstname,ev2_surname,ev2_firstname)
except Exception as e:
logWrite("Error retrieving EVA stats: " + str(e))
def flashUS_EVAbutton(self, instance):
logWrite("Function call - flashUS_EVA")
self.eva_main.ids.US_EVA_Button.background_color = (0, 0, 1, 1)
def reset_color(*args):
self.eva_main.ids.US_EVA_Button.background_color = (1, 1, 1, 1)
Clock.schedule_once(reset_color, 0.5)
def flashRS_EVAbutton(self, instance):
logWrite("Function call - flashRS_EVA")
self.eva_main.ids.RS_EVA_Button.background_color = (0, 0, 1, 1)
def reset_color(*args):
self.eva_main.ids.RS_EVA_Button.background_color = (1, 1, 1, 1)
Clock.schedule_once(reset_color, 0.5)
def flashEVAbutton(self, instance):
logWrite("Function call - flashEVA")
self.mimic_screen.ids.EVA_button.background_color = (0, 0, 1, 1)
def reset_color(*args):
self.mimic_screen.ids.EVA_button.background_color = (1, 1, 1, 1)
Clock.schedule_once(reset_color, 0.5)
def EVA_clock(self, dt):
global seconds, minutes, hours, EVAstartTime
unixconvert = time.gmtime(time.time())
currenthours = float(unixconvert[7])*24+unixconvert[3]+float(unixconvert[4])/60+float(unixconvert[5])/3600
difference = (currenthours-EVAstartTime)*3600
minutes, seconds = divmod(difference, 60)
hours, minutes = divmod(minutes, 60)
hours = int(hours)
minutes = int(minutes)
seconds = int(seconds)
self.us_eva.ids.EVA_clock.text =(str(hours) + ":" + str(minutes).zfill(2) + ":" + str(int(seconds)).zfill(2))
self.us_eva.ids.EVA_clock.color = 0.33, 0.7, 0.18
def animate(self, instance):
global new_x2, new_y2
self.main_screen.ids.ISStiny2.size_hint = 0.07, 0.07
new_x2 = new_x2+0.007
new_y2 = (math.sin(new_x2*30)/18)+0.75
if new_x2 > 1:
new_x2 = new_x2-1.0
self.main_screen.ids.ISStiny2.pos_hint = {"center_x": new_x2, "center_y": new_y2}
def animate3(self, instance):
global new_x, new_y, sizeX, sizeY, startingAnim
if new_x<0.886:
new_x = new_x+0.007
new_y = (math.sin(new_x*30)/18)+0.75
self.main_screen.ids.ISStiny.pos_hint = {"center_x": new_x, "center_y": new_y}
else:
if sizeX <= 0.15:
sizeX = sizeX + 0.01
sizeY = sizeY + 0.01
self.main_screen.ids.ISStiny.size_hint = sizeX, sizeY
else:
if startingAnim:
Clock.schedule_interval(self.animate, 0.1)
startingAnim = False
def changeColors(self, *args): #sets the signal-status color on every telemetry screen in ScreenList
#signalcolor is a kv property; every signal-status-dependent label is bound to it and updates to whatever color this function receives
global ScreenList
for x in ScreenList:
getattr(self, x).signalcolor = args[0], args[1], args[2]
def changeManualControlBoolean(self, *args):
global manualcontrol
manualcontrol = args[0]
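#note: the repeated sublat/sublong parsing below converts ephem's "deg:min:sec" strings to decimal degrees
#a small helper could replace each inline expression - just a sketch, not wired in:
#    def dms_to_degrees(angle):
#        deg, minutes, seconds = (float(part) for part in str(angle).split(':'))
#        return deg + minutes/60 + seconds/3600
#(like the inline expressions, this adds the minute/second terms with a positive sign even when the degree term is negative)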
def TDRSupdate(self, dt):
global TDRS12_TLE, TDRS6_TLE, TDRS10_TLE, TDRS11_TLE, TDRS7_TLE
normalizedX = self.orbit_screen.ids.OrbitMap.norm_image_size[0] / self.orbit_screen.ids.OrbitMap.texture_size[0]
normalizedY = self.orbit_screen.ids.OrbitMap.norm_image_size[1] / self.orbit_screen.ids.OrbitMap.texture_size[1]
def scaleLatLon(latitude, longitude):
#converting lat lon to x, y for orbit map
fromLatSpan = 180.0
fromLonSpan = 360.0
toLatSpan = 0.598
toLonSpan = 0.716
valueLatScaled = (float(latitude)+90.0)/float(fromLatSpan)
valueLonScaled = (float(longitude)+180.0)/float(fromLonSpan)
newLat = (0.265) + (valueLatScaled * toLatSpan)
newLon = (0.14) + (valueLonScaled * toLonSpan)
return {'newLat': newLat, 'newLon': newLon}
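#scaleLatLon2 converts lat/lon straight to pixel coordinates on the map texture (used for widget .pos and groundtrack point lists)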
def scaleLatLon2(in_latitude,in_longitude):
MAP_HEIGHT = self.orbit_screen.ids.OrbitMap.texture_size[1]
MAP_WIDTH = self.orbit_screen.ids.OrbitMap.texture_size[0]
new_x = ((MAP_WIDTH / 360.0) * (180 + in_longitude))
new_y = ((MAP_HEIGHT / 180.0) * (90 + in_latitude))
return {'new_y': new_y, 'new_x': new_x}
#TDRS East 2 sats
try:
TDRS12_TLE.compute(datetime.utcnow()) #41 West
except NameError:
TDRS12lon = -41
TDRS12lat = 0
else:
TDRS12lon = float(str(TDRS12_TLE.sublong).split(':')[0]) + float(str(TDRS12_TLE.sublong).split(':')[1])/60 + float(str(TDRS12_TLE.sublong).split(':')[2])/3600
TDRS12lat = float(str(TDRS12_TLE.sublat).split(':')[0]) + float(str(TDRS12_TLE.sublat).split(':')[1])/60 + float(str(TDRS12_TLE.sublat).split(':')[2])/3600
TDRS12_groundtrack = []
date_i = datetime.utcnow()
groundtrackdate = datetime.utcnow()
while date_i < groundtrackdate + timedelta(days=1):
TDRS12_TLE.compute(date_i)
TDRS12lon_gt = float(str(TDRS12_TLE.sublong).split(':')[0]) + float(
str(TDRS12_TLE.sublong).split(':')[1]) / 60 + float(str(TDRS12_TLE.sublong).split(':')[2]) / 3600
TDRS12lat_gt = float(str(TDRS12_TLE.sublat).split(':')[0]) + float(
str(TDRS12_TLE.sublat).split(':')[1]) / 60 + float(str(TDRS12_TLE.sublat).split(':')[2]) / 3600
TDRS12_groundtrack.append(scaleLatLon2(TDRS12lat_gt, TDRS12lon_gt)['new_x'])
TDRS12_groundtrack.append(scaleLatLon2(TDRS12lat_gt, TDRS12lon_gt)['new_y'])
date_i += timedelta(minutes=10)
self.orbit_screen.ids.TDRS12groundtrack.width = 1
self.orbit_screen.ids.TDRS12groundtrack.col = (0,0,1,1)
self.orbit_screen.ids.TDRS12groundtrack.points = TDRS12_groundtrack
try:
TDRS6_TLE.compute(datetime.utcnow()) #46 West
except NameError:
TDRS6lon = -46
TDRS6lat = 0
else:
TDRS6lon = float(str(TDRS6_TLE.sublong).split(':')[0]) + float(str(TDRS6_TLE.sublong).split(':')[1])/60 + float(str(TDRS6_TLE.sublong).split(':')[2])/3600
TDRS6lat = float(str(TDRS6_TLE.sublat).split(':')[0]) + float(str(TDRS6_TLE.sublat).split(':')[1])/60 + float(str(TDRS6_TLE.sublat).split(':')[2])/3600
TDRS6_groundtrack = []
date_i = datetime.utcnow()
groundtrackdate = datetime.utcnow()
while date_i < groundtrackdate + timedelta(days=1):
TDRS6_TLE.compute(date_i)
TDRS6lon_gt = float(str(TDRS6_TLE.sublong).split(':')[0]) + float(
str(TDRS6_TLE.sublong).split(':')[1]) / 60 + float(str(TDRS6_TLE.sublong).split(':')[2]) / 3600
TDRS6lat_gt = float(str(TDRS6_TLE.sublat).split(':')[0]) + float(
str(TDRS6_TLE.sublat).split(':')[1]) / 60 + float(str(TDRS6_TLE.sublat).split(':')[2]) / 3600
TDRS6_groundtrack.append(scaleLatLon2(TDRS6lat_gt, TDRS6lon_gt)['new_x'])
TDRS6_groundtrack.append(scaleLatLon2(TDRS6lat_gt, TDRS6lon_gt)['new_y'])
date_i += timedelta(minutes=10)
self.orbit_screen.ids.TDRS6groundtrack.width = 1
self.orbit_screen.ids.TDRS6groundtrack.col = (0,0,1,1)
self.orbit_screen.ids.TDRS6groundtrack.points = TDRS6_groundtrack
#TDRS West 2 sats
try:
TDRS11_TLE.compute(datetime.utcnow()) #171 West
except NameError:
TDRS11lon = -171
TDRS11lat = 0
else:
TDRS11lon = float(str(TDRS11_TLE.sublong).split(':')[0]) + float(str(TDRS11_TLE.sublong).split(':')[1])/60 + float(str(TDRS11_TLE.sublong).split(':')[2])/3600
TDRS11lat = float(str(TDRS11_TLE.sublat).split(':')[0]) + float(str(TDRS11_TLE.sublat).split(':')[1])/60 + float(str(TDRS11_TLE.sublat).split(':')[2])/3600
TDRS11_groundtrack = []
date_i = datetime.utcnow()
groundtrackdate = datetime.utcnow()
while date_i < groundtrackdate + timedelta(days=1):
TDRS11_TLE.compute(date_i)
TDRS11lon_gt = float(str(TDRS11_TLE.sublong).split(':')[0]) + float(
str(TDRS11_TLE.sublong).split(':')[1]) / 60 + float(str(TDRS11_TLE.sublong).split(':')[2]) / 3600
TDRS11lat_gt = float(str(TDRS11_TLE.sublat).split(':')[0]) + float(
str(TDRS11_TLE.sublat).split(':')[1]) / 60 + float(str(TDRS11_TLE.sublat).split(':')[2]) / 3600
TDRS11_groundtrack.append(scaleLatLon2(TDRS11lat_gt, TDRS11lon_gt)['new_x'])
TDRS11_groundtrack.append(scaleLatLon2(TDRS11lat_gt, TDRS11lon_gt)['new_y'])
date_i += timedelta(minutes=10)
self.orbit_screen.ids.TDRS11groundtrack.width = 1
self.orbit_screen.ids.TDRS11groundtrack.col = (0,0,1,1)
self.orbit_screen.ids.TDRS11groundtrack.points = TDRS11_groundtrack
try:
TDRS10_TLE.compute(datetime.utcnow()) #174 West
except NameError:
TDRS10lon = -174
TDRS10lat = 0
else:
TDRS10lon = float(str(TDRS10_TLE.sublong).split(':')[0]) + float(str(TDRS10_TLE.sublong).split(':')[1])/60 + float(str(TDRS10_TLE.sublong).split(':')[2])/3600
TDRS10lat = float(str(TDRS10_TLE.sublat).split(':')[0]) + float(str(TDRS10_TLE.sublat).split(':')[1])/60 + float(str(TDRS10_TLE.sublat).split(':')[2])/3600
TDRS10_groundtrack = []
date_i = datetime.utcnow()
groundtrackdate = datetime.utcnow()
while date_i < groundtrackdate + timedelta(days=1):
TDRS10_TLE.compute(date_i)
TDRS10lon_gt = float(str(TDRS10_TLE.sublong).split(':')[0]) + float(
str(TDRS10_TLE.sublong).split(':')[1]) / 60 + float(str(TDRS10_TLE.sublong).split(':')[2]) / 3600
TDRS10lat_gt = float(str(TDRS10_TLE.sublat).split(':')[0]) + float(
str(TDRS10_TLE.sublat).split(':')[1]) / 60 + float(str(TDRS10_TLE.sublat).split(':')[2]) / 3600
TDRS10_groundtrack.append(scaleLatLon2(TDRS10lat_gt, TDRS10lon_gt)['new_x'])
TDRS10_groundtrack.append(scaleLatLon2(TDRS10lat_gt, TDRS10lon_gt)['new_y'])
date_i += timedelta(minutes=10)
self.orbit_screen.ids.TDRS10groundtrack.width = 1
self.orbit_screen.ids.TDRS10groundtrack.col = (0,0,1,1)
self.orbit_screen.ids.TDRS10groundtrack.points = TDRS10_groundtrack
#ZOE TDRS-Z
try:
TDRS7_TLE.compute(datetime.utcnow()) #275 West
except NameError:
TDRS7lon = 85
TDRS7lat = 0
else:
TDRS7lon = float(str(TDRS7_TLE.sublong).split(':')[0]) + float(str(TDRS7_TLE.sublong).split(':')[1])/60 + float(str(TDRS7_TLE.sublong).split(':')[2])/3600
TDRS7lat = float(str(TDRS7_TLE.sublat).split(':')[0]) + float(str(TDRS7_TLE.sublat).split(':')[1])/60 + float(str(TDRS7_TLE.sublat).split(':')[2])/3600
TDRS7_groundtrack = []
date_i = datetime.utcnow()
groundtrackdate = datetime.utcnow()
while date_i < groundtrackdate + timedelta(days=1):
TDRS7_TLE.compute(date_i)
TDRS7lon_gt = float(str(TDRS7_TLE.sublong).split(':')[0]) + float(
str(TDRS7_TLE.sublong).split(':')[1]) / 60 + float(str(TDRS7_TLE.sublong).split(':')[2]) / 3600
TDRS7lat_gt = float(str(TDRS7_TLE.sublat).split(':')[0]) + float(
str(TDRS7_TLE.sublat).split(':')[1]) / 60 + float(str(TDRS7_TLE.sublat).split(':')[2]) / 3600
TDRS7_groundtrack.append(scaleLatLon2(TDRS7lat_gt, TDRS7lon_gt)['new_x'])
TDRS7_groundtrack.append(scaleLatLon2(TDRS7lat_gt, TDRS7lon_gt)['new_y'])
date_i += timedelta(minutes=10)
self.orbit_screen.ids.TDRS7groundtrack.width = 1
self.orbit_screen.ids.TDRS7groundtrack.col = (0,0,1,1)
self.orbit_screen.ids.TDRS7groundtrack.points = TDRS7_groundtrack
#draw the TDRS satellite locations
self.orbit_screen.ids.TDRS12.pos = (scaleLatLon2(TDRS12lat, TDRS12lon)['new_x']-((self.orbit_screen.ids.TDRS12.width/2)*normalizedX),scaleLatLon2(TDRS12lat, TDRS12lon)['new_y']-((self.orbit_screen.ids.TDRS12.height/2)*normalizedY))
self.orbit_screen.ids.TDRS6.pos = (scaleLatLon2(TDRS6lat, TDRS6lon)['new_x']-((self.orbit_screen.ids.TDRS6.width/2)*normalizedX),scaleLatLon2(TDRS6lat, TDRS6lon)['new_y']-((self.orbit_screen.ids.TDRS6.height/2)*normalizedY))
self.orbit_screen.ids.TDRS11.pos = (scaleLatLon2(TDRS11lat, TDRS11lon)['new_x']-((self.orbit_screen.ids.TDRS11.width/2)*normalizedX),scaleLatLon2(TDRS11lat, TDRS11lon)['new_y']-((self.orbit_screen.ids.TDRS11.height/2)*normalizedY))
self.orbit_screen.ids.TDRS10.pos = (scaleLatLon2(TDRS10lat, TDRS10lon)['new_x']-((self.orbit_screen.ids.TDRS10.width/2)*normalizedX),scaleLatLon2(TDRS10lat, TDRS10lon)['new_y']-((self.orbit_screen.ids.TDRS10.height/2)*normalizedY))
self.orbit_screen.ids.TDRS7.pos = (scaleLatLon2(TDRS7lat, TDRS7lon)['new_x']-((self.orbit_screen.ids.TDRS7.width/2)*normalizedX),scaleLatLon2(TDRS7lat, TDRS7lon)['new_y']-((self.orbit_screen.ids.TDRS7.height/2)*normalizedY))
#add labels and ZOE
self.orbit_screen.ids.TDRSeLabel.pos_hint = {"center_x": scaleLatLon(0, -41)['newLon']+0.06, "center_y": scaleLatLon(0, -41)['newLat']}
self.orbit_screen.ids.TDRSwLabel.pos_hint = {"center_x": scaleLatLon(0, -174)['newLon']+0.06, "center_y": scaleLatLon(0, -174)['newLat']}
self.orbit_screen.ids.TDRSzLabel.pos_hint = {"center_x": scaleLatLon(0, 85)['newLon']+0.05, "center_y": scaleLatLon(0, 85)['newLat']}
self.orbit_screen.ids.ZOE.pos_hint = {"center_x": scaleLatLon(0, 77)['newLon'], "center_y": scaleLatLon(0, 77)['newLat']}
self.orbit_screen.ids.ZOElabel.pos_hint = {"center_x": scaleLatLon(0, 77)['newLon'], "center_y": scaleLatLon(0, 77)['newLat']+0.1}
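#orbitUpdate runs once per second: it propagates the ISS TLE, moves the ISS and sun icons on the map, redraws the groundtrack, highlights the active TDRS, and computes the next pass for the hardcoded observer location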
def orbitUpdate(self, dt):
global overcountry, ISS_TLE, ISS_TLE_Line1, ISS_TLE_Line2, ISS_TLE_Acquired, sgant_elevation, sgant_elevation_old, sgant_xelevation, aos, oldtdrs, tdrs, logged
global TDRS12_TLE, TDRS6_TLE, TDRS7_TLE, TDRS10_TLE, TDRS11_TLE, tdrs1, tdrs2, tdrs_timestamp
def scaleLatLon(latitude, longitude):
#converting lat lon to x, y for orbit map
fromLatSpan = 180.0
fromLonSpan = 360.0
toLatSpan = 0.598
toLonSpan = 0.716
valueLatScaled = (float(latitude)+90.0)/float(fromLatSpan)
valueLonScaled = (float(longitude)+180.0)/float(fromLonSpan)
newLat = (0.265) + (valueLatScaled * toLatSpan)
newLon = (0.14) + (valueLonScaled * toLonSpan)
return {'newLat': newLat, 'newLon': newLon}
def scaleLatLon2(in_latitude,in_longitude):
MAP_HEIGHT = self.orbit_screen.ids.OrbitMap.texture_size[1]
MAP_WIDTH = self.orbit_screen.ids.OrbitMap.texture_size[0]
new_x = ((MAP_WIDTH / 360.0) * (180 + in_longitude))
new_y = ((MAP_HEIGHT / 180.0) * (90 + in_latitude))
return {'new_y': new_y, 'new_x': new_x}
#copied from apexpy - copyright 2015 Christer van der Meeren MIT license
def subsolar(datetime):
year = datetime.year
doy = datetime.timetuple().tm_yday
ut = datetime.hour * 3600 + datetime.minute * 60 + datetime.second
if not 1601 <= year <= 2100:
raise ValueError('Year must be in [1601, 2100]')
yr = year - 2000
nleap = int(np.floor((year - 1601.0) / 4.0))
nleap -= 99
if year <= 1900:
ncent = int(np.floor((year - 1601.0) / 100.0))
ncent = 3 - ncent
nleap = nleap + ncent
l0 = -79.549 + (-0.238699 * (yr - 4.0 * nleap) + 3.08514e-2 * nleap)
g0 = -2.472 + (-0.2558905 * (yr - 4.0 * nleap) - 3.79617e-2 * nleap)
# Days (including fraction) since 12 UT on January 1 of IYR:
df = (ut / 86400.0 - 1.5) + doy
# Mean longitude of Sun:
lmean = l0 + 0.9856474 * df
# Mean anomaly in radians:
grad = np.radians(g0 + 0.9856003 * df)
# Ecliptic longitude:
lmrad = np.radians(lmean + 1.915 * np.sin(grad)
+ 0.020 * np.sin(2.0 * grad))
sinlm = np.sin(lmrad)
# Obliquity of ecliptic in radians:
epsrad = np.radians(23.439 - 4e-7 * (df + 365 * yr + nleap))
# Right ascension:
alpha = np.degrees(np.arctan2(np.cos(epsrad) * sinlm, np.cos(lmrad)))
# Declination, which is also the subsolar latitude:
sslat = np.degrees(np.arcsin(np.sin(epsrad) * sinlm))
# Equation of time (degrees):
etdeg = lmean - alpha
nrot = round(etdeg / 360.0)
etdeg = etdeg - 360.0 * nrot
# Subsolar longitude:
sslon = 180.0 - (ut / 240.0 + etdeg) # Earth rotates one degree every 240 s.
nrot = round(sslon / 360.0)
sslon = sslon - 360.0 * nrot
return sslat, sslon
if ISS_TLE_Acquired:
ISS_TLE.compute(datetime.utcnow())
#------------------Latitude/Longitude Stuff---------------------------
latitude = float(str(ISS_TLE.sublat).split(':')[0]) + float(str(ISS_TLE.sublat).split(':')[1])/60 + float(str(ISS_TLE.sublat).split(':')[2])/3600
longitude = float(str(ISS_TLE.sublong).split(':')[0]) + float(str(ISS_TLE.sublong).split(':')[1])/60 + float(str(ISS_TLE.sublong).split(':')[2])/3600
#inclination = ISS_TLE.inc
normalizedX = self.orbit_screen.ids.OrbitMap.norm_image_size[0] / self.orbit_screen.ids.OrbitMap.texture_size[0]
normalizedY = self.orbit_screen.ids.OrbitMap.norm_image_size[1] / self.orbit_screen.ids.OrbitMap.texture_size[1]
self.orbit_screen.ids.OrbitISStiny.pos = (
scaleLatLon2(latitude, longitude)['new_x'] - ((self.orbit_screen.ids.OrbitISStiny.width / 2) * normalizedX * 2), #had to fudge a little not sure why
scaleLatLon2(latitude, longitude)['new_y'] - ((self.orbit_screen.ids.OrbitISStiny.height / 2) * normalizedY * 2)) #had to fudge a little not sure why
#get the position of the sub solar point to add the sun icon to the map
sunlatitude, sunlongitude = subsolar(datetime.utcnow())
self.orbit_screen.ids.OrbitSun.pos = (
scaleLatLon2(int(sunlatitude), int(sunlongitude))['new_x'] - ((self.orbit_screen.ids.OrbitSun.width / 2) * normalizedX * 2), #had to fudge a little not sure why
scaleLatLon2(int(sunlatitude), int(sunlongitude))['new_y'] - ((self.orbit_screen.ids.OrbitSun.height / 2) * normalizedY * 2)) #had to fudge a little not sure why
#draw the ISS groundtrack behind and ahead of the 180 longitude cutoff
ISS_groundtrack = []
ISS_groundtrack2 = []
date_i = datetime.utcnow()
groundtrackdate = datetime.utcnow()
while date_i < groundtrackdate + timedelta(minutes=95):
ISS_TLE.compute(date_i)
ISSlon_gt = float(str(ISS_TLE.sublong).split(':')[0]) + float(
str(ISS_TLE.sublong).split(':')[1]) / 60 + float(str(ISS_TLE.sublong).split(':')[2]) / 3600
ISSlat_gt = float(str(ISS_TLE.sublat).split(':')[0]) + float(
str(ISS_TLE.sublat).split(':')[1]) / 60 + float(str(ISS_TLE.sublat).split(':')[2]) / 3600
if ISSlon_gt < longitude-1: #if the propagated groundtrack is behind the iss (i.e. wraps around the screen) add to new groundtrack line
ISS_groundtrack2.append(scaleLatLon2(ISSlat_gt, ISSlon_gt)['new_x'])
ISS_groundtrack2.append(scaleLatLon2(ISSlat_gt, ISSlon_gt)['new_y'])
else:
ISS_groundtrack.append(scaleLatLon2(ISSlat_gt, ISSlon_gt)['new_x'])
ISS_groundtrack.append(scaleLatLon2(ISSlat_gt, ISSlon_gt)['new_y'])
date_i += timedelta(seconds=60)
self.orbit_screen.ids.ISSgroundtrack.width = 1
self.orbit_screen.ids.ISSgroundtrack.col = (1, 0, 0, 1)
self.orbit_screen.ids.ISSgroundtrack.points = ISS_groundtrack
self.orbit_screen.ids.ISSgroundtrack2.width = 1
self.orbit_screen.ids.ISSgroundtrack2.col = (1, 0, 0, 1)
self.orbit_screen.ids.ISSgroundtrack2.points = ISS_groundtrack2
self.orbit_screen.ids.latitude.text = str("{:.2f}".format(latitude))
self.orbit_screen.ids.longitude.text = str("{:.2f}".format(longitude))
TDRScursor.execute('select TDRS1 from tdrs')
tdrs1 = int(TDRScursor.fetchone()[0])
TDRScursor.execute('select TDRS2 from tdrs')
tdrs2 = int(TDRScursor.fetchone()[0])
TDRScursor.execute('select Timestamp from tdrs')
tdrs_timestamp = TDRScursor.fetchone()[0]
# THIS SECTION NEEDS IMPROVEMENT
tdrs = "n/a"
self.ct_sgant_screen.ids.tdrs_east12.angle = (-1*longitude)-41
self.ct_sgant_screen.ids.tdrs_east6.angle = (-1*longitude)-46
self.ct_sgant_screen.ids.tdrs_z7.angle = ((-1*longitude)-41)+126
self.ct_sgant_screen.ids.tdrs_west11.angle = ((-1*longitude)-41)-133
self.ct_sgant_screen.ids.tdrs_west10.angle = ((-1*longitude)-41)-130
if (tdrs1 == 12 or tdrs2 == 12) and float(aos) == 1.0: #either assigned TDRS can be the active one
tdrs = "east-12"
self.ct_sgant_screen.ids.tdrs_label.text = "TDRS-East-12"
if (tdrs1 == 6 or tdrs2 == 6) and float(aos) == 1.0:
tdrs = "east-6"
self.ct_sgant_screen.ids.tdrs_label.text = "TDRS-East-6"
if (tdrs1 == 10 or tdrs2 == 10) and float(aos) == 1.0:
tdrs = "west-10"
self.ct_sgant_screen.ids.tdrs_label.text = "TDRS-West-10"
if (tdrs1 == 11 or tdrs2 == 11) and float(aos) == 1.0:
tdrs = "west-11"
self.ct_sgant_screen.ids.tdrs_label.text = "TDRS-West-11"
if (tdrs1 == 7 or tdrs2 == 7) and float(aos) == 1.0:
tdrs = "z-7"
self.ct_sgant_screen.ids.tdrs_label.text = "TDRS-Z-7"
elif tdrs1 == 0 and tdrs2 == 0:
self.ct_sgant_screen.ids.tdrs_label.text = "-"
tdrs = "----"
self.ct_sgant_screen.ids.tdrs_z7.color = 1, 1, 1, 1
self.orbit_screen.ids.TDRSwLabel.color = (1,1,1,1)
self.orbit_screen.ids.TDRSeLabel.color = (1,1,1,1)
self.orbit_screen.ids.TDRSzLabel.color = (1,1,1,1)
self.orbit_screen.ids.TDRS11.col = (1,1,1,1)
self.orbit_screen.ids.TDRS10.col = (1,1,1,1)
self.orbit_screen.ids.TDRS12.col = (1,1,1,1)
self.orbit_screen.ids.TDRS6.col = (1,1,1,1)
self.orbit_screen.ids.TDRS7.col = (1,1,1,1)
self.orbit_screen.ids.ZOElabel.color = (1,1,1,1)
self.orbit_screen.ids.ZOE.col = (1,0.5,0,0.5)
if "10" in tdrs: #tdrs10 and 11 west
self.orbit_screen.ids.TDRSwLabel.color = (1,0,1,1)
self.orbit_screen.ids.TDRS10.col = (1,0,1,1)
if "11" in tdrs: #tdrs10 and 11 west
self.orbit_screen.ids.TDRSwLabel.color = (1,0,1,1)
self.orbit_screen.ids.TDRS11.col = (1,0,1,1)
self.orbit_screen.ids.TDRS10.col = (1,1,1,1)
if "6" in tdrs: #tdrs6 and 12 east
self.orbit_screen.ids.TDRSeLabel.color = (1,0,1,1)
self.orbit_screen.ids.TDRS6.col = (1,0,1,1)
if "12" in tdrs: #tdrs6 and 12 east
self.orbit_screen.ids.TDRSeLabel.color = (1,0,1,1)
self.orbit_screen.ids.TDRS12.col = (1,0,1,1)
if "7" in tdrs: #tdrs7 z
self.ct_sgant_screen.ids.tdrs_z7.color = 1, 1, 1, 1
self.orbit_screen.ids.TDRSzLabel.color = (1,0,1,1)
self.orbit_screen.ids.TDRS7.col = (1,0,1,1)
self.orbit_screen.ids.ZOElabel.color = 0, 0, 0, 0
self.orbit_screen.ids.ZOE.col = (0,0,0,0)
#------------------Orbit Stuff---------------------------
now = datetime.utcnow()
mins = (now - now.replace(hour=0,minute=0,second=0,microsecond=0)).total_seconds()
orbits_today = math.floor((float(mins)/60)/90)
self.orbit_screen.ids.dailyorbit.text = str(int(orbits_today)) #display number of orbits since utc midnight
year = int('20' + str(ISS_TLE_Line1[18:20]))
decimal_days = float(ISS_TLE_Line1[20:32])
converted_time = datetime(year, 1 ,1) + timedelta(decimal_days - 1)
time_since_epoch = ((now - converted_time).total_seconds()) #convert time difference to hours
totalorbits = int(ISS_TLE_Line2[63:68]) + 100000 + int(float(time_since_epoch)/(90*60)) #add number of orbits since the tle was generated
self.orbit_screen.ids.totalorbits.text = str(totalorbits) #display number of orbits since utc midnight
#------------------ISS Pass Detection---------------------------
location = ephem.Observer()
location.lon = '-95:21:59' #will need to make these an input option
location.lat = '29:45:43'
location.elevation = 10
location.name = 'location'
location.horizon = '10'
location.pressure = 0
location.date = datetime.utcnow()
#use location to draw dot on orbit map
mylatitude = float(str(location.lat).split(':')[0]) + float(str(location.lat).split(':')[1])/60 + float(str(location.lat).split(':')[2])/3600
mylongitude = float(str(location.lon).split(':')[0]) + float(str(location.lon).split(':')[1])/60 + float(str(location.lon).split(':')[2])/3600
self.orbit_screen.ids.mylocation.col = (0,0,1,1)
self.orbit_screen.ids.mylocation.pos = (scaleLatLon2(mylatitude, mylongitude)['new_x']-((self.orbit_screen.ids.mylocation.width/2)*normalizedX),scaleLatLon2(mylatitude, mylongitude)['new_y']-((self.orbit_screen.ids.mylocation.height/2)*normalizedY))
def isVisible(pass_info):
def seconds_between(d1, d2):
return abs((d2 - d1).seconds)
def datetime_from_time(tr):
year, month, day, hour, minute, second = tr.tuple()
dt = dtime.datetime(year, month, day, hour, minute, int(second))
return dt
tr, azr, tt, altt, ts, azs = pass_info
max_time = datetime_from_time(tt)
location.date = max_time
sun = ephem.Sun()
sun.compute(location)
ISS_TLE.compute(location)
sun_alt = float(str(sun.alt).split(':')[0]) + float(str(sun.alt).split(':')[1])/60 + float(str(sun.alt).split(':')[2])/3600
visible = False
if ISS_TLE.eclipsed is False and -18 < sun_alt < -6:
visible = True
#on the pass screen add info for why not visible
return visible
ISS_TLE.compute(location) #compute tle propagation based on provided location
nextpassinfo = location.next_pass(ISS_TLE)
if nextpassinfo[0] is None:
self.orbit_screen.ids.iss_next_pass1.text = "n/a"
self.orbit_screen.ids.iss_next_pass2.text = "n/a"
self.orbit_screen.ids.countdown.text = "n/a"
else:
nextpassdatetime = datetime.strptime(str(nextpassinfo[0]), '%Y/%m/%d %H:%M:%S') #convert to datetime object for timezone conversion
nextpassinfo_format = nextpassdatetime.replace(tzinfo=pytz.utc)
localtimezone = pytz.timezone('America/Chicago')
localnextpass = nextpassinfo_format.astimezone(localtimezone)
self.orbit_screen.ids.iss_next_pass1.text = str(localnextpass).split()[0] #display next pass time
self.orbit_screen.ids.iss_next_pass2.text = str(localnextpass).split()[1].split('-')[0] #display next pass time
timeuntilnextpass = nextpassinfo[0] - location.date
nextpasshours = timeuntilnextpass*24.0
nextpassmins = (nextpasshours-math.floor(nextpasshours))*60
nextpassseconds = (nextpassmins-math.floor(nextpassmins))*60
if isVisible(nextpassinfo):
self.orbit_screen.ids.ISSvisible.text = "Visible Pass!"
else:
self.orbit_screen.ids.ISSvisible.text = "Not Visible"
self.orbit_screen.ids.countdown.text = str("{:.0f}".format(math.floor(nextpasshours))) + ":" + str("{:.0f}".format(math.floor(nextpassmins))) + ":" + str("{:.0f}".format(math.floor(nextpassseconds))) #display time until next pass
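#getTLE fetches the ISS and TDRS two-line element sets from celestrak and builds the ephem satellite objects used by orbitUpdate and TDRSupdate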
def getTLE(self, *args):
global ISS_TLE, ISS_TLE_Line1, ISS_TLE_Line2, ISS_TLE_Acquired
#iss_tle_url = 'https://spaceflight.nasa.gov/realdata/sightings/SSapplications/Post/JavaSSOP/orbit/ISS/SVPOST.html' #the rev counter on this page is wrong
iss_tle_url = 'https://www.celestrak.com/NORAD/elements/stations.txt'
tdrs_tle_url = 'https://www.celestrak.com/NORAD/elements/tdrss.txt'
def on_success(req, data): #if TLE data is successfully received, it is processed here
global ISS_TLE, ISS_TLE_Line1, ISS_TLE_Line2, ISS_TLE_Acquired
soup = BeautifulSoup(data, "lxml")
body = iter(soup.get_text().split('\n'))
results = []
for line in body:
if "ISS (ZARYA)" in line:
results.append(line)
results.append(next(body))
results.append(next(body))
break
results = [i.strip() for i in results]
if len(results) > 0:
ISS_TLE_Line1 = results[1]
ISS_TLE_Line2 = results[2]
ISS_TLE = ephem.readtle("ISS (ZARYA)", str(ISS_TLE_Line1), str(ISS_TLE_Line2))
ISS_TLE_Acquired = True
logWrite("ISS TLE Acquired!")
else:
logWrite("ISS TLE Not Acquired")
ISS_TLE_Acquired = False
def on_redirect(req, result):
logWrite("Warning - Get ISS TLE failure (redirect)")
logWrite(result)
def on_failure(req, result):
logWrite("Warning - Get ISS TLE failure (url failure)")
logWrite(result)
def on_error(req, result):
logWrite("Warning - Get ISS TLE failure (url error)")
logWrite(result)
def on_success2(req2, data2): #if TLE data is successfully received, it is processed here
#retrieve the TLEs for every TDRS that ISS talks too
global TDRS12_TLE,TDRS6_TLE,TDRS11_TLE,TDRS10_TLE,TDRS7_TLE
soup = BeautifulSoup(data2, "lxml")
body = iter(soup.get_text().split('\n'))
results = ['','','']
#TDRS 12 TLE
for line in body:
if "TDRS 12" in line:
results[0] = line
results[1] = next(body)
results[2] = next(body)
break
if len(results[1]) > 0:
TDRS12_TLE = ephem.readtle("TDRS 12", str(results[1]), str(results[2]))
logWrite("TDRS 12 TLE Success!")
else:
logWrite("TDRS 12 TLE not acquired")
results = ['','','']
body = iter(soup.get_text().split('\n'))
#TDRS 6 TLE
for line in body:
if "TDRS 6" in line:
results[0] = line
results[1] = next(body)
results[2] = next(body)
break
if len(results[1]) > 0:
TDRS6_TLE = ephem.readtle("TDRS 6", str(results[1]), str(results[2]))
logWrite("TDRS 6 TLE Success!")
else:
logWrite("TDRS 6 TLE not acquired")
results = ['','','']
body = iter(soup.get_text().split('\n'))
#TDRS 11 TLE
for line in body:
if "TDRS 11" in line:
results[0] = line
results[1] = next(body)
results[2] = next(body)
break
if len(results[1]) > 0:
TDRS11_TLE = ephem.readtle("TDRS 11", str(results[1]), str(results[2]))
logWrite("TDRS 11 TLE Success!")
else:
logWrite("TDRS 11 TLE not acquired")
results = ['','','']
body = iter(soup.get_text().split('\n'))
#TDRS 10 TLE
for line in body:
if "TDRS 10" in line:
results[0] = line
results[1] = next(body)
results[2] = next(body)
break
if len(results[1]) > 0:
TDRS10_TLE = ephem.readtle("TDRS 10", str(results[1]), str(results[2]))
logWrite("TDRS 10 TLE Success!")
else:
logWrite("TDRS 10 TLE not acquired")
results = ['','','']
body = iter(soup.get_text().split('\n'))
#TDRS 7 TLE
for line in body:
if "TDRS 7" in line:
results[0] = line
results[1] = next(body)
results[2] = next(body)
break
if len(results[1]) > 0:
TDRS7_TLE = ephem.readtle("TDRS 7", str(results[1]), str(results[2]))
logWrite("TDRS 7 TLE Success!")
else:
logWrite("TDRS 7 TLE not acquired")
def on_redirect2(req2, result):
logWrite("Warning - Get TDRS TLE failure (redirect)")
logWrite(result)
def on_failure2(req2, result):
logWrite("Warning - Get TDRS TLE failure (url failure)")
logWrite(result)
def on_error2(req2, result):
logWrite("Warning - Get TDRS TLE failure (url error)")
logWrite(result)
req = UrlRequest(iss_tle_url, on_success, on_redirect, on_failure, on_error, timeout=1)
req2 = UrlRequest(tdrs_tle_url, on_success2, on_redirect2, on_failure2, on_error2, timeout=1)
def checkCrew(self, dt):
iss_crew_url = 'https://www.howmanypeopleareinspacerightnow.com/peopleinspace.json'
urlsuccess = False
def on_success(req, data):
logWrite("Successfully fetched crew JSON")
isscrew = 0
crewmember = ['', '', '', '', '', '', '', '', '', '', '', '']
crewmemberbio = ['', '', '', '', '', '', '', '', '', '', '', '']
crewmembertitle = ['', '', '', '', '', '', '', '', '', '', '', '']
crewmemberdays = ['', '', '', '', '', '', '', '', '', '', '', '']
crewmemberpicture = ['', '', '', '', '', '', '', '', '', '', '', '']
crewmembercountry = ['', '', '', '', '', '', '', '', '', '', '', '']
now = datetime.utcnow()
number_of_space = int(data['number'])
for num in range(1, number_of_space+1):
if str(data['people'][num-1]['location']) == str("International Space Station"):
crewmember[isscrew] = str(data['people'][num-1]['name']) #.encode('utf-8')
crewmemberbio[isscrew] = str(data['people'][num-1]['bio'])
crewmembertitle[isscrew] = str(data['people'][num-1]['title'])
datetime_object = datetime.strptime(str(data['people'][num-1]['launchdate']), '%Y-%m-%d')
previousdays = int(data['people'][num-1]['careerdays'])
                    totaldaysinspace = (now - datetime_object).days #timedelta.days also works for crew launched less than a day ago
                    crewmemberdays[isscrew] = str(totaldaysinspace + previousdays) + " days in space"
crewmemberpicture[isscrew] = str(data['people'][num-1]['biophoto'])
crewmembercountry[isscrew] = str(data['people'][num-1]['country']).title()
if str(data['people'][num-1]['country'])==str('usa'):
crewmembercountry[isscrew] = str('USA')
isscrew = isscrew+1
self.crew_screen.ids.crew1.text = str(crewmember[0])
self.crew_screen.ids.crew1title.text = str(crewmembertitle[0])
self.crew_screen.ids.crew1country.text = str(crewmembercountry[0])
self.crew_screen.ids.crew1daysonISS.text = str(crewmemberdays[0])
#self.crew_screen.ids.crew1image.source = str(crewmemberpicture[0])
self.crew_screen.ids.crew2.text = str(crewmember[1])
self.crew_screen.ids.crew2title.text = str(crewmembertitle[1])
self.crew_screen.ids.crew2country.text = str(crewmembercountry[1])
self.crew_screen.ids.crew2daysonISS.text = str(crewmemberdays[1])
#self.crew_screen.ids.crew2image.source = str(crewmemberpicture[1])
self.crew_screen.ids.crew3.text = str(crewmember[2])
self.crew_screen.ids.crew3title.text = str(crewmembertitle[2])
self.crew_screen.ids.crew3country.text = str(crewmembercountry[2])
self.crew_screen.ids.crew3daysonISS.text = str(crewmemberdays[2])
#self.crew_screen.ids.crew3image.source = str(crewmemberpicture[2])
self.crew_screen.ids.crew4.text = str(crewmember[3])
self.crew_screen.ids.crew4title.text = str(crewmembertitle[3])
self.crew_screen.ids.crew4country.text = str(crewmembercountry[3])
self.crew_screen.ids.crew4daysonISS.text = str(crewmemberdays[3])
#self.crew_screen.ids.crew4image.source = str(crewmemberpicture[3])
self.crew_screen.ids.crew5.text = str(crewmember[4])
self.crew_screen.ids.crew5title.text = str(crewmembertitle[4])
self.crew_screen.ids.crew5country.text = str(crewmembercountry[4])
self.crew_screen.ids.crew5daysonISS.text = str(crewmemberdays[4])
#self.crew_screen.ids.crew5image.source = str(crewmemberpicture[4])
self.crew_screen.ids.crew6.text = str(crewmember[5])
self.crew_screen.ids.crew6title.text = str(crewmembertitle[5])
self.crew_screen.ids.crew6country.text = str(crewmembercountry[5])
self.crew_screen.ids.crew6daysonISS.text = str(crewmemberdays[5])
#self.crew_screen.ids.crew6image.source = str(crewmemberpicture[5])
#self.crew_screen.ids.crew7.text = str(crewmember[6])
#self.crew_screen.ids.crew7title.text = str(crewmembertitle[6])
#self.crew_screen.ids.crew7country.text = str(crewmembercountry[6])
#self.crew_screen.ids.crew7daysonISS.text = str(crewmemberdays[6])
#self.crew_screen.ids.crew7image.source = str(crewmemberpicture[6])
#self.crew_screen.ids.crew8.text = str(crewmember[7])
#self.crew_screen.ids.crew8title.text = str(crewmembertitle[7])
#self.crew_screen.ids.crew8country.text = str(crewmembercountry[7])
#self.crew_screen.ids.crew8daysonISS.text = str(crewmemberdays[7])
#self.crew_screen.ids.crew8image.source = str(crewmemberpicture[7]))
#self.crew_screen.ids.crew9.text = str(crewmember[8])
#self.crew_screen.ids.crew9title.text = str(crewmembertitle[8])
#self.crew_screen.ids.crew9country.text = str(crewmembercountry[8])
#self.crew_screen.ids.crew9daysonISS.text = str(crewmemberdays[8])
#self.crew_screen.ids.crew9image.source = str(crewmemberpicture[8])
#self.crew_screen.ids.crew10.text = str(crewmember[9])
#self.crew_screen.ids.crew10title.text = str(crewmembertitle[9])
#self.crew_screen.ids.crew10country.text = str(crewmembercountry[9])
#self.crew_screen.ids.crew10daysonISS.text = str(crewmemberdays[9])
#self.crew_screen.ids.crew10image.source = str(crewmemberpicture[9])
#self.crew_screen.ids.crew11.text = str(crewmember[10])
#self.crew_screen.ids.crew11title.text = str(crewmembertitle[10])
#self.crew_screen.ids.crew11country.text = str(crewmembercountry[10])
#self.crew_screen.ids.crew11daysonISS.text = str(crewmemberdays[10])
#self.crew_screen.ids.crew11image.source = str(crewmemberpicture[10])
#self.crew_screen.ids.crew12.text = str(crewmember[11])
#self.crew_screen.ids.crew12title.text = str(crewmembertitle[11])
#self.crew_screen.ids.crew12country.text = str(crewmembercountry[11])
#self.crew_screen.ids.crew12daysonISS.text = str(crewmemberdays[11])
#self.crew_screen.ids.crew12image.source = str(crewmemberpicture[11])
def on_redirect(req, result):
logWrite("Warning - checkCrew JSON failure (redirect)")
logWrite(result)
print(result)
def on_failure(req, result):
logWrite("Warning - checkCrew JSON failure (url failure)")
def on_error(req, result):
logWrite("Warning - checkCrew JSON failure (url error)")
req = UrlRequest(iss_crew_url, on_success, on_redirect, on_failure, on_error, timeout=1)
def map_rotation(self, args):
scalefactor = 0.083333
scaledValue = float(args)/scalefactor
return scaledValue
def map_psi_bar(self, args):
scalefactor = 0.015
scaledValue = (float(args)*scalefactor)+0.72
return scaledValue
def map_hold_bar(self, args):
scalefactor = 0.0015
scaledValue = (float(args)*scalefactor)+0.71
return scaledValue
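    #worked example for the mapping helpers (illustrative input): a crewlock pressure reading of
    #14.7 psi maps to a pos_hint x of 14.7*0.015 + 0.72 = 0.9405 on the EVA pressure bar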
def hold_timer(self, dt):
global seconds2, holdstartTime
logWrite("Function Call - hold timer")
unixconvert = time.gmtime(time.time())
currenthours = float(unixconvert[7])*24+unixconvert[3]+float(unixconvert[4])/60+float(unixconvert[5])/3600
        seconds2 = (currenthours-holdstartTime)*3600 #elapsed leak-check hold time, counted from when the leak check began
seconds2 = int(seconds2)
new_bar_x = self.map_hold_bar(260-seconds2)
self.us_eva.ids.leak_timer.text = "~"+ str(int(seconds2)) + "s"
self.us_eva.ids.Hold_bar.pos_hint = {"center_x": new_bar_x, "center_y": 0.49}
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/LeakCheckLights.png'
def signal_unsubscribed(self): #change images, used stale signal image
global internet, ScreenList
if not internet:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/offline.png'
self.changeColors(0.5, 0.5, 0.5)
else:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/SignalClientLost.png'
self.changeColors(1, 0.5, 0)
for x in ScreenList:
getattr(self, x).ids.signal.size_hint_y = 0.112
def signal_lost(self):
global internet, ScreenList
if not internet:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/offline.png'
self.changeColors(0.5, 0.5, 0.5)
else:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/signalred.zip'
self.changeColors(1, 0, 0)
for x in ScreenList:
getattr(self, x).ids.signal.anim_delay = 0.4
for x in ScreenList:
getattr(self, x).ids.signal.size_hint_y = 0.112
def signal_acquired(self):
global internet, ScreenList
if not internet:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/offline.png'
self.changeColors(0.5, 0.5, 0.5)
else:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/pulse-transparent.zip'
self.changeColors(0, 1, 0)
for x in ScreenList:
getattr(self, x).ids.signal.anim_delay = 0.05
for x in ScreenList:
getattr(self, x).ids.signal.size_hint_y = 0.15
def signal_stale(self):
global internet, ScreenList
if not internet:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/offline.png'
self.changeColors(0.5, 0.5, 0.5)
else:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/SignalOrangeGray.png'
self.changeColors(1, 0.5, 0)
for x in ScreenList:
getattr(self, x).ids.signal.anim_delay = 0.12
for x in ScreenList:
getattr(self, x).ids.signal.size_hint_y = 0.112
def signal_client_offline(self):
global internet, ScreenList
if not internet:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/offline.png'
self.changeColors(0.5, 0.5, 0.5)
else:
for x in ScreenList:
getattr(self, x).ids.signal.source = mimic_directory + '/Mimic/Pi/imgs/signal/SignalClientLost.png'
self.changeColors(1, 0.5, 0)
for x in ScreenList:
getattr(self, x).ids.signal.anim_delay = 0.12
for x in ScreenList:
getattr(self, x).ids.signal.size_hint_y = 0.112
def update_labels(self, dt): #THIS IS THE IMPORTANT FUNCTION
global mimicbutton, switchtofake, demoboolean, runningDemo, fakeorbitboolean, psarj2, ssarj2, manualcontrol, aos, los, oldLOS, psarjmc, ssarjmc, ptrrjmc, strrjmc, beta1bmc, beta1amc, beta2bmc, beta2amc, beta3bmc, beta3amc, beta4bmc, beta4amc, US_EVAinProgress, position_x, position_y, position_z, velocity_x, velocity_y, velocity_z, altitude, velocity, iss_mass, testvalue, testfactor, airlock_pump, crewlockpres, leak_hold, firstcrossing, EVA_activities, repress, depress, oldAirlockPump, obtained_EVA_crew, EVAstartTime
global holdstartTime, LS_Subscription
global Disco, eva, standby, prebreath1, prebreath2, depress1, depress2, leakhold, repress
global EPSstorageindex, channel1A_voltage, channel1B_voltage, channel2A_voltage, channel2B_voltage, channel3A_voltage, channel3B_voltage, channel4A_voltage, channel4B_voltage, USOS_Power
global stationmode, sgant_elevation, sgant_xelevation
global tdrs, module
global old_mt_timestamp, old_mt_position, mt_speed
arduino_count = len(SERIAL_PORTS)
if arduino_count > 0:
self.mimic_screen.ids.arduino_count.text = str(arduino_count)
self.mimic_screen.ids.arduino.source = mimic_directory + "/Mimic/Pi/imgs/signal/arduino_notransmit.png"
self.fakeorbit_screen.ids.arduino.source = mimic_directory + "/Mimic/Pi/imgs/signal/arduino_notransmit.png"
self.fakeorbit_screen.ids.arduino_count.text = str(arduino_count)
else:
self.mimic_screen.ids.arduino_count.text = ""
self.fakeorbit_screen.ids.arduino_count.text = ""
self.mimic_screen.ids.arduino.source = mimic_directory + "/Mimic/Pi/imgs/signal/arduino_offline.png"
self.fakeorbit_screen.ids.arduino.source = mimic_directory + "/Mimic/Pi/imgs/signal/arduino_offline.png"
runningDemo = False
if arduino_count > 0:
self.mimic_screen.ids.mimicstartbutton.disabled = False
self.fakeorbit_screen.ids.DemoStart.disabled = False
self.fakeorbit_screen.ids.HTVDemoStart.disabled = False
self.control_screen.ids.set90.disabled = False
self.control_screen.ids.set0.disabled = False
if mimicbutton:
self.mimic_screen.ids.mimicstartbutton.disabled = True
self.mimic_screen.ids.arduino.source = mimic_directory + "/Mimic/Pi/imgs/signal/Arduino_Transmit.zip"
else:
self.mimic_screen.ids.mimicstartbutton.disabled = False
else:
self.mimic_screen.ids.mimicstartbutton.disabled = True
self.mimic_screen.ids.mimicstartbutton.text = "Transmit"
self.fakeorbit_screen.ids.DemoStart.disabled = True
self.fakeorbit_screen.ids.HTVDemoStart.disabled = True
self.control_screen.ids.set90.disabled = True
self.control_screen.ids.set0.disabled = True
if runningDemo:
self.fakeorbit_screen.ids.DemoStart.disabled = True
self.fakeorbit_screen.ids.HTVDemoStart.disabled = True
self.fakeorbit_screen.ids.DemoStop.disabled = False
self.fakeorbit_screen.ids.HTVDemoStop.disabled = False
self.fakeorbit_screen.ids.arduino.source = mimic_directory + "/Mimic/Pi/imgs/signal/Arduino_Transmit.zip"
c.execute('select Value from telemetry')
values = c.fetchall()
c.execute('select Timestamp from telemetry')
timestamps = c.fetchall()
        sub_status = str((values[255])[0]) #lightstreamer subscription status
        client_status = str((values[256])[0]) #lightstreamer client connection status
psarj = "{:.2f}".format(float((values[0])[0]))
if not switchtofake:
psarj2 = float(psarj)
if not manualcontrol:
psarjmc = float(psarj)
ssarj = "{:.2f}".format(float((values[1])[0]))
if not switchtofake:
ssarj2 = float(ssarj)
if not manualcontrol:
ssarjmc = float(ssarj)
ptrrj = "{:.2f}".format(float((values[2])[0]))
if not manualcontrol:
ptrrjmc = float(ptrrj)
strrj = "{:.2f}".format(float((values[3])[0]))
if not manualcontrol:
strrjmc = float(strrj)
beta1b = "{:.2f}".format(float((values[4])[0]))
if not switchtofake:
beta1b2 = float(beta1b)
if not manualcontrol:
beta1bmc = float(beta1b)
beta1a = "{:.2f}".format(float((values[5])[0]))
if not switchtofake:
beta1a2 = float(beta1a)
if not manualcontrol:
beta1amc = float(beta1a)
beta2b = "{:.2f}".format(float((values[6])[0]))
if not switchtofake:
beta2b2 = float(beta2b) #+ 20.00
if not manualcontrol:
beta2bmc = float(beta2b)
beta2a = "{:.2f}".format(float((values[7])[0]))
if not switchtofake:
beta2a2 = float(beta2a)
if not manualcontrol:
beta2amc = float(beta2a)
beta3b = "{:.2f}".format(float((values[8])[0]))
if not switchtofake:
beta3b2 = float(beta3b)
if not manualcontrol:
beta3bmc = float(beta3b)
beta3a = "{:.2f}".format(float((values[9])[0]))
if not switchtofake:
beta3a2 = float(beta3a)
if not manualcontrol:
beta3amc = float(beta3a)
beta4b = "{:.2f}".format(float((values[10])[0]))
if not switchtofake:
beta4b2 = float(beta4b)
if not manualcontrol:
beta4bmc = float(beta4b)
beta4a = "{:.2f}".format(float((values[11])[0]))
if not switchtofake:
beta4a2 = float(beta4a) #+ 20.00
if not manualcontrol:
beta4amc = float(beta4a)
aos = "{:.2f}".format(int((values[12])[0]))
los = "{:.2f}".format(int((values[13])[0]))
sasa_el = "{:.2f}".format(float((values[14])[0]))
sasa_az = "{:.2f}".format(float((values[18])[0]))
active_sasa = int((values[54])[0])
sasa1_active = int((values[53])[0])
sasa2_active = int((values[52])[0])
if sasa1_active or sasa2_active:
sasa_xmit = True
else:
sasa_xmit = False
sgant_elevation = float((values[15])[0])
sgant_xelevation = float((values[17])[0])
sgant_transmit = float((values[41])[0])
uhf1_power = int((values[233])[0]) #0 = off, 1 = on, 3 = failed
uhf2_power = int((values[234])[0]) #0 = off, 1 = on, 3 = failed
uhf_framesync = int((values[235])[0]) #1 or 0
v1a = "{:.2f}".format(float((values[25])[0]))
channel1A_voltage[EPSstorageindex] = float(v1a)
v1b = "{:.2f}".format(float((values[26])[0]))
channel1B_voltage[EPSstorageindex] = float(v1b)
v2a = "{:.2f}".format(float((values[27])[0]))
channel2A_voltage[EPSstorageindex] = float(v2a)
v2b = "{:.2f}".format(float((values[28])[0]))
channel2B_voltage[EPSstorageindex] = float(v2b)
v3a = "{:.2f}".format(float((values[29])[0]))
channel3A_voltage[EPSstorageindex] = float(v3a)
v3b = "{:.2f}".format(float((values[30])[0]))
channel3B_voltage[EPSstorageindex] = float(v3b)
v4a = "{:.2f}".format(float((values[31])[0]))
channel4A_voltage[EPSstorageindex] = float(v4a)
v4b = "{:.2f}".format(float((values[32])[0]))
channel4B_voltage[EPSstorageindex] = float(v4b)
c1a = "{:.2f}".format(float((values[33])[0]))
c1b = "{:.2f}".format(float((values[34])[0]))
c2a = "{:.2f}".format(float((values[35])[0]))
c2b = "{:.2f}".format(float((values[36])[0]))
c3a = "{:.2f}".format(float((values[37])[0]))
c3b = "{:.2f}".format(float((values[38])[0]))
c4a = "{:.2f}".format(float((values[39])[0]))
c4b = "{:.2f}".format(float((values[40])[0]))
stationmode = float((values[46])[0]) #russian segment mode same as usos mode
#GNC Telemetry
rollerror = float((values[165])[0])
pitcherror = float((values[166])[0])
yawerror = float((values[167])[0])
quaternion0 = float((values[171])[0])
quaternion1 = float((values[172])[0])
quaternion2 = float((values[173])[0])
quaternion3 = float((values[174])[0])
def dot(a,b):
c = (a[0]*b[0])+(a[1]*b[1])+(a[2]*b[2])
return c
def cross(a,b):
c = [a[1]*b[2] - a[2]*b[1],
a[2]*b[0] - a[0]*b[2],
a[0]*b[1] - a[1]*b[0]]
return c
iss_mass = "{:.2f}".format(float((values[48])[0]))
#ISS state vectors
position_x = float((values[55])[0]) #km
position_y = float((values[56])[0]) #km
position_z = float((values[57])[0]) #km
velocity_x = float((values[58])[0])/1000.00 #convert to km/s
velocity_y = float((values[59])[0])/1000.00 #convert to km/s
velocity_z = float((values[60])[0])/1000.00 #convert to km/s
#test values from orbital mechanics book
#position_x = (-6045.00)
#position_y = (-3490.00)
#position_z = (2500.00)
#velocity_x = (-3.457)
#velocity_y = (6.618)
#velocity_z = (2.533)
pos_vec = [position_x, position_y, position_z]
vel_vec = [velocity_x, velocity_y, velocity_z]
altitude = "{:.2f}".format(math.sqrt(dot(pos_vec,pos_vec))-6371.00)
velocity = "{:.2f}".format(math.sqrt(dot(vel_vec,vel_vec)))
mu = 398600
if float(altitude) > 0:
pos_mag = math.sqrt(dot(pos_vec,pos_vec))
vel_mag = math.sqrt(dot(vel_vec,vel_vec))
v_radial = dot(vel_vec, pos_vec)/pos_mag
h_mom = cross(pos_vec,vel_vec)
h_mom_mag = math.sqrt(dot(h_mom,h_mom))
inc = math.acos(h_mom[2]/h_mom_mag)
self.orbit_data.ids.inc.text = "{:.2f}".format(math.degrees(inc))
node_vec = cross([0,0,1],h_mom)
node_mag = math.sqrt(dot(node_vec,node_vec))
raan = math.acos(node_vec[0]/node_mag)
if node_vec[1] < 0:
raan = math.radians(360) - raan
self.orbit_data.ids.raan.text = "{:.2f}".format(math.degrees(raan))
pvnew = [x * (math.pow(vel_mag,2)-(mu/pos_mag)) for x in pos_vec]
vvnew = [x * (pos_mag*v_radial) for x in vel_vec]
e_vec1 = [(1/mu) * x for x in pvnew]
e_vec2 = [(1/mu) * x for x in vvnew]
e_vec = [e_vec1[0] - e_vec2[0],e_vec1[1] - e_vec2[1],e_vec1[2] - e_vec2[2] ]
e_mag = math.sqrt(dot(e_vec,e_vec))
self.orbit_data.ids.e.text = "{:.4f}".format(e_mag)
arg_per = math.acos(dot(node_vec,e_vec)/(node_mag*e_mag))
if e_vec[2] <= 0:
arg_per = math.radians(360) - arg_per
self.orbit_data.ids.arg_per.text = "{:.2f}".format(math.degrees(arg_per))
ta = math.acos(dot(e_vec,pos_vec)/(e_mag*pos_mag))
if v_radial <= 0:
ta = math.radians(360) - ta
self.orbit_data.ids.true_anomaly.text = "{:.2f}".format(math.degrees(ta))
apogee = (math.pow(h_mom_mag,2)/mu)*(1/(1+e_mag*math.cos(math.radians(180))))
perigee = (math.pow(h_mom_mag,2)/mu)*(1/(1+e_mag*math.cos(0)))
apogee_height = apogee - 6371.00
perigee_height = perigee - 6371.00
sma = 0.5*(apogee+perigee) #km
period = (2*math.pi/math.sqrt(mu))*math.pow(sma,3/2) #seconds
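            #sanity check: uncommenting the textbook state vector above should reproduce that
            #example's classical elements (roughly e ~ 0.17, inclination ~ 153 deg); those are
            #recalled approximations for checking signs and units, not authoritative values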
cmg1_active = int((values[145])[0])
cmg2_active = int((values[146])[0])
cmg3_active = int((values[147])[0])
cmg4_active = int((values[148])[0])
numCMGs = int((values[149])[0])
CMGtorqueRoll = float((values[150])[0])
CMGtorquePitch = float((values[151])[0])
CMGtorqueYaw = float((values[152])[0])
CMGmomentum = float((values[153])[0])
CMGmompercent = float((values[154])[0])
CMGmomcapacity = float((values[175])[0])
cmg1_spintemp = float((values[181])[0])
cmg2_spintemp = float((values[182])[0])
cmg3_spintemp = float((values[183])[0])
cmg4_spintemp = float((values[184])[0])
cmg1_halltemp = float((values[185])[0])
cmg2_halltemp = float((values[186])[0])
cmg3_halltemp = float((values[187])[0])
cmg4_halltemp = float((values[188])[0])
cmg1_vibration = float((values[237])[0])
cmg2_vibration = float((values[238])[0])
cmg3_vibration = float((values[239])[0])
cmg4_vibration = float((values[240])[0])
cmg1_motorcurrent = float((values[241])[0])
cmg2_motorcurrent = float((values[242])[0])
cmg3_motorcurrent = float((values[243])[0])
cmg4_motorcurrent = float((values[244])[0])
cmg1_wheelspeed = float((values[245])[0])
cmg2_wheelspeed = float((values[246])[0])
cmg3_wheelspeed = float((values[247])[0])
cmg4_wheelspeed = float((values[248])[0])
#EVA Telemetry
airlock_pump_voltage = int((values[71])[0])
airlock_pump_voltage_timestamp = float((timestamps[71])[0])
airlock_pump_switch = int((values[72])[0])
crewlockpres = float((values[16])[0])
airlockpres = float((values[77])[0])
#MSS Robotics Stuff
mt_worksite = int((values[258])[0])
self.mss_mt_screen.ids.mt_ws_value.text = str(mt_worksite)
mt_position = float((values[257])[0])
mt_position_timestamp = float((timestamps[257])[0])
self.mss_mt_screen.ids.mt_position_value.text = str(mt_position)
if (mt_position_timestamp - old_mt_timestamp) > 0:
mt_speed = (mt_position - old_mt_position) / ((mt_position_timestamp - old_mt_timestamp)*3600)
old_mt_timestamp = mt_position_timestamp
old_mt_position = mt_position
self.mss_mt_screen.ids.mt_speed_value.text = "{:2.2f}".format(float(mt_speed)) + " cm/s"
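        #units worked example (illustrative numbers, assuming mt_position is reported in cm to
        #match the cm/s label): a 30 cm move over a 0.01 h telemetry gap gives
        #mt_speed = 30/(0.01*3600) = 0.83 cm/s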
##US EPS Stuff---------------------------##
solarbeta = "{:.2f}".format(float((values[176])[0]))
power_1a = float(v1a) * float(c1a)
power_1b = float(v1b) * float(c1b)
power_2a = float(v2a) * float(c2a)
power_2b = float(v2b) * float(c2b)
power_3a = float(v3a) * float(c3a)
power_3b = float(v3b) * float(c3b)
power_4a = float(v4a) * float(c4a)
power_4b = float(v4b) * float(c4b)
USOS_Power = power_1a + power_1b + power_2a + power_2b + power_3a + power_3b + power_4a + power_4b
self.eps_screen.ids.usos_power.text = str("{:.0f}".format(USOS_Power*-1.0)) + " W"
self.eps_screen.ids.solarbeta.text = str(solarbeta)
avg_total_voltage = (float(v1a)+float(v1b)+float(v2a)+float(v2b)+float(v3a)+float(v3b)+float(v4a)+float(v4b))/8.0
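        #USOS_Power (summed above) comes out negative because the channel currents are reported
        #negative under load (the "power channel offline" checks below treat a positive current
        #as off-nominal), hence the *-1.0 when displaying total wattage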
        avg_1a = sum(channel1A_voltage[:10])/10.0
        avg_1b = sum(channel1B_voltage[:10])/10.0
        avg_2a = sum(channel2A_voltage[:10])/10.0
        avg_2b = sum(channel2B_voltage[:10])/10.0
        avg_3a = sum(channel3A_voltage[:10])/10.0
        avg_3b = sum(channel3B_voltage[:10])/10.0
        avg_4a = sum(channel4A_voltage[:10])/10.0
        avg_4b = sum(channel4B_voltage[:10])/10.0
        halfavg_1a = sum(channel1A_voltage[:5])/5.0
        halfavg_1b = sum(channel1B_voltage[:5])/5.0
        halfavg_2a = sum(channel2A_voltage[:5])/5.0
        halfavg_2b = sum(channel2B_voltage[:5])/5.0
        halfavg_3a = sum(channel3A_voltage[:5])/5.0
        halfavg_3b = sum(channel3B_voltage[:5])/5.0
        halfavg_4a = sum(channel4A_voltage[:5])/5.0
        halfavg_4b = sum(channel4B_voltage[:5])/5.0
EPSstorageindex += 1
if EPSstorageindex > 9:
EPSstorageindex = 0
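        #the channelXX_voltage lists act as 10-slot ring buffers: EPSstorageindex wraps to 0 so
        #avg_* is a rolling mean of the last ten update cycles and halfavg_* averages slots 0-4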
## Station Mode ##
if stationmode == 1.0:
self.iss_screen.ids.stationmode_value.text = "Crew Rescue"
elif stationmode == 2.0:
self.iss_screen.ids.stationmode_value.text = "Survival"
elif stationmode == 3.0:
self.iss_screen.ids.stationmode_value.text = "Reboost"
elif stationmode == 4.0:
self.iss_screen.ids.stationmode_value.text = "Proximity Operations"
elif stationmode == 5.0:
self.iss_screen.ids.stationmode_value.text = "EVA"
elif stationmode == 6.0:
self.iss_screen.ids.stationmode_value.text = "Microgravity"
elif stationmode == 7.0:
self.iss_screen.ids.stationmode_value.text = "Standard"
else:
self.iss_screen.ids.stationmode_value.text = "n/a"
## ISS Potential Problems ##
#ISS Leak - Check Pressure Levels
#Number of CMGs online could reveal CMG failure
#CMG speed less than 6600rpm
#Solar arrays offline
#Loss of attitude control, loss of cmg control
#ISS altitude too low
        #Russian hook status - make sure all modules remain docked
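        #a hedged sketch (not in the original code; thresholds are illustrative assumptions) of
        #how these checks could be wired into this function using variables computed above:
        #if crewlockpres < 700 or airlockpres < 700:
        #    logWrite("Warning - possible airlock pressure drop")
        #if numCMGs < 4 or cmg1_wheelspeed < 6600:
        #    logWrite("Warning - CMG offline or spinning down")
        #if float(altitude) < 390:
        #    logWrite("Warning - ISS altitude low")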
##-------------------GNC Stuff---------------------------##
roll = math.degrees(math.atan2(2.0 * (quaternion0 * quaternion1 + quaternion2 * quaternion3), 1.0 - 2.0 * (quaternion1 * quaternion1 + quaternion2 * quaternion2))) + rollerror
pitch = math.degrees(math.asin(max(-1.0, min(1.0, 2.0 * (quaternion0 * quaternion2 - quaternion3 * quaternion1))))) + pitcherror
yaw = math.degrees(math.atan2(2.0 * (quaternion0 * quaternion3 + quaternion1 * quaternion2), 1.0 - 2.0 * (quaternion2 * quaternion2 + quaternion3 * quaternion3))) + yawerror
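        #quick check on the quaternion-to-Euler conversion: the identity quaternion (1, 0, 0, 0)
        #gives roll = pitch = yaw = 0 before the *error offsets are added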
self.gnc_screen.ids.yaw.text = str("{:.2f}".format(yaw))
self.gnc_screen.ids.pitch.text = str("{:.2f}".format(pitch))
self.gnc_screen.ids.roll.text = str("{:.2f}".format(roll))
self.gnc_screen.ids.cmgsaturation.value = CMGmompercent
self.gnc_screen.ids.cmgsaturation_value.text = "CMG Saturation " + str("{:.1f}".format(CMGmompercent)) + "%"
if cmg1_active == 1:
self.gnc_screen.ids.cmg1.source = mimic_directory + "/Mimic/Pi/imgs/gnc/cmg.png"
else:
self.gnc_screen.ids.cmg1.source = mimic_directory + "/Mimic/Pi/imgs/gnc/cmg_offline.png"
if cmg2_active == 1:
self.gnc_screen.ids.cmg2.source = mimic_directory + "/Mimic/Pi/imgs/gnc/cmg.png"
else:
self.gnc_screen.ids.cmg2.source = mimic_directory + "/Mimic/Pi/imgs/gnc/cmg_offline.png"
if cmg3_active == 1:
self.gnc_screen.ids.cmg3.source = mimic_directory + "/Mimic/Pi/imgs/gnc/cmg.png"
else:
self.gnc_screen.ids.cmg3.source = mimic_directory + "/Mimic/Pi/imgs/gnc/cmg_offline.png"
if cmg4_active == 1:
self.gnc_screen.ids.cmg4.source = mimic_directory + "/Mimic/Pi/imgs/gnc/cmg.png"
else:
self.gnc_screen.ids.cmg4.source = mimic_directory + "/Mimic/Pi/imgs/gnc/cmg_offline.png"
self.gnc_screen.ids.cmg1spintemp.text = "Spin Temp " + str("{:.1f}".format(cmg1_spintemp))
self.gnc_screen.ids.cmg1halltemp.text = "Hall Temp " + str("{:.1f}".format(cmg1_halltemp))
self.gnc_screen.ids.cmg1vibration.text = "Vibration " + str("{:.4f}".format(cmg1_vibration))
self.gnc_screen.ids.cmg1current.text = "Current " + str("{:.1f}".format(cmg1_motorcurrent))
self.gnc_screen.ids.cmg1speed.text = "Speed " + str("{:.1f}".format(cmg1_wheelspeed))
self.gnc_screen.ids.cmg2spintemp.text = "Spin Temp " + str("{:.1f}".format(cmg2_spintemp))
self.gnc_screen.ids.cmg2halltemp.text = "Hall Temp " + str("{:.1f}".format(cmg2_halltemp))
self.gnc_screen.ids.cmg2vibration.text = "Vibration " + str("{:.4f}".format(cmg2_vibration))
self.gnc_screen.ids.cmg2current.text = "Current " + str("{:.1f}".format(cmg2_motorcurrent))
self.gnc_screen.ids.cmg2speed.text = "Speed " + str("{:.1f}".format(cmg2_wheelspeed))
self.gnc_screen.ids.cmg3spintemp.text = "Spin Temp " + str("{:.1f}".format(cmg3_spintemp))
self.gnc_screen.ids.cmg3halltemp.text = "Hall Temp " + str("{:.1f}".format(cmg3_halltemp))
self.gnc_screen.ids.cmg3vibration.text = "Vibration " + str("{:.4f}".format(cmg3_vibration))
self.gnc_screen.ids.cmg3current.text = "Current " + str("{:.1f}".format(cmg3_motorcurrent))
self.gnc_screen.ids.cmg3speed.text = "Speed " + str("{:.1f}".format(cmg3_wheelspeed))
self.gnc_screen.ids.cmg4spintemp.text = "Spin Temp " + str("{:.1f}".format(cmg4_spintemp))
self.gnc_screen.ids.cmg4halltemp.text = "Hall Temp " + str("{:.1f}".format(cmg4_halltemp))
self.gnc_screen.ids.cmg4vibration.text = "Vibration " + str("{:.4f}".format(cmg4_vibration))
self.gnc_screen.ids.cmg4current.text = "Current " + str("{:.1f}".format(cmg4_motorcurrent))
self.gnc_screen.ids.cmg4speed.text = "Speed " + str("{:.1f}".format(cmg4_wheelspeed))
##-------------------EPS Stuff---------------------------##
#if halfavg_1a < 151.5: #discharging
# self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_1a.color = 1, 1, 1, 0.8
#elif avg_1a > 160.0: #charged
# self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_1a >= 151.5: #charging
# self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_1a.color = 1, 1, 1, 1.0
#if float(c1a) > 0.0: #power channel offline!
# self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_1b < 151.5: #discharging
# self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_1b.color = 1, 1, 1, 0.8
#elif avg_1b > 160.0: #charged
# self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_1b >= 151.5: #charging
# self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_1b.color = 1, 1, 1, 1.0
#if float(c1b) > 0.0: #power channel offline!
# self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_2a < 151.5: #discharging
# self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_2a.color = 1, 1, 1, 0.8
#elif avg_2a > 160.0: #charged
# self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_2a >= 151.5: #charging
# self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_2a.color = 1, 1, 1, 1.0
#if float(c2a) > 0.0: #power channel offline!
# self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_2b < 151.5: #discharging
# self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_2b.color = 1, 1, 1, 0.8
#elif avg_2b > 160.0: #charged
# self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_2b >= 151.5: #charging
# self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_2b.color = 1, 1, 1, 1.0
#if float(c2b) > 0.0: #power channel offline!
# self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_3a < 151.5: #discharging
# self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_3a.color = 1, 1, 1, 0.8
#elif avg_3a > 160.0: #charged
# self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_3a >= 151.5: #charging
# self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_3a.color = 1, 1, 1, 1.0
#if float(c3a) > 0.0: #power channel offline!
# self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_3b < 151.5: #discharging
# self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_3b.color = 1, 1, 1, 0.8
#elif avg_3b > 160.0: #charged
# self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_3b >= 151.5: #charging
# self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_3b.color = 1, 1, 1, 1.0
#if float(c3b) > 0.0: #power channel offline!
# self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_4a < 151.5: #discharging
# self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_4a.color = 1, 1, 1, 0.8
#elif avg_4a > 160.0: #charged
# self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_4a >= 151.5: #charging
# self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_4a.color = 1, 1, 1, 1.0
#if float(c4a) > 0.0: #power channel offline!
# self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if halfavg_4b < 151.5: #discharging
# self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
# #self.eps_screen.ids.array_4b.color = 1, 1, 1, 0.8
#elif avg_4b > 160.0: #charged
# self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
#elif halfavg_4b >= 151.5: #charging
# self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
# self.eps_screen.ids.array_4b.color = 1, 1, 1, 1.0
#if float(c4b) > 0.0: #power channel offline!
# self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#if avg_total_voltage > 151.5:
#else:
if float(v1a) >= 151.5 or float(v1b) >= 151.5 or float(v2a) >= 151.5 or float(v2b) >= 151.5 or float(v3a) >= 151.5 or float(v3b) >= 151.5 or float(v4a) >= 151.5 or float(v4b) >= 151.5:
self.eps_screen.ids.eps_sun.color = 1, 1, 1, 1
else:
self.eps_screen.ids.eps_sun.color = 1, 1, 1, 0.1
if float(v1a) < 151.5: #discharging
self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_1a.color = 1, 1, 1, 0.8
elif float(v1a) > 160.0: #charged
self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v1a) >= 151.5: #charging
self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_1a.color = 1, 1, 1, 1.0
if float(c1a) > 0.0: #power channel offline!
self.eps_screen.ids.array_1a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v1b) < 151.5: #discharging
self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_1b.color = 1, 1, 1, 0.8
elif float(v1b) > 160.0: #charged
self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v1b) >= 151.5: #charging
self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_1b.color = 1, 1, 1, 1.0
if float(c1b) > 0.0: #power channel offline!
self.eps_screen.ids.array_1b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v2a) < 151.5: #discharging
self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_2a.color = 1, 1, 1, 0.8
elif float(v2a) > 160.0: #charged
self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v2a) >= 151.5: #charging
self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_2a.color = 1, 1, 1, 1.0
if float(c2a) > 0.0: #power channel offline!
self.eps_screen.ids.array_2a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v2b) < 151.5: #discharging
self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_2b.color = 1, 1, 1, 0.8
elif float(v2b) > 160.0: #charged
self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v2b) >= 151.5: #charging
self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_2b.color = 1, 1, 1, 1.0
if float(c2b) > 0.0: #power channel offline!
self.eps_screen.ids.array_2b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v3a) < 151.5: #discharging
self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_3a.color = 1, 1, 1, 0.8
elif float(v3a) > 160.0: #charged
self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v3a) >= 151.5: #charging
self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_3a.color = 1, 1, 1, 1.0
if float(c3a) > 0.0: #power channel offline!
self.eps_screen.ids.array_3a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v3b) < 151.5: #discharging
self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_3b.color = 1, 1, 1, 0.8
elif float(v3b) > 160.0: #charged
self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v3b) >= 151.5: #charging
self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_3b.color = 1, 1, 1, 1.0
if float(c3b) > 0.0: #power channel offline!
self.eps_screen.ids.array_3b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
if float(v4a) < 151.5: #discharging
self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_4a.color = 1, 1, 1, 0.8
elif float(v4a) > 160.0: #charged
self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v4a) >= 151.5: #charging
self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_4a.color = 1, 1, 1, 1.0
if float(c4a) > 0.0: #power channel offline!
self.eps_screen.ids.array_4a.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
#4b has a lower setpoint voltage for now - reverted back as of US EVA 63
if float(v4b) < 141.5: #discharging
self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-discharging.zip"
#self.eps_screen.ids.array_4b.color = 1, 1, 1, 0.8
elif float(v4b) > 150.0: #charged
self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charged.zip"
elif float(v4b) >= 141.5: #charging
self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-charging.zip"
self.eps_screen.ids.array_4b.color = 1, 1, 1, 1.0
if float(c4b) > 0.0: #power channel offline!
self.eps_screen.ids.array_4b.source = mimic_directory + "/Mimic/Pi/imgs/eps/array-offline.png"
##-------------------C&T Functionality-------------------##
self.ct_sgant_screen.ids.sgant_dish.angle = float(sgant_elevation)
self.ct_sgant_screen.ids.sgant_elevation.text = "{:.2f}".format(float(sgant_elevation))
#make sure radio animations turn off when no signal or no transmit
if float(sgant_transmit) == 1.0 and float(aos) == 1.0:
self.ct_sgant_screen.ids.radio_up.color = 1, 1, 1, 1
if "10" in tdrs:
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
if "11" in tdrs:
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
if "12" in tdrs:
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
if "6" in tdrs:
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
if "7" in tdrs:
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.zip"
elif float(aos) == 0.0 and (float(sgant_transmit) == 0.0 or float(sgant_transmit) == 1.0):
self.ct_sgant_screen.ids.radio_up.color = 0, 0, 0, 0
self.ct_sgant_screen.ids.tdrs_east12.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_east6.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west11.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_west10.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
self.ct_sgant_screen.ids.tdrs_z7.source = mimic_directory + "/Mimic/Pi/imgs/ct/TDRS.png"
#now check main CT screen radio signal
if float(sgant_transmit) == 1.0 and float(aos) == 1.0:
self.ct_screen.ids.sgant1_radio.color = 1, 1, 1, 1
self.ct_screen.ids.sgant2_radio.color = 1, 1, 1, 1
elif float(sgant_transmit) == 1.0 and float(aos) == 0.0:
self.ct_screen.ids.sgant1_radio.color = 0, 0, 0, 0
self.ct_screen.ids.sgant2_radio.color = 0, 0, 0, 0
elif float(sgant_transmit) == 0.0:
self.ct_screen.ids.sgant1_radio.color = 0, 0, 0, 0
self.ct_screen.ids.sgant2_radio.color = 0, 0, 0, 0
elif float(aos) == 0.0:
self.ct_screen.ids.sgant1_radio.color = 0, 0, 0, 0
self.ct_screen.ids.sgant2_radio.color = 0, 0, 0, 0
if float(sasa1_active) == 1.0 and float(aos) == 1.0:
self.ct_screen.ids.sasa1_radio.color = 1, 1, 1, 1
elif float(sasa1_active) == 1.0 and float(aos) == 0.0:
self.ct_screen.ids.sasa1_radio.color = 0, 0, 0, 0
elif float(sasa1_active) == 0.0:
self.ct_screen.ids.sasa1_radio.color = 0, 0, 0, 0
elif float(aos) == 0.0:
self.ct_screen.ids.sasa1_radio.color = 0, 0, 0, 0
if float(sasa2_active) == 1.0 and float(aos) == 1.0:
self.ct_screen.ids.sasa2_radio.color = 1, 1, 1, 1
elif float(sasa2_active) == 1.0 and float(aos) == 0.0:
self.ct_screen.ids.sasa2_radio.color = 0, 0, 0, 0
elif float(sasa2_active) == 0.0:
self.ct_screen.ids.sasa2_radio.color = 0, 0, 0, 0
elif float(aos) == 0.0:
self.ct_screen.ids.sasa2_radio.color = 0, 0, 0, 0
if float(uhf1_power) == 1.0 and float(aos) == 1.0:
self.ct_screen.ids.uhf1_radio.color = 1, 1, 1, 1
elif float(uhf1_power) == 1.0 and float(aos) == 0.0:
self.ct_screen.ids.uhf1_radio.color = 1, 0, 0, 1
elif float(uhf1_power) == 0.0:
self.ct_screen.ids.uhf1_radio.color = 0, 0, 0, 0
if float(uhf2_power) == 1.0 and float(aos) == 1.0:
self.ct_screen.ids.uhf2_radio.color = 1, 1, 1, 1
elif float(uhf2_power) == 1.0 and float(aos) == 0.0:
self.ct_screen.ids.uhf2_radio.color = 1, 0, 0, 1
elif float(uhf2_power) == 0.0:
self.ct_screen.ids.uhf2_radio.color = 0, 0, 0, 0
##-------------------EVA Functionality-------------------##
if stationmode == 5:
evaflashevent = Clock.schedule_once(self.flashEVAbutton, 1)
##-------------------US EVA Functionality-------------------##
if airlock_pump_voltage == 1:
self.us_eva.ids.pumpvoltage.text = "Airlock Pump Power On!"
self.us_eva.ids.pumpvoltage.color = 0.33, 0.7, 0.18
else:
self.us_eva.ids.pumpvoltage.text = "Airlock Pump Power Off"
self.us_eva.ids.pumpvoltage.color = 0, 0, 0
if airlock_pump_switch == 1:
self.us_eva.ids.pumpswitch.text = "Airlock Pump Active!"
self.us_eva.ids.pumpswitch.color = 0.33, 0.7, 0.18
else:
self.us_eva.ids.pumpswitch.text = "Airlock Pump Inactive"
self.us_eva.ids.pumpswitch.color = 0, 0, 0
##activate EVA button flash
if (airlock_pump_voltage == 1 or crewlockpres < 734) and int(stationmode) == 5:
usevaflashevent = Clock.schedule_once(self.flashUS_EVAbutton, 1)
##No EVA Currently
if airlock_pump_voltage == 0 and airlock_pump_switch == 0 and crewlockpres > 740 and airlockpres > 740:
eva = False
self.us_eva.ids.leak_timer.text = ""
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/BlankLights.png'
self.us_eva.ids.EVA_occuring.color = 1, 0, 0
self.us_eva.ids.EVA_occuring.text = "Currently No EVA"
##EVA Standby - NOT UNIQUE
if airlock_pump_voltage == 1 and airlock_pump_switch == 1 and crewlockpres > 740 and airlockpres > 740 and int(stationmode) == 5:
standby = True
self.us_eva.ids.leak_timer.text = "~160s Leak Check"
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/StandbyLights.png'
self.us_eva.ids.EVA_occuring.color = 0, 0, 1
self.us_eva.ids.EVA_occuring.text = "EVA Standby"
else:
standby = False
##EVA Prebreath Pressure
if airlock_pump_voltage == 1 and crewlockpres > 740 and airlockpres > 740 and int(stationmode) == 5:
prebreath1 = True
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/PreBreatheLights.png'
self.us_eva.ids.leak_timer.text = "~160s Leak Check"
self.us_eva.ids.EVA_occuring.color = 0, 0, 1
self.us_eva.ids.EVA_occuring.text = "Pre-EVA Nitrogen Purge"
##EVA Depress1
if airlock_pump_voltage == 1 and airlock_pump_switch == 1 and crewlockpres < 740 and airlockpres > 740 and int(stationmode) == 5:
depress1 = True
self.us_eva.ids.leak_timer.text = "~160s Leak Check"
self.us_eva.ids.EVA_occuring.text = "Crewlock Depressurizing"
self.us_eva.ids.EVA_occuring.color = 0, 0, 1
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/DepressLights.png'
##EVA Leakcheck
if airlock_pump_voltage == 1 and crewlockpres < 260 and crewlockpres > 250 and (depress1 or leakhold) and int(stationmode) == 5:
if depress1:
holdstartTime = float(unixconvert[7])*24+unixconvert[3]+float(unixconvert[4])/60+float(unixconvert[5])/3600
leakhold = True
depress1 = False
self.us_eva.ids.EVA_occuring.text = "Leak Check in Progress!"
self.us_eva.ids.EVA_occuring.color = 0, 0, 1
Clock.schedule_once(self.hold_timer, 1)
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/LeakCheckLights.png'
else:
leakhold = False
##EVA Depress2
if airlock_pump_voltage == 1 and crewlockpres <= 250 and crewlockpres > 3 and int(stationmode) == 5:
leakhold = False
self.us_eva.ids.leak_timer.text = "Complete"
self.us_eva.ids.EVA_occuring.text = "Crewlock Depressurizing"
self.us_eva.ids.EVA_occuring.color = 0, 0, 1
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/DepressLights.png'
##EVA in progress
if crewlockpres < 2.5 and int(stationmode) == 5:
eva = True
self.us_eva.ids.EVA_occuring.text = "EVA In Progress!!!"
self.us_eva.ids.EVA_occuring.color = 0.33, 0.7, 0.18
self.us_eva.ids.leak_timer.text = "Complete"
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/InProgressLights.png'
evatimerevent = Clock.schedule_once(self.EVA_clock, 1)
##Repress
if airlock_pump_voltage == 0 and airlock_pump_switch == 0 and crewlockpres >= 3 and crewlockpres < 734 and int(stationmode) == 5:
eva = False
self.us_eva.ids.EVA_occuring.color = 0, 0, 1
self.us_eva.ids.EVA_occuring.text = "Crewlock Repressurizing"
self.us_eva.ids.Crewlock_Status_image.source = mimic_directory + '/Mimic/Pi/imgs/eva/RepressLights.png'
##-------------------RS EVA Functionality-------------------##
##if eva station mode and not us eva
if airlock_pump_voltage == 0 and crewlockpres >= 734 and stationmode == 5:
rsevaflashevent = Clock.schedule_once(self.flashRS_EVAbutton, 1)
##-------------------EVA Functionality End-------------------##
# if (difference > -10) and (isinstance(App.get_running_app().root_window.children[0], Popup)==False):
# LOSpopup = Popup(title='Loss of Signal', content=Label(text='Possible LOS Soon'), size_hint=(0.3, 0.2), auto_dismiss=True)
# LOSpopup.open()
##-------------------Fake Orbit Simulator-------------------##
self.fakeorbit_screen.ids.psarj.text = str(psarj)
self.fakeorbit_screen.ids.ssarj.text = str(ssarj)
self.fakeorbit_screen.ids.beta1a.text = str(beta1a)
self.fakeorbit_screen.ids.beta1b.text = str(beta1b)
self.fakeorbit_screen.ids.beta2a.text = str(beta2a)
self.fakeorbit_screen.ids.beta2b.text = str(beta2b)
self.fakeorbit_screen.ids.beta3a.text = str(beta3a)
self.fakeorbit_screen.ids.beta3b.text = str(beta3b)
self.fakeorbit_screen.ids.beta4a.text = str(beta4a)
self.fakeorbit_screen.ids.beta4b.text = str(beta4b)
if demoboolean:
if Disco:
serialWrite("Disco ")
Disco = False
serialWrite("PSARJ=" + psarj + " " + "SSARJ=" + ssarj + " " + "PTRRJ=" + ptrrj + " " + "STRRJ=" + strrj + " " + "B1B=" + beta1b + " " + "B1A=" + beta1a + " " + "B2B=" + beta2b + " " + "B2A=" + beta2a + " " + "B3B=" + beta3b + " " + "B3A=" + beta3a + " " + "B4B=" + beta4b + " " + "B4A=" + beta4a + " " + "V1A=" + v1a + " " + "V2A=" + v2a + " " + "V3A=" + v3a + " " + "V4A=" + v4a + " " + "V1B=" + v1b + " " + "V2B=" + v2b + " " + "V3B=" + v3b + " " + "V4B=" + v4b + " ")
self.eps_screen.ids.psarj_value.text = psarj + "deg"
self.eps_screen.ids.ssarj_value.text = ssarj + "deg"
self.tcs_screen.ids.ptrrj_value.text = ptrrj + "deg"
self.tcs_screen.ids.strrj_value.text = strrj + "deg"
self.eps_screen.ids.beta1b_value.text = beta1b
self.eps_screen.ids.beta1a_value.text = beta1a
self.eps_screen.ids.beta2b_value.text = beta2b
self.eps_screen.ids.beta2a_value.text = beta2a
self.eps_screen.ids.beta3b_value.text = beta3b
self.eps_screen.ids.beta3a_value.text = beta3a
self.eps_screen.ids.beta4b_value.text = beta4b
self.eps_screen.ids.beta4a_value.text = beta4a
self.eps_screen.ids.c1a_value.text = c1a + "A"
self.eps_screen.ids.v1a_value.text = v1a + "V"
self.eps_screen.ids.c1b_value.text = c1b + "A"
self.eps_screen.ids.v1b_value.text = v1b + "V"
self.eps_screen.ids.c2a_value.text = c2a + "A"
self.eps_screen.ids.v2a_value.text = v2a + "V"
self.eps_screen.ids.c2b_value.text = c2b + "A"
self.eps_screen.ids.v2b_value.text = v2b + "V"
self.eps_screen.ids.c3a_value.text = c3a + "A"
self.eps_screen.ids.v3a_value.text = v3a + "V"
self.eps_screen.ids.c3b_value.text = c3b + "A"
self.eps_screen.ids.v3b_value.text = v3b + "V"
self.eps_screen.ids.c4a_value.text = c4a + "A"
self.eps_screen.ids.v4a_value.text = v4a + "V"
self.eps_screen.ids.c4b_value.text = c4b + "A"
self.eps_screen.ids.v4b_value.text = v4b + "V"
self.iss_screen.ids.altitude_value.text = str(altitude) + " km"
self.iss_screen.ids.velocity_value.text = str(velocity) + " m/s"
self.iss_screen.ids.stationmass_value.text = str(iss_mass) + " kg"
self.us_eva.ids.EVA_needle.angle = float(self.map_rotation(0.0193368*float(crewlockpres)))
self.us_eva.ids.crewlockpressure_value.text = "{:.2f}".format(0.0193368*float(crewlockpres))
psi_bar_x = self.map_psi_bar(0.0193368*float(crewlockpres)) #convert to torr
self.us_eva.ids.EVA_psi_bar.pos_hint = {"center_x": psi_bar_x, "center_y": 0.56}
##-------------------Signal Status Check-------------------##
if client_status.split(":")[0] == "CONNECTED":
if sub_status == "Subscribed":
                #client connected and subscribed to ISS telemetry
if float(aos) == 1.00:
self.signal_acquired() #signal status 1 means acquired
sasa_xmit = 1
elif float(aos) == 0.00:
self.signal_lost() #signal status 0 means loss of signal
sasa_xmit = 0
elif float(aos) == 2.00:
self.signal_stale() #signal status 2 means data is not being updated from server
sasa_xmit = 0
else:
self.signal_unsubscribed()
else:
self.signal_unsubscribed()
if mimicbutton: # and float(aos) == 1.00):
serialWrite("PSARJ=" + psarj + " " + "SSARJ=" + ssarj + " " + "PTRRJ=" + ptrrj + " " + "STRRJ=" + strrj + " " + "B1B=" + beta1b + " " + "B1A=" + beta1a + " " + "B2B=" + beta2b + " " + "B2A=" + beta2a + " " + "B3B=" + beta3b + " " + "B3A=" + beta3a + " " + "B4B=" + beta4b + " " + "B4A=" + beta4a + " " + "AOS=" + aos + " " + "V1A=" + v1a + " " + "V2A=" + v2a + " " + "V3A=" + v3a + " " + "V4A=" + v4a + " " + "V1B=" + v1b + " " + "V2B=" + v2b + " " + "V3B=" + v3b + " " + "V4B=" + v4b + " " + "ISS=" + module + " " + "Sgnt_el=" + str(int(sgant_elevation)) + " " + "Sgnt_xel=" + str(int(sgant_xelevation)) + " " + "Sgnt_xmit=" + str(int(sgant_transmit)) + " " + "SASA_Xmit=" + str(int(sasa_xmit)) + " SASA_AZ=" + str(float(sasa_az)) + " SASA_EL=" + str(float(sasa_el)) + " ")
#All GUI Screens are on separate kv files
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/Settings_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/FakeOrbitScreen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/Orbit_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/Orbit_Pass.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/Orbit_Data.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/ISS_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/ECLSS_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/EPS_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/CT_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/CT_SGANT_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/CT_SASA_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/CT_UHF_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/CT_Camera_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/GNC_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/TCS_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/EVA_US_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/EVA_RS_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/EVA_Main_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/EVA_Pictures.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/Crew_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/RS_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/ManualControlScreen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/MSS_MT_Screen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/MimicScreen.kv')
Builder.load_file(mimic_directory + '/Mimic/Pi/Screens/MainScreen.kv')
Builder.load_string('''
#:kivy 1.8
#:import kivy kivy
#:import win kivy.core.window
ScreenManager:
Settings_Screen:
FakeOrbitScreen:
Orbit_Screen:
Orbit_Pass:
Orbit_Data:
EPS_Screen:
CT_Screen:
CT_SASA_Screen:
CT_UHF_Screen:
CT_Camera_Screen:
CT_SGANT_Screen:
ISS_Screen:
ECLSS_Screen:
GNC_Screen:
TCS_Screen:
EVA_US_Screen:
EVA_RS_Screen:
EVA_Main_Screen:
EVA_Pictures:
RS_Screen:
Crew_Screen:
ManualControlScreen:
MSS_MT_Screen:
MimicScreen:
MainScreen:
''')
if __name__ == '__main__':
MainApp().run()
| mit |
subhadram/insilico | examples/NeuronSAHPVGCCNetwork/multiplot.py | 1 | 1421 | from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
data = np.genfromtxt('switch.dat')
#data1 = np.genfromtxt('np10.dat')
matplotlib.rcParams.update({'font.size': 24})
if 1:
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
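# par1 is a parasite axis: it shares the x-axis with 'host' but keeps its own
# y-axis, so voltage and calcium current can be overlaid with independent scales.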
host.set_xlim(4000,4040)
#host.set_ylim(0, 2)
host.locator_params(axis='y', nbins=4)
host.locator_params(axis='x', nbins=2)
par1.locator_params(axis='y', nbins=2)
host.set_xlabel("Time")
host.set_ylabel("Voltage (mV)")
par1.set_ylabel("Calcium current (muA/cm^2)")
#par2.set_ylabel("isyn1")
par1.locator_params(axis='y', nbins=4)
par1.locator_params(axis='x', nbins=2)
p1, = host.plot(data[:,0], data[:,2],)
p2, = par1.plot(data[:,0], -1.0*data[:,4],linewidth = 2.0)
#p2, = par1.plot(data[:,0], data[:,3],linewidth = 1.5,label = "h")
#p2, = par1.plot(data[:,0], data[:,4],linewidth = 1.5,label = "n")
#p3, = par2.plot(data[:,0], data[:,5], label="m")
#par1.set_ylim(-100, 10)
#par2.set_ylim(1, 65)
host.legend(loc ="best")
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
#par2.axis["right"].label.set_color(p3.get_color())
plt.draw()
plt.show()
| gpl-3.0 |
maweigert/spimagine | tests/test_utils/test_alpha_shape.py | 1 | 2067 | """
[email protected]
"""
from __future__ import absolute_import
import numpy as np
from spimagine import volfig, Mesh, qt_exec
from spimagine.utils import alpha_shape
import matplotlib
matplotlib.use("Qt5Agg")
def test_2d():
import matplotlib.pyplot as plt
plt.ion()
np.random.seed(0)
N = 500
phi = np.random.uniform(0, 2*np.pi, N)
points = np.stack([np.cos(phi), np.sin(phi)*np.cos(phi)]).T
#points += .1*np.random.uniform(-1, 1, (N, 2))
#points = np.concatenate([points,.9*points])
points, normals, indices = alpha_shape(points, .1)
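# alpha_shape appears to return the (possibly filtered) points, their outward
# normals and the index pairs of boundary edges; smaller alpha keeps finer
# concavities, while alpha = -1 gives the convex hull (see the 3d test below).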
plt.clf()
_x = points[indices].reshape(len(indices)*2,2)
_n = normals[indices].reshape(len(indices)*2,2)
plt.quiver(_x[:,0],_x[:,1],_n[:,0],_n[:,1], color = (.5,)*3)
plt.plot(points[:,0],points[:,1],".")
for edge in indices:
plt.plot(points[edge, 0], points[edge, 1], "k", lw=2)
plt.axis("equal")
plt.pause(0.1)
plt.close()
return points, normals, indices
def test_3d():
N = 1000
# generate a concave shape
phi = np.random.uniform(0, 2*np.pi, N)
theta = np.arccos(np.random.uniform(-1,1, N))
points = np.stack([np.cos(phi)*np.sin(theta)*np.cos(theta), np.sin(phi)*np.sin(theta)*np.cos(theta), np.cos(theta)]).T
#points += .1*np.random.uniform(-1, 1, (N,3))
#points = np.concatenate([points,.9*points])
#get the alpha shape indices and normals (set alpha = -1 for the convex hull)
points, normals, indices = alpha_shape(points, alpha = .2)
m = Mesh(vertices = points.flatten(),
normals = normals.flatten(),
indices = indices.flatten(),
facecolor = (1.,1.,.3))
w = volfig(1)
w.glWidget.add_mesh(m)
w.transform.setRotation(0.4,0,1,0)
w.show()
# add this when run from command line
from PyQt5 import QtCore
QtCore.QTimer.singleShot(1000,w.closeMe)
qt_exec()
return points, normals, indices
if __name__ == '__main__':
#points, normals, indices = test_2d()
points, normals, indices = test_3d()
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
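# After fitting, the two slope estimates are available as ols.coef_ and the
# offset as ols.intercept_, e.g. print(ols.coef_, ols.intercept_).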
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
skggm/skggm | doc/conf.py | 1 | 10495 | # -*- coding: utf-8 -*-
#
# skggm documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(1,os.path.abspath("../inverse_covariance/"))
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'numpydoc',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx_gallery.gen_gallery'
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs' : '../examples',
# Suggested by readthedocs to prevent build failure
'backreferences_dir' : False,
'filename_pattern' : '../examples/convergence_',
# Uncomment below if examples fail
'expected_failing_examples': ['../examples/convergence_comparison.py'],
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples'}
# After python 3.3
# from unittest.mock import MagicMock
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['pyquic', 'quic_graph_lasso']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
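# pyquic / quic_graph_lasso appear to be the compiled QUIC extension modules;
# mocking them lets autodoc import the package on Read the Docs without
# having to build the C/Cython code.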
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'skggm'
copyright = u'2017, Manjari Narayan and Jason Laska'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skggmdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'skggm.tex', u'skggm Documentation',
u'Manjari Narayan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'skggm', u'skggm Documentation',
[u'Manjari Narayan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'skggm', u'skggm Documentation',
u'Manjari Narayan', 'skggm', 'One line description of project.',
'Miscellaneous'),
]
# Commenting out generate_example_rst, setup
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
app.connect('autodoc-process-docstring', generate_example_rst)
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| mit |
hobson/pug-nlp | pug/nlp/plot.py | 1 | 13140 | import seaborn as sb
import pandas
np = pandas.np
import bisect
from matplotlib import pyplot as plt
# from pylab import figure, savefig, imshow, axes, axis, cm, show
#####################################################################################
######## Based on the statistics plotting wrapper from Udacity ST-101
######## https://www.udacity.com/wiki/plotting_graphs_with_python
def scatterplot(x, y):
plt.ion()
plt.plot(x, y, 'b.')
plt.xlim(min(x) - 1, max(x) + 1)
plt.ylim(min(y) - 1, max(y) + 1)
plt.draw()
def barplot(labels, data):
pos = np.arange(len(data))
plt.ion()
plt.xticks(pos + 0.4, labels)
plt.bar(pos, data)
plt.grid('on')
#plt.draw()
def histplot(data, bins=None, nbins=5):
if not bins:
minx, maxx = min(data), max(data)
space = (maxx - minx) / float(nbins)
bins = np.arange(minx, maxx, space)
binned = [bisect.bisect(bins, x) for x in data]
l = ['%.1g' % x for x in list(bins) + [maxx]] if space < 1 or space > 1000 else [str(int(x)) for x in list(bins) + [maxx]]
print l
if len(str(l[1]) + '-' + l[2]) > 10:
displab = l[:-1]
else:
displab = [x + '-\n ' + y for x, y in zip(l[:-1], l[1:])]
barplot(displab, [binned.count(x + 1) for x in range(len(bins))])
def barchart(x, y, numbins=None):
if numbins is None:
numbins = int(len(x) ** 0.75) + 1
datarange = max(x) - min(x)
bin_width = float(datarange) / numbins
pos = min(x)
bins = [0 for i in range(numbins + 1)]
for i in range(numbins):
bins[i] = pos
pos += bin_width
bins[numbins] = max(x) + 1
binsum = [0 for i in range(numbins)]
bincount = [0 for i in range(numbins)]
binaverage = [0 for i in range(numbins)]
for i in range(numbins):
for j in range(len(x)):
if x[j] >= bins[i] and x[j] < bins[i + 1]:
bincount[i] += 1
binsum[i] += y[j]
for i in range(numbins):
binaverage[i] = float(binsum[i]) / bincount[i]
barplot(range(numbins), binaverage)
return x, y
def piechart(labels, data):
plt.ion()
fig = plt.figure(figsize=(7, 7))
plt.pie(data, labels=labels, autopct='%1.2f%%')
plt.draw()
return fig
def regression_and_plot(x, y=None):
"""
Fit a line to the x, y data supplied and plot it along with the raw samples
>>> age=[25, 26, 33, 29, 27, 21, 26, 35, 21, 37, 21, 38, 18, 19, 36, 30, 29, 24, 24, 36, 36, 27, 33, 23, 21, 26, 27, 27, 24, 26, 25, 24, 22, 25, 40, 39, 19, 31, 33, 30, 33, 27, 40, 32, 31, 35, 26, 34, 27, 34, 33, 20, 19, 40, 39, 39, 37, 18, 35, 20, 28, 31, 30, 29, 31, 18, 40, 20, 32, 20, 34, 34, 25, 29, 40, 40, 39, 36, 39, 34, 34, 35, 39, 38, 33, 32, 21, 29, 36, 33, 30, 39, 21, 19, 38, 30, 40, 36, 34, 28, 37, 29, 39, 25, 36, 33, 37, 19, 28, 26, 18, 22, 40, 20, 40, 20, 39, 29, 26, 26, 22, 37, 34, 29, 24, 23, 21, 19, 29, 30, 23, 40, 30, 30, 19, 39, 39, 25, 36, 38, 24, 32, 34, 33, 36, 30, 35, 26, 28, 23, 25, 23, 40, 20, 26, 26, 22, 23, 18, 36, 34, 36, 35, 40, 39, 39, 33, 22, 37, 20, 37, 35, 20, 23, 37, 32, 25, 35, 35, 22, 21, 31, 40, 26, 24, 29, 37, 19, 33, 31, 29, 27, 21, 19, 39, 34, 34, 40, 26, 39, 35, 31, 35, 24, 19, 27, 27, 20, 28, 30, 23, 21, 20, 26, 31, 24, 25, 25, 22, 32, 28, 36, 21, 38, 18, 25, 21, 33, 40, 19, 38, 33, 37, 32, 31, 31, 38, 19, 37, 37, 32, 36, 34, 35, 35, 35, 37, 35, 39, 34, 24, 25, 18, 40, 33, 32, 23, 25, 19, 39, 38, 36, 32, 27, 22, 40, 28, 29, 25, 36, 26, 28, 32, 34, 34, 21, 21, 32, 19, 35, 30, 35, 26, 31, 38, 34, 33, 35, 37, 38, 36, 40, 22, 30, 28, 28, 29, 36, 24, 28, 28, 28, 26, 21, 35, 22, 32, 28, 19, 33, 18, 22, 36, 26, 19, 26, 30, 27, 28, 24, 36, 37, 20, 32, 38, 39, 38, 30, 32, 30, 26, 23, 19, 29, 33, 34, 23, 30, 32, 40, 36, 29, 39, 34, 34, 22, 22, 22, 36, 38, 38, 30, 26, 40, 34, 21, 34, 38, 32, 35, 35, 26, 28, 20, 40, 23, 24, 26, 24, 39, 21, 33, 31, 39, 39, 20, 22, 18, 23, 36, 32, 37, 36, 26, 30, 30, 30, 21, 22, 40, 38, 22, 27, 23, 21, 22, 20, 30, 31, 40, 19, 32, 24, 21, 27, 32, 30, 34, 18, 25, 22, 40, 23, 19, 24, 24, 25, 40, 27, 29, 22, 39, 38, 34, 39, 30, 31, 33, 34, 25, 20, 20, 20, 20, 24, 19, 21, 31, 31, 29, 38, 39, 33, 40, 24, 38, 37, 18, 24, 38, 38, 22, 40, 21, 36, 30, 21, 30, 35, 20, 25, 25, 29, 30, 20, 29, 29, 31, 20, 26, 26, 38, 37, 39, 31, 35, 36, 30, 38, 36, 23, 39, 39, 20, 30, 34, 21, 23, 21, 33, 30, 33, 32, 36, 18, 31, 32, 25, 23, 23, 21, 34, 18, 40, 21, 29, 29, 21, 38, 35, 38, 32, 38, 27, 23, 33, 29, 19, 20, 35, 29, 27, 28, 20, 40, 35, 40, 40, 20, 36, 38, 28, 30, 30, 36, 29, 27, 25, 33, 19, 27, 28, 34, 36, 27, 40, 38, 37, 31, 33, 38, 36, 25, 23, 22, 23, 34, 26, 24, 28, 32, 22, 18, 29, 19, 21, 27, 28, 35, 30, 40, 28, 37, 34, 24, 40, 33, 29, 30, 36, 25, 26, 26, 28, 34, 39, 34, 26, 24, 33, 38, 37, 36, 34, 37, 33, 25, 27, 30, 26, 21, 40, 26, 25, 25, 40, 28, 35, 36, 39, 33, 36, 40, 32, 36, 26, 24, 36, 27, 28, 26, 37, 36, 37, 36, 20, 34, 30, 32, 40, 20, 31, 23, 27, 19, 24, 23, 24, 25, 36, 26, 33, 30, 27, 26, 28, 28, 21, 31, 24, 27, 24, 29, 29, 28, 22, 20, 23, 35, 30, 37, 31, 31, 21, 32, 29, 27, 27, 30, 39, 34, 23, 35, 39, 27, 40, 28, 36, 35, 38, 21, 18, 21, 38, 37, 24, 21, 25, 35, 27, 35, 24, 36, 32, 20]
>>> wage=[17000, 13000, 28000, 45000, 28000, 1200, 15500, 26400, 14000, 35000, 16400, 50000, 2600, 9000, 27000, 150000, 32000, 22000, 65000, 56000, 6500, 30000, 70000, 9000, 6000, 34000, 40000, 30000, 6400, 87000, 20000, 45000, 4800, 34000, 75000, 26000, 4000, 50000, 63000, 14700, 45000, 42000, 10000, 40000, 70000, 14000, 54000, 14000, 23000, 24400, 27900, 4700, 8000, 19000, 17300, 45000, 3900, 2900, 138000, 2100, 60000, 55000, 45000, 40000, 45700, 90000, 40000, 13000, 30000, 2000, 75000, 60000, 70000, 41000, 42000, 31000, 39000, 104000, 52000, 20000, 59000, 66000, 63000, 32000, 11000, 16000, 6400, 17000, 47700, 5000, 25000, 35000, 20000, 14000, 29000, 267000, 31000, 27000, 64000, 39600, 267000, 7100, 33000, 31500, 40000, 23000, 3000, 14000, 44000, 15100, 2600, 6200, 50000, 3000, 25000, 2000, 38000, 22000, 20000, 2500, 1500, 42000, 30000, 27000, 7000, 11900, 27000, 24000, 4300, 30200, 2500, 30000, 70000, 38700, 8000, 36000, 66000, 24000, 95000, 39000, 20000, 23000, 56000, 25200, 62000, 12000, 13000, 35000, 35000, 14000, 24000, 12000, 14000, 31000, 40000, 22900, 12000, 14000, 1600, 12000, 80000, 90000, 126000, 1600, 100000, 8000, 71000, 40000, 42000, 40000, 120000, 35000, 1200, 4000, 32000, 8000, 14500, 65000, 15000, 3000, 2000, 23900, 1000, 22000, 18200, 8000, 30000, 23000, 30000, 27000, 70000, 40000, 18000, 3100, 57000, 25000, 32000, 10000, 4000, 49000, 93000, 35000, 49000, 40000, 5500, 30000, 25000, 5700, 6000, 30000, 42900, 8000, 5300, 90000, 85000, 15000, 17000, 5600, 11500, 52000, 1000, 42000, 2100, 50000, 1500, 40000, 28000, 5300, 149000, 3200, 12000, 83000, 45000, 31200, 25000, 72000, 70000, 7000, 23000, 40000, 40000, 28000, 10000, 48000, 20000, 60000, 19000, 25000, 39000, 68000, 2300, 23900, 5000, 16300, 80000, 45000, 12000, 9000, 1300, 35000, 35000, 47000, 32000, 18000, 20000, 20000, 23400, 48000, 8000, 5200, 33500, 22000, 22000, 52000, 104000, 28000, 13000, 12000, 15000, 53000, 27000, 50000, 13900, 23000, 28100, 23000, 12000, 55000, 83000, 31000, 33200, 45000, 3000, 18000, 11000, 41000, 36000, 33600, 38000, 45000, 53000, 24000, 3000, 37500, 7700, 4800, 29000, 6600, 12400, 20000, 2000, 1100, 55000, 13400, 10000, 6000, 6000, 16000, 19000, 8300, 52000, 58000, 27000, 25000, 80000, 10000, 22000, 18000, 21000, 8000, 15200, 15000, 5000, 50000, 89000, 7000, 65000, 58000, 42000, 55000, 40000, 14000, 36000, 30000, 7900, 6000, 1200, 10000, 54000, 12800, 35000, 34000, 40000, 45000, 9600, 3300, 39000, 22000, 40000, 68000, 24400, 1000, 10800, 8400, 50000, 22000, 20000, 20000, 1300, 9000, 14200, 32000, 65000, 18000, 18000, 3000, 16700, 1500, 1400, 15000, 55000, 42000, 70000, 35000, 21600, 5800, 35000, 5700, 1700, 40000, 40000, 45000, 25000, 13000, 6400, 11000, 4200, 30000, 32000, 120000, 10000, 19000, 12000, 13000, 37000, 40000, 38000, 60000, 3100, 16000, 18000, 130000, 5000, 5000, 35000, 1000, 14300, 100000, 20000, 33000, 8000, 9400, 87000, 2500, 12000, 12000, 33000, 16500, 25500, 7200, 2300, 3100, 2100, 3200, 45000, 40000, 3800, 30000, 12000, 62000, 45000, 46000, 50000, 40000, 13000, 50000, 23000, 4000, 40000, 25000, 16000, 3000, 80000, 27000, 68000, 3500, 1300, 10000, 46000, 5800, 24000, 12500, 50000, 48000, 29000, 19000, 26000, 30000, 10000, 10000, 20000, 43000, 105000, 55000, 5000, 65000, 68000, 38000, 47000, 48700, 6100, 55000, 30000, 5000, 3500, 23400, 11400, 7000, 1300, 80000, 65000, 45000, 19000, 3000, 17100, 22900, 31200, 35000, 3000, 5000, 1000, 36000, 4800, 60000, 9800, 30000, 85000, 18000, 24000, 60000, 30000, 2000, 39000, 12000, 10500, 60000, 36000, 10500, 3600, 1200, 28600, 
48000, 20800, 5400, 9600, 30000, 30000, 20000, 6700, 30000, 3200, 42000, 37000, 5000, 18000, 20000, 14000, 12000, 18000, 3000, 13500, 35000, 38000, 30000, 36000, 66000, 45000, 32000, 46000, 80000, 27000, 4000, 21000, 7600, 16000, 10300, 27000, 19000, 14000, 19000, 3100, 20000, 2700, 27000, 7000, 13600, 75000, 35000, 36000, 25000, 6000, 36000, 50000, 46000, 3000, 37000, 40000, 30000, 48800, 19700, 16000, 14000, 12000, 25000, 25000, 28600, 17000, 31200, 57000, 23000, 23500, 46000, 18700, 26700, 9900, 16000, 3000, 52000, 51000, 14000, 14400, 27000, 26000, 60000, 25000, 6000, 20000, 3000, 69000, 24800, 12000, 3100, 18000, 20000, 267000, 28000, 9800, 18200, 80000, 6800, 21100, 20000, 68000, 20000, 45000, 8000, 40000, 31900, 28000, 24000, 2000, 32000, 11000, 20000, 5900, 16100, 23900, 40000, 37500, 11000, 55000, 37500, 60000, 23000, 9500, 34500, 4000, 9000, 11200, 35200, 30000, 18000, 21800, 19700, 16700, 12500, 11300, 4000, 39000, 32000, 14000, 65000, 50000, 2000, 30400, 22000, 1600, 56000, 40000, 85000, 9000, 10000, 19000, 5300, 5200, 43000, 60000, 50000, 38000, 267000, 15600, 1800, 17000, 45000, 31000, 5000, 8000, 43000, 103000, 45000, 8800, 26000, 47000, 40000, 8000]
>>> # udacity class data shows that people earn on average $1.8K more for each year of age and start with a $21K deficit
>>> regression_and_plot(age, wage) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
(1768.275..., -21991.9...)
>>> # Gainesville, FL census data shows 14 more new homes are built each year, starting with 517 completed in 1991
>>> regression_and_plot([483, 576, 529, 551, 529, 551, 663, 639, 704, 675, 601, 621, 630, 778, 831, 610]) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
(14.213..., 516.588...)
"""
if y is None:
y = x
x = range(len(x))
if not isinstance(x[0], (float, int, np.float64, np.float32)):
x = [row[0] for row in x]
A = np.vstack([np.array(x), np.ones(len(x))]).T
fit = np.linalg.lstsq(A, y)
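# np.linalg.lstsq returns (solution, residuals, rank, singular_values);
# fit[0] therefore holds [slope, offset] for the model y ~= slope * x + offset.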
poly = regressionplot(x, y, fit)
return poly
def regressionplot(x, y, fit=None):
"""
Plot a 2-D linear regression (y = slope * x + offset) overlayed over the raw data samples
"""
if fit is None:
fit = [(1, 0), None, None, None]
if not isinstance(x[0], (float, int, np.float64, np.float32)):
x = [row[0] for row in x]
poly = fit[0][0], fit[0][-1]
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(x, poly[0] * np.array(x) + poly[-1], 'r-', x, y, 'o', markersize=5)
plt.legend(['%+.2g * x + %.2g' % poly, 'Samples'])
ax.grid(True)
plt.draw()
return poly
class ColorMap(object):
def __init__(self, mat, **kwargs):
"""Render a color map (image) of a matrix or sequence of Matrix objects
A color map is like a contour map except the "height" or "value" of each matrix element
is used to select a color from a continuous spectrum of colors (for heatmap white is max and red is medium)
Arguments:
mat (np.matrix or np.array or list of list): the matrix to be rendered as a color map
"""
# try:
# self.colormaps = [ColorMap(m, cmap=cmap, pixelspervalue=pixelspervalue, minvalue=minvalue, maxvalue=maxvalue) for m in mat]
# except:
# pass
# # raise ValueError("Don't know how to display ColorMaps for a sequence of type {}".format(type(mat)))
try:
mat = np.array(mat.values)
except:
try:
mat = np.array(mat)
except:
pass
if not isinstance(mat, np.ndarray):
raise ValueError("Don't know how to display a ColorMap for a matrix of type {}".format(type(mat)))
kwargs['vmin'] = kwargs.get('vmin', np.amin(mat))
kwargs['vmax'] = kwargs.get('vmax', np.amax(mat))
kwargs['cmap'] = kwargs.get('cmap', 'bone') # 'hot', 'greens', 'blues'
kwargs['linewidths'] = kwargs.get('linewidths', 0.25)
kwargs['square'] = kwargs.get('square', True)
sb.heatmap(mat, **kwargs)
def show(self, block=False):
""" Display the last image drawn """
try:
plt.show(block=block)
except:
plt.show()
def save(self, filename):
""" save colormap to file"""
plt.savefig(filename, facecolor='black', edgecolor='black')
| mit |
lwcook/hypersonic-simulation | validate_CCY.py | 1 | 2063 | import json
import pdb
import numpy as hp
import matplotlib.pyplot as plt
import hypersonicsimulation.aerodynamics as aero
import hypersonicsimulation.geometry as geom
import hypersonicsimulation.vehicle as veh
import hypersonicsimulation.plotting as hsplot
def main():
with open('validation/CCY_validation_data.txt', 'r') as f:
lines = f.readlines()
jstring = ''
for line in lines:
first_char = line.split()[0].lower()
if first_char != '%':
jstring += line.strip('\r').strip('\n').strip('\r')
jdict = json.loads(jstring)
ccy = veh.ConeCylinder(Nstrips=30, Npanels=30)
fig, (ax1, ax2) = plt.subplots(1, 2)
Ms = [6, 7, 8, 9]
colors = [hsplot.blue, hsplot.red, hsplot.green, hsplot.grey]
for iM, M in enumerate(Ms):
print('M: ', M)
lkey = 'M_' + str(M) + '_Lift'
data = jdict[lkey]
lalphas = [d[0] for d in data]
Lifts_vlid = [d[1] for d in data]
Lifts_pred = []
for alpha in lalphas:
adyn = aero.AeroModel(M=M, alpha=alpha, dynamic_pressure=50000)
Cl, Cd = adyn.analyze_geometry(ccy.geometry, coeffs=True)
Lifts_pred.append(Cl)
dkey = 'M_' + str(M) + '_Drag'
data = jdict[dkey]
dalphas = [d[0] for d in data]
Drags_vlid = [d[1] for d in data]
Drags_pred = []
for alpha in dalphas:
adyn = aero.AeroModel(M=M, alpha=alpha, dynamic_pressure=50000)
Cl, Cd = adyn.analyze_geometry(ccy.geometry, coeffs=True)
Drags_pred.append(Cd)
ax1.plot(lalphas, Lifts_vlid, c=colors[iM], linestyle='dashed',
label='M'+str(M))
ax1.plot(lalphas, Lifts_pred, c=colors[iM], linestyle='solid')
ax2.plot(dalphas, Drags_vlid, c=colors[iM], linestyle='dashed',
label='M'+str(M))
ax2.plot(dalphas, Drags_pred, c=colors[iM], linestyle='solid')
plt.show()
hsplot.plot_geometry(ccy.geometry)
plt.show()
if __name__ == "__main__":
main()
| mit |
kgorman/WMG_speed | app/app.py | 1 | 5559 | #!/usr/bin/env python
from flask import Flask
from flask import render_template
import pandas as pd
import numpy as np
import datetime as datetime
app = Flask(__name__)
if not app.debug:
import logging
file_handler = logging.FileHandler('error.log')
file_handler.setLevel(logging.WARNING)
app.logger.addHandler(file_handler)
def int_to_dow(dayno):
""" convert an integer into a day of week string """
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday',
'Sunday']
return days[int(dayno)]
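# e.g. int_to_dow(0) -> 'Monday', int_to_dow(6) -> 'Sunday'; the weekday column
# in the CSV is assumed to follow the same Monday=0 convention.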
def create_graph_strings(input_list):
return None
def get_raw_data():
file_name = "static/files/all_data.csv"
dataframe = pd.read_csv(file_name, header=0)
dataframe['date'] = pd.to_datetime(dataframe['date'])
return dataframe
def get_max_speed(df):
return float(max(df['peak_speed']))
def get_vehicle_count(df):
return float(sum(df['vehicle_count']))
def get_violator_count(df):
return float(sum(df['violator_count']))
def get_avg_speed(df):
theavg = np.mean(df['peak_speed'])
return round(theavg, 2)
def get_over_limit(df):
theavg = get_avg_speed(df)
return (30-theavg)*-1
def get_timeseries_by_year(df):
''' group by keys, then return strings suitable for graphing '''
df['year'] = df.date.map(lambda x: '{year}'.format(year=x.year))
grouped = df.sort(['year'], ascending=1).groupby(['year'])
vehicle_count_by_month = grouped.aggregate(np.sum)['vehicle_count']
violator_count_by_month = grouped.aggregate(np.sum)['violator_count']
keys = vehicle_count_by_month.index.get_values()
# convert to specially formatted strings
vehicle_count_by_month_l = [str(i) for i in list(vehicle_count_by_month.get_values())]
violator_count_by_month_l = [str(i) for i in list(violator_count_by_month.get_values())]
keys_l = [str(i) for i in list(keys)]
vehicle_count_by_month_str = ','.join(vehicle_count_by_month_l)
violator_count_by_month_str = ','.join(violator_count_by_month_l)
keys_str = ",".join(keys_l)
return [keys_str, vehicle_count_by_month_str, violator_count_by_month_str]
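# The comma-joined strings (and the keys) are handed to index.html as the
# ts_* template variables below, presumably to be dropped straight into the
# Javascript arrays of the chart library, hence strings rather than lists.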
def get_speed_by_hour(df):
grouped = df.sort(['hour_of_day'], ascending=1).groupby(['hour_of_day'])
mean_speed = grouped.aggregate(np.mean)['peak_speed']
max_speed = grouped.aggregate(np.max)['peak_speed']
keys = mean_speed.index.get_values()
mean_speed_l = [str(i) for i in list(mean_speed.get_values())]
max_speed_l = [str(i) for i in list(max_speed.get_values())]
keys_l = [str(i) for i in list(keys)]
mean_speed_str = ','.join(mean_speed_l)
max_speed_str = ','.join(max_speed_l)
keys_str = ",".join(keys_l)
return [keys_str, mean_speed_str, max_speed_str]
def get_speed_by_day(df):
grouped = df.sort(['weekday'], ascending=0).groupby(['weekday'])
mean_speed = grouped.aggregate(np.mean)['peak_speed']
max_speed = grouped.aggregate(np.max)['peak_speed']
keys = mean_speed.index.get_values()
mean_dow_l = [str(i) for i in list(mean_speed.get_values())]
max_dow_l = [str(i) for i in list(max_speed.get_values())]
dow_keys_l = [int_to_dow(i) for i in list(keys)]
mean_speed_str = ','.join(mean_dow_l)
max_speed_str = ','.join(max_dow_l)
keys_str = "','".join(dow_keys_l)
keys_str = "'"+keys_str+"'"
return [keys_str, mean_speed_str, max_speed_str]
def car_count_by_hour(df):
grouped = df.sort(['date'], ascending=0).groupby(['hour_of_day'])
car_count = grouped.aggregate(np.mean)['vehicle_count']
violator_count = grouped.aggregate(np.max)['violator_count']
keys = car_count.index.get_values()
car_count_l = [str(i) for i in list(car_count.get_values())]
violator_count_l = [str(i) for i in list(violator_count.get_values())]
keys_l = [str(i) for i in list(keys)]
car_count_str = ','.join(car_count_l)
violator_count_str = ','.join(violator_count_l)
keys_str = ",".join(keys_l)
return [keys_str, car_count_str, violator_count_str]
@app.route("/")
def dashboard():
df = get_raw_data()
violator_pct = round((get_violator_count(df)/get_vehicle_count(df)*100), 2)
violator_graph = get_timeseries_by_year(df)
speed_graph = get_speed_by_hour(df)
dow_graph = get_speed_by_day(df)
car_count_graph = car_count_by_hour(df)
return render_template('index.html',
car_count=get_vehicle_count(df),
violator_count=get_violator_count(df),
violator_pct=violator_pct,
max_speed=get_max_speed(df),
avg_speed=get_avg_speed(df),
over_limit=get_over_limit(df),
ts_labels=violator_graph[0],
ts_vehicle=violator_graph[1],
ts_violator=violator_graph[2],
ts_speed_labels=speed_graph[0],
ts_mean_speed_data=speed_graph[1],
ts_max_speed_data=speed_graph[2],
ts_dow_labels=dow_graph[0],
ts_dow_mean=dow_graph[1],
ts_dow_max=dow_graph[2],
ts_car_count_labels=car_count_graph[0],
ts_car_count_count=car_count_graph[1],
ts_car_count_violators=car_count_graph[2]
)
@app.route("/about")
def about():
return render_template('about.html')
@app.route("/contact")
def contact():
return render_template('contact.html')
if __name__ == "__main__":
app.run(host='0.0.0.0')
| mit |
warmspringwinds/scikit-image | skimage/io/tests/test_mpl_imshow.py | 1 | 2822 | from __future__ import division
import numpy as np
from skimage import io
from skimage._shared._warnings import expected_warnings
import matplotlib.pyplot as plt
def setup():
io.reset_plugins()
# test images. Note that they don't have their full range for their dtype,
# but we still expect the display range to equal the full dtype range.
im8 = np.array([[0, 64], [128, 240]], np.uint8)
im16 = im8.astype(np.uint16) * 256
im64 = im8.astype(np.uint64)
imf = im8 / 255
im_lo = imf / 1000
im_hi = imf + 10
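# im_lo and im_hi deliberately fall outside the standard [0, 1] float range so
# that the low-dynamic-range and out-of-range warnings can be asserted below.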
def n_subplots(ax_im):
"""Return the number of subplots in the figure containing an ``AxesImage``.
Parameters
----------
ax_im : matplotlib.pyplot.AxesImage object
The input ``AxesImage``.
Returns
-------
n : int
The number of subplots in the corresponding figure.
Notes
-----
This function is intended to check whether a colorbar was drawn, in
which case two subplots are expected. For standard imshows, one
subplot is expected.
"""
return len(ax_im.get_figure().get_axes())
def test_uint8():
ax_im = io.imshow(im8)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 255)
# check that no colorbar was created
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_uint16():
ax_im = io.imshow(im16)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 65535)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_float():
ax_im = io.imshow(imf)
assert ax_im.cmap.name == 'gray'
assert ax_im.get_clim() == (0, 1)
assert n_subplots(ax_im) == 1
assert ax_im.colorbar is None
def test_low_dynamic_range():
with expected_warnings(["Low image dynamic range"]):
ax_im = io.imshow(im_lo)
assert ax_im.get_clim() == (im_lo.min(), im_lo.max())
# check that a colorbar was created
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_outside_standard_range():
plt.figure()
with expected_warnings(["out of standard range"]):
ax_im = io.imshow(im_hi)
assert ax_im.get_clim() == (im_hi.min(), im_hi.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_nonstandard_type():
plt.figure()
with expected_warnings(["Non-standard image type"]):
ax_im = io.imshow(im64)
assert ax_im.get_clim() == (im64.min(), im64.max())
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
def test_signed_image():
plt.figure()
im_signed = np.array([[-0.5, -0.2], [0.1, 0.4]])
ax_im = io.imshow(im_signed)
assert ax_im.get_clim() == (-0.5, 0.5)
assert n_subplots(ax_im) == 2
assert ax_im.colorbar is not None
if __name__ == '__main__':
np.testing.run_module_suite()
| bsd-3-clause |
biocore/qiime | tests/test_compare_trajectories.py | 15 | 14266 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jose Antonio Navas Molina"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Jose Antonio Navas Molina", "Antonio Gonzalez Pena",
"Yoshiki Vazquez Baeza", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Jose Antonio Navas Molina"
__email__ = "[email protected]"
from operator import attrgetter
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio.stats.ordination import OrdinationResults
from skbio.stats.gradient import (GroupResults, CategoryResults,
GradientANOVAResults)
from qiime.compare_trajectories import run_trajectory_analysis
class CompareTrajectoriesTests(TestCase):
def setUp(self):
eigvals = np.array([0.512367260461, 0.300719094427, 0.267912066004,
0.208988681078, 0.19169895326, 0.16054234528,
0.15017695712, 0.122457748167, 0.0])
site = np.array([[-0.212230626531, 0.216034194368, 0.03532727349,
-0.254450494129, -0.0687468542543, 0.231895596562,
0.00496549154314, -0.0026246871695,
9.73837390723e-10],
[-0.277487312135, -0.0295483215975, -0.0744173437992,
0.0957182357964, 0.204714844022, -0.0055407341857,
-0.190287966833, 0.16307126638, 9.73837390723e-10],
[0.220886492631, 0.0874848360559, -0.351990132198,
-0.00316535032886, 0.114635191853, -0.00019194106125,
0.188557853937, 0.030002427212, 9.73837390723e-10],
[0.0308923744062, -0.0446295973489, 0.133996451689,
0.29318228566, -0.167812539312, 0.130996149793,
0.113551017379, 0.109987942454, 9.73837390723e-10],
[0.27616778138, -0.0341866951102, 0.0633000238256,
0.100446653327, 0.123802521199, 0.1285839664,
-0.132852841046, -0.217514322505, 9.73837390723e-10],
[0.202458130052, -0.115216120518, 0.301820871723,
-0.18300251046, 0.136208248567, -0.0989435556722,
0.0927738484879, 0.0909429797672, 9.73837390723e-10],
[0.236467470907, 0.21863434374, -0.0301637746424,
-0.0225473129718, -0.205287183891, -0.180224615141,
-0.165277751908, 0.0411933458557, 9.73837390723e-10],
[-0.105517545144, -0.41405687433, -0.150073017617,
-0.116066751485, -0.158763393475, -0.0223918378516,
-0.0263068046112, -0.0501209518091,
9.73837390723e-10],
[-0.371636765565, 0.115484234741, 0.0721996475289,
0.0898852445906, 0.0212491652909, -0.184183028843,
0.114877153051, -0.164938000185, 9.73837390723e-10]])
prop_expl = np.array([25.6216900347, 15.7715955926, 14.1215046787,
11.6913885817, 9.83044890697, 8.51253468595,
7.88775505332, 6.56308246609, 4.42499350906e-16])
site_ids = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354', 'PC.593',
'PC.355', 'PC.607', 'PC.634']
self.ord_res = OrdinationResults(eigvals=eigvals, site=site,
proportion_explained=prop_expl,
site_ids=site_ids)
metadata_map = {'PC.354': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '60',
'Description': 'Control_mouse_I.D._354'},
'PC.355': {'Treatment': 'Control',
'DOB': '20061218',
'Weight': '55',
'Description': 'Control_mouse_I.D._355'},
'PC.356': {'Treatment': 'Control',
'DOB': '20061126',
'Weight': '50',
'Description': 'Control_mouse_I.D._356'},
'PC.481': {'Treatment': 'Control',
'DOB': '20070314',
'Weight': '52',
'Description': 'Control_mouse_I.D._481'},
'PC.593': {'Treatment': 'Control',
'DOB': '20071210',
'Weight': '57',
'Description': 'Control_mouse_I.D._593'},
'PC.607': {'Treatment': 'Fast',
'DOB': '20071112',
'Weight': '65',
'Description': 'Fasting_mouse_I.D._607'},
'PC.634': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '68',
'Description': 'Fasting_mouse_I.D._634'},
'PC.635': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '70',
'Description': 'Fasting_mouse_I.D._635'},
'PC.636': {'Treatment': 'Fast',
'DOB': '20080116',
'Weight': '72',
'Description': 'Fasting_mouse_I.D._636'}}
self.metadata_map = pd.DataFrame.from_dict(metadata_map,
orient='index')
self.categories = ['Treatment']
self.sort_by = 'Weight'
# This function makes the comparisons between the results classes easier
def assert_group_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.name, exp.name)
npt.assert_almost_equal(obs.trajectory, exp.trajectory)
npt.assert_almost_equal(obs.mean, exp.mean)
self.assertEqual(obs.info.keys(), exp.info.keys())
for key in obs.info:
npt.assert_almost_equal(obs.info[key], exp.info[key])
self.assertEqual(obs.message, exp.message)
def assert_category_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.category, exp.category)
if exp.probability is None:
self.assertTrue(obs.probability is None)
self.assertTrue(obs.groups is None)
else:
npt.assert_almost_equal(obs.probability, exp.probability)
for o, e in zip(sorted(obs.groups, key=attrgetter('name')),
sorted(exp.groups, key=attrgetter('name'))):
self.assert_group_results_almost_equal(o, e)
def assert_gradientANOVA_results_almost_equal(self, obs, exp):
"""Tests that obs and exp are almost equal"""
self.assertEqual(obs.algorithm, exp.algorithm)
self.assertEqual(obs.weighted, exp.weighted)
for o, e in zip(sorted(obs.categories, key=attrgetter('category')),
sorted(exp.categories, key=attrgetter('category'))):
self.assert_category_results_almost_equal(o, e)
def test_run_trajectory_analysis_avg(self):
"""Correctly computes the avg method"""
obs = run_trajectory_analysis(self.ord_res, self.metadata_map,
trajectory_categories=self.categories)
exp_control_group = GroupResults('Control',
np.array([2.3694943596755276,
3.3716388181385781,
5.4452089176253367,
4.5704258453173559,
4.4972603724478377]),
4.05080566264,
{'avg': 4.0508056626409275}, None)
exp_fast_group = GroupResults('Fast', np.array([7.2220488239279126,
4.2726021564374372,
1.1169097274372082,
4.02717600030876]),
4.15968417703,
{'avg': 4.1596841770278292}, None)
exp_treatment = CategoryResults('Treatment', 0.93311555,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('avg', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_run_trajectory_analysis_trajectory(self):
"""Correctly computes the trajectory method"""
obs = run_trajectory_analysis(self.ord_res, self.metadata_map,
trajectory_categories=self.categories,
sort_category=self.sort_by,
algorithm='trajectory')
exp_control_group = GroupResults('Control', np.array([8.6681963576,
7.0962717982,
7.1036434615,
4.0675712674]),
6.73392072123,
{'2-norm': 13.874494152}, None)
exp_fast_group = GroupResults('Fast', np.array([11.2291654905,
3.9163741156,
4.4943507388]),
6.5466301150,
{'2-norm': 12.713431181}, None)
exp_treatment = CategoryResults('Treatment', 0.9374500147,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('trajectory', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_run_trajectory_analysis_diff(self):
"""Correctly computes the first difference method"""
obs = run_trajectory_analysis(self.ord_res, self.metadata_map,
trajectory_categories=self.categories,
sort_category=self.sort_by,
algorithm='diff')
exp_control_group = GroupResults('Control', np.array([-1.5719245594,
0.0073716633,
-3.0360721941]),
-1.5335416967,
{'mean': -1.5335416967,
'std': 1.2427771485}, None)
exp_fast_group = GroupResults('Fast', np.array([-7.3127913749,
0.5779766231]),
-3.3674073758,
{'mean': -3.3674073758,
'std': 3.9453839990}, None)
exp_treatment = CategoryResults('Treatment', 0.6015260608,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('diff', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_run_trajectory_analysis_wdiff(self):
"""Correctly computes the window difference method"""
obs = run_trajectory_analysis(self.ord_res, self.metadata_map,
trajectory_categories=self.categories,
sort_category=self.sort_by,
algorithm='wdiff', window_size=3)
exp_control_group = GroupResults('Control', np.array([-2.5790341819,
-2.0166764661,
-3.0360721941,
0.]),
-1.9079457105,
{'mean': -1.9079457105,
'std': 1.1592139913}, None)
exp_fast_group = GroupResults('Fast', np.array([11.2291654905,
3.9163741156,
4.4943507388]),
6.5466301150,
{'mean': 6.5466301150,
'std': 3.3194494926},
"Cannot calculate the first difference "
"with a window of size (3).")
exp_treatment = CategoryResults('Treatment', 0.0103976830,
[exp_control_group, exp_fast_group],
None)
exp = GradientANOVAResults('wdiff', False, [exp_treatment])
self.assert_gradientANOVA_results_almost_equal(obs, exp)
def test_run_trajectory_analysis_error(self):
"""Raises an error if the algorithm is not recognized"""
with self.assertRaises(ValueError):
run_trajectory_analysis(self.ord_res, self.metadata_map,
algorithm='foo')
if __name__ == '__main__':
main()
| gpl-2.0 |
OzFlux/OzFluxQC | OzFluxQC.py | 1 | 51206 | import ast
import copy
import datetime
import logging
import matplotlib
matplotlib.use('TkAgg')
#matplotlib.use('Qt4Agg')
import numpy
import ntpath
import time
import Tkinter as tk
import tkMessageBox
import os
import sys
# The Lindsay Trap: check the scripts directory is present
if not os.path.exists("./scripts/"):
print "OzFluxQC: the scripts directory is missing"
sys.exit()
# since the scripts directory is there, try importing the modules
sys.path.append('scripts')
import cfg
import qcclim
import qccpd
import qcgf
import qcio
import qcls
import qcplot
import qcrp
import qcts
import qcutils
# now check the logfiles and plots directories are present
dir_list = ["./logfiles/","./plots/"]
for item in dir_list:
if not os.path.exists(item): os.makedirs(item)
# now check the solo/inf, solo/input, solo/log and solo/output directories are present
dir_list = ["./solo/inf","./solo/input","./solo/log","./solo/output"]
for item in dir_list:
if not os.path.exists(item): os.makedirs(item)
logging.basicConfig(filename='logfiles/OzFluxQC.log',level=logging.DEBUG)
console = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
console.setFormatter(formatter)
console.setLevel(logging.INFO)
logging.getLogger('').addHandler(console)
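# Everything is written to logfiles/OzFluxQC.log at DEBUG level, while the
# console handler above only echoes INFO and higher to the terminal.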
class qcgui(tk.Tk):
"""
QC Data Main GUI
Used to access read, save, and data processing (qcls) prodecures
Columns: Data levels:
1: L1 Raw Data (read excel into NetCDF)
2: L2 QA/QC (general QA/QC algorithms, site independent)
3: L3 Corrections (Flux data corrections, site dependent based on ancillary measurements available and technical issues)
4: L4 Gap Filling (Used for fill met data gaps and ingesting SOLO-ANN Gap Filled fluxes from external processes)
Rows: function access
1: Ingest excel dataset into NetCDF files
2: Process data from previous level and generate NetCDF file(s) at current level
3-6: Show Timestamp range of dataset and accept date range for graphical plots
7: Export excel dataset from NetCDF file
"""
def __init__(self, parent):
tk.Tk.__init__(self,parent)
self.parent = parent
self.initialise()
def option_not_implemented(self):
self.do_progress(text='Option not implemented yet ...')
logging.info(' Option not implemented yet ...')
def initialise(self):
self.org_frame = tk.Frame(self)
self.org_frame.grid()
# things in the first row of the GUI
L1Label = tk.Label(self.org_frame,text='L1: Raw data')
L1Label.grid(row=0,column=0,columnspan=2)
L2Label = tk.Label(self.org_frame,text='L2: QA/QC')
L2Label.grid(row=0,column=2,columnspan=2)
L3Label = tk.Label(self.org_frame,text='L3: Process')
L3Label.grid(row=0,column=4,columnspan=2)
# things in the second row of the GUI
doL1Button = tk.Button (self.org_frame, text="Read L1 file", command=self.do_l1qc )
doL1Button.grid(row=1,column=0,columnspan=2)
doL2Button = tk.Button (self.org_frame, text="Do L2 QA/QC", command=self.do_l2qc )
doL2Button.grid(row=1,column=2,columnspan=2)
doL3Button = tk.Button (self.org_frame, text="Do L3 processing", command=self.do_l3qc )
doL3Button.grid(row=1,column=4,columnspan=2)
# things in the third row of the GUI
filestartLabel = tk.Label(self.org_frame,text='File start date')
filestartLabel.grid(row=2,column=0,columnspan=3)
fileendLabel = tk.Label(self.org_frame,text='File end date')
fileendLabel.grid(row=2,column=3,columnspan=3)
# things in the fourth row of the GUI
self.filestartValue = tk.Label(self.org_frame,text='No file loaded ...')
self.filestartValue.grid(row=3,column=0,columnspan=3)
self.fileendValue = tk.Label(self.org_frame,text='No file loaded ...')
self.fileendValue.grid(row=3,column=3,columnspan=3)
# things in the fifth row of the GUI
plotstartLabel = tk.Label(self.org_frame, text='Start date (YYYY-MM-DD)')
plotstartLabel.grid(row=4,column=0,columnspan=3)
self.plotstartEntry = tk.Entry(self.org_frame)
self.plotstartEntry.grid(row=4,column=3,columnspan=3)
# things in row sixth of the GUI
plotendLabel = tk.Label(self.org_frame, text='End date (YYYY-MM-DD)')
plotendLabel.grid(row=5,column=0,columnspan=3)
self.plotendEntry = tk.Entry(self.org_frame)
self.plotendEntry.grid(row=5,column=3,columnspan=3)
# things in the seventh row of the GUI
closeplotwindowsButton = tk.Button (self.org_frame, text="Close plot windows", command=self.do_closeplotwindows )
closeplotwindowsButton.grid(row=6,column=0,columnspan=2)
plotL1L2Button = tk.Button (self.org_frame, text="Plot L1 & L2 Data", command=self.do_plotL1L2 )
plotL1L2Button.grid(row=6,column=2,columnspan=2)
plotL3L3Button = tk.Button (self.org_frame, text="Plot L3 Data", command=self.do_plotL3L3 )
plotL3L3Button.grid(row=6,column=4,columnspan=2)
# things in the eighth row of the GUI
quitButton = tk.Button (self.org_frame, text='Quit', command=self.do_quit )
quitButton.grid(row=7,column=0,columnspan=2)
savexL2Button = tk.Button (self.org_frame, text='Write L2 Excel file', command=self.do_savexL2 )
savexL2Button.grid(row=7,column=2,columnspan=2)
savexL3Button = tk.Button (self.org_frame, text='Write L3 Excel file', command=self.do_savexL3 )
savexL3Button.grid(row=7,column=4,columnspan=2)
# other things in the GUI
self.progress = tk.Label(self.org_frame, text='Waiting for input ...')
self.progress.grid(row=8,column=0,columnspan=6,sticky="W")
# now we put together the menu, "File" first
menubar = tk.Menu(self)
filemenu = tk.Menu(menubar,tearoff=0)
filemenu.add_command(label="Concatenate netCDF",command=self.do_ncconcat)
filemenu.add_command(label="Split netCDF",command=self.do_ncsplit)
filemenu.add_command(label="List netCDF contents",command=self.option_not_implemented)
fileconvertmenu = tk.Menu(menubar,tearoff=0)
#fileconvertmenu.add_command(label="V2.7 to V2.8",command=self.do_v27tov28)
fileconvertmenu.add_command(label="nc to EddyPro (biomet)",command=self.do_nc2ep_biomet)
fileconvertmenu.add_command(label="nc to FluxNet",command=self.do_nc2fn)
fileconvertmenu.add_command(label="nc to REddyProc",command=self.do_nc2reddyproc)
fileconvertmenu.add_command(label="nc to SMAP",command=self.do_nc2smap)
fileconvertmenu.add_command(label="nc to xls",command=self.do_nc2xls)
fileconvertmenu.add_command(label="xls to nc",command=self.option_not_implemented)
filemenu.add_cascade(label="Convert",menu=fileconvertmenu)
filemenu.add_separator()
filemenu.add_command(label="Quit",command=self.do_quit)
menubar.add_cascade(label="File",menu=filemenu)
# now the "Run" menu
runmenu = tk.Menu(menubar,tearoff=0)
runmenu.add_command(label="Read L1 file",command=self.do_l1qc)
runmenu.add_command(label="Do L2 QA/QC",command=self.do_l2qc)
runmenu.add_command(label="Do L3 processing",command=self.do_l3qc)
runmenu.add_command(label="Do L4 gap fill (drivers)",command=self.do_l4qc)
runmenu.add_command(label="Do L5 gap fill (fluxes)",command=self.do_l5qc)
runmenu.add_command(label="Do L6 partitioning",command=self.do_l6qc)
menubar.add_cascade(label="Run",menu=runmenu)
# then the "Plot" menu
plotmenu = tk.Menu(menubar,tearoff=0)
plotmenu.add_command(label="Plot L1 & L2",command=self.do_plotL1L2)
plotmenu.add_command(label="Plot L3",command=self.do_plotL3L3)
plotmenu.add_command(label="Plot L4",command=self.do_plotL3L4)
plotmenu.add_command(label="Plot L5",command=self.option_not_implemented)
plotmenu.add_command(label="Plot L6 summary",command=self.do_plotL6_summary)
fnmenu = tk.Menu(menubar,tearoff=0)
fnmenu.add_command(label="Standard",command=lambda:self.do_plotfluxnet(mode="standard"))
fnmenu.add_command(label="Custom",command=lambda:self.do_plotfluxnet(mode="custom"))
plotmenu.add_cascade(label="30 minute",menu=fnmenu)
#plotmenu.add_command(label="FluxNet",command=self.do_plotfluxnet)
fpmenu = tk.Menu(menubar,tearoff=0)
fpmenu.add_command(label="Standard",command=lambda:self.do_plotfingerprint(mode="standard"))
fpmenu.add_command(label="Custom",command=lambda:self.do_plotfingerprint(mode="custom"))
plotmenu.add_cascade(label="Fingerprint",menu=fpmenu)
plotmenu.add_command(label="Quick check",command=self.do_plotquickcheck)
plotmenu.add_command(label="Years check",command=self.option_not_implemented)
plotmenu.add_separator()
plotmenu.add_command(label="Close plots",command=self.do_closeplotwindows)
menubar.add_cascade(label="Plot",menu=plotmenu)
# and the "Utilities" menu
utilsmenu = tk.Menu(menubar,tearoff=0)
climatologymenu = tk.Menu(menubar,tearoff=0)
climatologymenu.add_command(label="Standard",command=lambda:self.do_climatology(mode="standard"))
climatologymenu.add_command(label="Custom",command=lambda:self.do_climatology(mode="custom"))
utilsmenu.add_cascade(label="Climatology",menu=climatologymenu)
utilsmenu.add_command(label="Compare Ah",command=self.option_not_implemented)
utilsmenu.add_command(label="Compare EP",command=self.do_compare_eddypro)
ustarmenu = tk.Menu(menubar,tearoff=0)
ustarmenu.add_command(label="Reichstein",command=self.option_not_implemented)
ustarmenu.add_command(label="Change Point Detection",command=self.do_cpd)
utilsmenu.add_cascade(label="u* threshold",menu=ustarmenu)
menubar.add_cascade(label="Utilities",menu=utilsmenu)
# and the "Help" menu
helpmenu = tk.Menu(menubar,tearoff=0)
helpmenu.add_command(label="Contents",command=self.do_helpcontents)
helpmenu.add_command(label="About",command=self.option_not_implemented)
menubar.add_cascade(label="Help",menu=helpmenu)
self.config(menu=menubar)
def do_climatology(self,mode="standard"):
"""
Calls qcclim.climatology
"""
logging.info(' Starting climatology')
self.do_progress(text='Doing climatology ...')
if mode=="standard":
stdname = "controlfiles/standard/climatology.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
self.do_progress(text='Opening input file ...')
filename = qcio.get_filename_dialog(path='../Sites',title='Choose a netCDF file')
if len(filename)==0:
logging.info( " Climatology: no input file chosen")
self.do_progress(text='Waiting for input ...')
return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Doing the climatology')
qcclim.climatology(cf)
self.do_progress(text='Finished climatology')
logging.info(' Finished climatology')
logging.info("")
def do_closeplotwindows(self):
"""
Close plot windows
"""
self.do_progress(text='Closing plot windows ...') # tell the user what we're doing
logging.info(' Closing plot windows ...')
matplotlib.pyplot.close('all')
#fig_numbers = [n.num for n in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
##logging.info(' Closing plot windows: '+str(fig_numbers))
#for n in fig_numbers:
#matplotlib.pyplot.close(n)
self.do_progress(text='Waiting for input ...') # tell the user what we're doing
logging.info(' Waiting for input ...')
def do_compare_eddypro(self):
"""
Calls qcclim.compare_eddypro
Compares the results of OzFluxQC (L3) with those from EddyPro (full output).
"""
self.do_progress(text='Comparing EddyPro and OzFlux results ...')
qcclim.compare_eddypro()
self.do_progress(text='Finished comparing EddyPro and OzFlux')
logging.info(' Finished comparing EddyPro and OzFlux')
def do_cpd(self):
"""
Calls qccpd.cpd_main
Estimates the u* threshold from the L3 data using the change point detection (CPD) method.
"""
logging.info(' Starting estimation u* threshold using CPD')
self.do_progress(text='Estimating u* threshold using CPD ...')
stdname = "controlfiles/standard/cpd.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
filename = qcio.get_filename_dialog(path='../Sites',title='Choose an input nc file')
if len(filename)==0: self.do_progress(text='Waiting for input ...'); return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
qccpd.cpd_main(cf)
self.do_progress(text='Finished estimating u* threshold')
logging.info(' Finished estimating u* threshold')
logging.info("")
def do_helpcontents(self):
tkMessageBox.showinfo("Obi Wan says ...","Read the source, Luke!")
def do_l1qc(self):
"""
Calls qcls.l1qc
"""
logging.info(" Starting L1 processing ...")
self.do_progress(text='Load L1 Control File ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0:
logging.info( " L1: no control file chosen")
self.do_progress(text='Waiting for input ...')
return
self.do_progress(text='Doing L1 ...')
ds1 = qcls.l1qc(cf)
if ds1.returncodes["value"] == 0:
outfilename = qcio.get_outfilenamefromcf(cf)
ncFile = qcio.nc_open_write(outfilename)
qcio.nc_write_series(ncFile,ds1)
self.do_progress(text='Finished L1')
logging.info(' Finished L1')
logging.info("")
else:
msg = 'An error occurred, check the console ...'
self.do_progress(text=msg)
def do_l2qc(self):
"""
Call qcls.l2qc function
Performs L2 QA/QC processing on raw data
Outputs L2 netCDF file to ncData folder
ControlFiles:
L2_year.txt
or
L2.txt
ControlFile contents (see ControlFile/Templates/L2.txt for example):
[General]:
Enter list of functions to be performed
[Files]:
L1 input file name and path
L2 output file name and path
[Variables]:
Variable names and parameters for:
Range check to set upper and lower rejection limits
Diurnal check to reject observations by time of day that
are outside specified standard deviation limits
Timestamps for excluded dates
Timestamps for excluded hours
[Plots]:
Variable lists for plot generation
"""
logging.info(" Starting L2 processing ...")
self.do_progress(text='Load L2 Control File ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0:
logging.info( " L2: no control file chosen")
self.do_progress(text='Waiting for input ...')
return
infilename = qcio.get_infilenamefromcf(self.cf)
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
self.do_progress(text='Doing L2 QC ...')
self.ds1 = qcio.nc_read_series(infilename)
if len(self.ds1.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds1; return
self.update_startenddate(str(self.ds1.series['DateTime']['Data'][0]),
str(self.ds1.series['DateTime']['Data'][-1]))
self.ds2 = qcls.l2qc(self.cf,self.ds1)
logging.info(' Finished L2 QC process')
self.do_progress(text='Finished L2 QC process')
self.do_progress(text='Saving L2 QC ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(self.cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
qcio.nc_write_series(ncFile,self.ds2) # save the L2 data
self.do_progress(text='Finished saving L2 QC data') # tell the user we are done
logging.info(' Finished saving L2 QC data')
logging.info("")
def do_l3qc(self):
"""
Call qcls.l3qc_sitename function
Performs L3 Corrections and QA/QC processing on L2 data
Outputs L3 netCDF file to ncData folder
Outputs L3 netCDF file to OzFlux folder
Available corrections:
* corrections requiring ancillary measurements or samples
marked with an asterisk
Linear correction
fixed slope
linearly shifting slope
Conversion of virtual temperature to actual temperature
2D Coordinate rotation
Massman correction for frequency attenuation*
Webb, Pearman and Leuning correction for flux effects on density
measurements
Conversion of virtual heat flux to actual heat flux
Correction of soil moisture content to empirical calibration
curve*
Addition of soil heat storage to ground heat flux*
ControlFiles:
L3_year.txt
or
L3a.txt
ControlFile contents (see ControlFile/Templates/L3.txt for example):
[General]:
Python control parameters
[Files]:
L2 input file name and path
L3 output file name and ncData folder path
L3 OzFlux output file name and OzFlux folder path
[Massman] (where available):
Constants used in frequency attenuation correction
zmd: instrument height (z) less zero-plane displacement
height (d), m
z0: aerodynamic roughness length, m
angle: angle from CSAT mounting point between CSAT and
IRGA mid-path, degrees
CSATarm: distance from CSAT mounting point to CSAT
mid-path, m
IRGAarm: distance from CSAT mounting point to IRGA
mid-path, m
[Soil]:
Constants used in correcting Fg for storage and in empirical
corrections of soil water content
FgDepth: Heat flux plate depth, m
BulkDensity: Soil bulk density, kg/m3
OrganicContent: Soil organic content, fraction
SwsDefault
Constants for empirical corrections using log(sensor)
and exp(sensor) functions (SWC_a0, SWC_a1, SWC_b0,
SWC_b1, SWC_t, TDR_a0, TDR_a1, TDR_b0, TDR_b1,
TDR_t)
Variable and attributes lists (empSWCin, empSWCout,
empTDRin, empTDRout, linTDRin, SWCattr, TDRattr)
[Output]:
Variable subset list for OzFlux output file
[Variables]:
Variable names and parameters for:
Range check to set upper and lower rejection limits
Diurnal check to reject observations by time of day that
are outside specified standard deviation limits
Timestamps, slope, and offset for Linear correction
[Plots]:
Variable lists for plot generation
"""
logging.info(" Starting L3 processing ...")
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0:
logging.info( " L3: no control file chosen")
self.do_progress(text='Waiting for input ...')
return
infilename = qcio.get_infilenamefromcf(self.cf)
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
self.ds2 = qcio.nc_read_series(infilename)
if len(self.ds2.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds2; return
self.update_startenddate(str(self.ds2.series['DateTime']['Data'][0]),
str(self.ds2.series['DateTime']['Data'][-1]))
self.do_progress(text='Doing L3 QC & Corrections ...')
self.ds3 = qcls.l3qc(self.cf,self.ds2)
self.do_progress(text='Finished L3')
txtstr = ' Finished L3: Standard processing for site: '
txtstr = txtstr+self.ds3.globalattributes['site_name'].replace(' ','')
logging.info(txtstr)
self.do_progress(text='Saving L3 QC & Corrected NetCDF data ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(self.cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
outputlist = qcio.get_outputlistfromcf(self.cf,'nc')
qcio.nc_write_series(ncFile,self.ds3,outputlist=outputlist) # save the L3 data
self.do_progress(text='Finished saving L3 QC & Corrected NetCDF data') # tell the user we are done
logging.info(' Finished saving L3 QC & Corrected NetCDF data')
logging.info("")
def do_l4qc(self):
"""
Call qcls.l4qc_gapfill function
Performs L4 gap filling on L3 met data
or
Ingests L4 gap filled fluxes performed in external SOLO-ANN and computes daily sums
Outputs L4 netCDF file to ncData folder
Outputs L4 netCDF file to OzFlux folder
ControlFiles:
L4_year.txt
or
L4b.txt
ControlFile contents (see ControlFile/Templates/L4.txt and
ControlFile/Templates/L4b.txt for examples):
[General]:
Python control parameters (SOLO)
Site characteristics parameters (Gap filling)
[Files]:
L3 input file name and path (Gap filling)
L4 input file name and path (SOLO)
L4 output file name and ncData folder path (both)
L4 OzFlux output file name and OzFlux folder path
[Variables]:
Variable subset list for OzFlux output file (where
available)
"""
logging.info(" Starting L4 processing ...")
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
infilename = qcio.get_infilenamefromcf(cf)
if len(infilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
ds3 = qcio.nc_read_series(infilename)
if len(ds3.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del ds3; return
ds3.globalattributes['controlfile_name'] = cf['controlfile_name']
self.update_startenddate(str(ds3.series['DateTime']['Data'][0]),
str(ds3.series['DateTime']['Data'][-1]))
sitename = ds3.globalattributes['site_name']
self.do_progress(text='Doing L4 gap filling drivers: '+sitename+' ...')
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
ds4 = qcls.l4qc(cf,ds3)
if ds4.returncodes["alternate"]=="quit" or ds4.returncodes["solo"]=="quit":
self.do_progress(text='Quitting L4: '+sitename)
logging.info(' Quitting L4: '+sitename)
else:
self.do_progress(text='Finished L4: '+sitename)
logging.info(' Finished L4: '+sitename)
self.do_progress(text='Saving L4 gap filled data ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
outputlist = qcio.get_outputlistfromcf(cf,'nc')
qcio.nc_write_series(ncFile,ds4,outputlist=outputlist) # save the L4 data
self.do_progress(text='Finished saving L4 gap filled data') # tell the user we are done
logging.info(' Finished saving L4 gap filled data')
logging.info("")
def do_l5qc(self):
"""
Call qcls.l5qc function to gap fill the fluxes.
"""
logging.info(" Starting L5 processing ...")
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
infilename = qcio.get_infilenamefromcf(cf)
if len(infilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
ds4 = qcio.nc_read_series(infilename)
if len(ds4.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del ds4; return
ds4.globalattributes['controlfile_name'] = cf['controlfile_name']
self.update_startenddate(str(ds4.series['DateTime']['Data'][0]),
str(ds4.series['DateTime']['Data'][-1]))
sitename = ds4.globalattributes['site_name']
self.do_progress(text='Doing L5 gap filling fluxes: '+sitename+' ...')
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
ds5 = qcls.l5qc(cf,ds4)
if ds5.returncodes["solo"]=="quit":
self.do_progress(text='Quitting L5: '+sitename)
logging.info(' Quitting L5: '+sitename)
else:
self.do_progress(text='Finished L5: '+sitename)
logging.info(' Finished L5: '+sitename)
self.do_progress(text='Saving L5 gap filled data ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
outputlist = qcio.get_outputlistfromcf(cf,'nc')
qcio.nc_write_series(ncFile,ds5,outputlist=outputlist) # save the L5 data
self.do_progress(text='Finished saving L5 gap filled data') # tell the user we are done
logging.info(' Finished saving L5 gap filled data')
logging.info("")
def do_l6qc(self):
"""
Call qcls.l6qc function to partition NEE into GPP and ER.
"""
logging.info(" Starting L6 processing ...")
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
infilename = qcio.get_infilenamefromcf(cf)
if len(infilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
if not qcutils.file_exists(infilename): self.do_progress(text='An error occurred, check the console ...'); return
ds5 = qcio.nc_read_series(infilename)
if len(ds5.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del ds5; return
ds5.globalattributes['controlfile_name'] = cf['controlfile_name']
self.update_startenddate(str(ds5.series['DateTime']['Data'][0]),
str(ds5.series['DateTime']['Data'][-1]))
sitename = ds5.globalattributes['site_name']
self.do_progress(text='Doing L6 partitioning: '+sitename+' ...')
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
ds6 = qcls.l6qc(cf,ds5)
self.do_progress(text='Finished L6: '+sitename)
logging.info(' Finished L6: '+sitename)
self.do_progress(text='Saving L6 partitioned data ...') # put up the progress message
outfilename = qcio.get_outfilenamefromcf(cf)
if len(outfilename)==0: self.do_progress(text='An error occurred, check the console ...'); return
ncFile = qcio.nc_open_write(outfilename)
outputlist = qcio.get_outputlistfromcf(cf,'nc')
qcio.nc_write_series(ncFile,ds6,outputlist=outputlist) # save the L6 data
self.do_progress(text='Finished saving L6 partitioned data') # tell the user we are done
logging.info(' Finished saving L6 partitioned data')
logging.info("")
def do_nc2ep_biomet(self):
""" Calls qcio.ep_biomet_write_csv. """
logging.info(' Starting conversion to EddyPro biomet file')
self.do_progress(text='Load control file ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Converting nc to EddyPro biomet CSV ...')
return_code = qcio.ep_biomet_write_csv(self.cf)
if return_code==0:
self.do_progress(text='An error occurred, check the console ...');
return
else:
logging.info(' Finished conversion to EddyPro biomet format')
self.do_progress(text='Finished conversion to EddyPro biomet format')
logging.info("")
def do_nc2fn(self):
""" Calls qcio.fn_write_csv. """
logging.info(' Starting conversion to FluxNet CSV file')
self.do_progress(text='Load control file ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Converting nc to FluxNet CSV ...')
qcio.fn_write_csv(self.cf)
logging.info(' Finished conversion')
self.do_progress(text='Finished conversion')
logging.info("")
def do_nc2reddyproc(self):
""" Calls qcio.reddyproc_write_csv."""
logging.info(' Starting conversion to REddyProc CSV file')
self.do_progress(text="Choosing netCDF file ...")
ncfilename = qcio.get_filename_dialog(path="../Sites",title="Choose a netCDF file")
if len(ncfilename)==0 or not os.path.exists(ncfilename):
self.do_progress(text="Waiting for input ..."); return
self.do_progress(text='Converting nc to REddyProc CSV ...')
qcio.reddyproc_write_csv(ncfilename)
logging.info(' Finished conversion')
self.do_progress(text='Finished conversion')
logging.info("")
def do_nc2smap(self):
""" Calls qcio.smap_write_csv. """
logging.info(' Starting conversion to SMAP CSV file')
self.do_progress(text='Load control file ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Converting nc to SMAP CSV ...')
qcio.smap_write_csv(self.cf)
logging.info(' Finished conversion')
self.do_progress(text='Finished conversion')
logging.info("")
def do_nc2xls(self):
""" Calls qcio.nc_2xls. """
logging.info(" Starting conversion to Excel file")
self.do_progress(text="Choosing netCDF file ...")
ncfilename = qcio.get_filename_dialog(path="../Sites",title="Choose a netCDF file")
if len(ncfilename)==0: self.do_progress(text="Waiting for input ..."); return
self.do_progress(text="Converting netCDF file to Excel file")
qcio.nc_2xls(ncfilename,outputlist=None)
self.do_progress(text="Finished converting netCDF file")
logging.info(" Finished converting netCDF file")
logging.info("")
def do_ncconcat(self):
"""
Calls qcio.nc_concatenate
"""
logging.info(' Starting concatenation of netCDF files')
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Concatenating files')
qcio.nc_concatenate(cf)
self.do_progress(text='Finished concatenating files')
logging.info(' Finished concatenating files')
logging.info("")
def do_ncsplit(self):
"""
Calls qcio.nc_split
"""
logging.info(' Starting split of netCDF file')
self.do_progress(text='Splitting file')
qcio.nc_split()
self.do_progress(text='Finished splitting file')
logging.info(' Finished splitting file')
logging.info("")
def do_plotfingerprint(self,mode="standard"):
""" Plot fingerprint"""
logging.info(' Starting fingerprint plot')
self.do_progress(text='Doing fingerprint plot ...')
if mode=="standard":
stdname = "controlfiles/standard/fingerprint.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
filename = qcio.get_filename_dialog(path='../Sites',title='Choose a netCDF file')
if len(filename)==0 or not os.path.exists(filename):
self.do_progress(text='Waiting for input ...'); return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
self.do_progress(text='Plotting fingerprint ...')
qcplot.plot_fingerprint(cf)
self.do_progress(text='Finished plotting fingerprint')
logging.info(' Finished plotting fingerprint')
logging.info("")
def do_plotfluxnet(self,mode="standard"):
""" Plot FluxNet style time series of data."""
self.do_progress(text='Doing FluxNet plots ...')
if mode=="standard":
stdname = "controlfiles/standard/fluxnet.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
filename = qcio.get_filename_dialog(path='../Sites',title='Choose a netCDF file')
if len(filename)==0 or not os.path.exists(filename):
self.do_progress(text='Waiting for input ...'); return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
else:
self.do_progress(text='Loading control file ...')
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Plotting FluxNet style plots ...')
qcplot.plot_fluxnet(cf)
self.do_progress(text='Finished FluxNet plotting')
logging.info(' Finished FluxNet plotting')
def do_plotquickcheck(self):
""" Plot quickcheck"""
self.do_progress(text='Loading control file ...')
stdname = "controlfiles/standard/quickcheck.txt"
if os.path.exists(stdname):
cf = qcio.get_controlfilecontents(stdname)
filename = qcio.get_filename_dialog(path='../Sites',title='Choose an input file')
if len(filename)==0: self.do_progress(text='Waiting for input ...'); return
if "Files" not in dir(cf): cf["Files"] = {}
cf["Files"]["file_path"] = ntpath.split(filename)[0]+"/"
cf["Files"]["in_filename"] = ntpath.split(filename)[1]
else:
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Plotting quickcheck ...')
qcplot.plot_quickcheck(cf)
self.do_progress(text='Finished plotting quickcheck')
logging.info(' Finished plotting quickcheck')
def do_plotL1L2(self):
"""
Plot L1 (raw) and L2 (QA/QC) data in blue and red, respectively
Control File for do_l2qc function used.
If L2 Control File not loaded, requires control file selection.
"""
if 'ds1' not in dir(self) or 'ds2' not in dir(self):
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
l1filename = qcio.get_infilenamefromcf(self.cf)
if not qcutils.file_exists(l1filename): self.do_progress(text='An error occurred, check the console ...'); return
self.ds1 = qcio.nc_read_series(l1filename)
if len(self.ds1.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds1; return
l2filename = qcio.get_outfilenamefromcf(self.cf)
self.ds2 = qcio.nc_read_series(l2filename)
if len(self.ds2.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds2; return
self.update_startenddate(str(self.ds1.series['DateTime']['Data'][0]),
str(self.ds1.series['DateTime']['Data'][-1]))
self.do_progress(text='Plotting L1 & L2 QC ...')
cfname = self.ds2.globalattributes['controlfile_name']
self.cf = qcio.get_controlfilecontents(cfname)
for nFig in self.cf['Plots'].keys():
si = qcutils.GetDateIndex(self.ds1.series['DateTime']['Data'],self.plotstartEntry.get(),
ts=self.ds1.globalattributes['time_step'],default=0,match='exact')
ei = qcutils.GetDateIndex(self.ds1.series['DateTime']['Data'],self.plotendEntry.get(),
ts=self.ds1.globalattributes['time_step'],default=-1,match='exact')
plt_cf = self.cf['Plots'][str(nFig)]
if 'Type' in plt_cf.keys():
if str(plt_cf['Type']).lower() =='xy':
self.do_progress(text='Plotting L1 and L2 XY ...')
qcplot.plotxy(self.cf,nFig,plt_cf,self.ds1,self.ds2,si,ei)
else:
self.do_progress(text='Plotting L1 and L2 QC ...')
qcplot.plottimeseries(self.cf,nFig,self.ds1,self.ds2,si,ei)
else:
self.do_progress(text='Plotting L1 and L2 QC ...')
qcplot.plottimeseries(self.cf,nFig,self.ds1,self.ds2,si,ei)
self.do_progress(text='Finished plotting L1 and L2')
logging.info(' Finished plotting L1 and L2, check the GUI')
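# Note on the loop above: each entry of the control file's [Plots] section may
# carry an optional 'Type' key; when it is 'xy' the pair of data sets is passed
# to qcplot.plotxy, otherwise (or when 'Type' is absent) a time series plot is
# drawn with qcplot.plottimeseries.  A hypothetical entry might look like
#   [Plots]
#       [[1]]
#           Type = xy
# where the nested-section syntax and the plot number are assumptions based on
# how cf['Plots'] is indexed above.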
def do_plotL3L3(self):
"""
Plot L3 (QA/QC and Corrected) data
Control File for do_l3qc function used.
If L3 Control File not loaded, requires control file selection.
"""
if 'ds3' not in dir(self):
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
l3filename = qcio.get_outfilenamefromcf(self.cf)
self.ds3 = qcio.nc_read_series(l3filename)
if len(self.ds3.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds3; return
self.update_startenddate(str(self.ds3.series['DateTime']['Data'][0]),
str(self.ds3.series['DateTime']['Data'][-1]))
self.do_progress(text='Plotting L3 QC ...')
cfname = self.ds3.globalattributes['controlfile_name']
self.cf = qcio.get_controlfilecontents(cfname)
for nFig in self.cf['Plots'].keys():
si = qcutils.GetDateIndex(self.ds3.series['DateTime']['Data'],self.plotstartEntry.get(),
ts=self.ds3.globalattributes['time_step'],default=0,match='exact')
ei = qcutils.GetDateIndex(self.ds3.series['DateTime']['Data'],self.plotendEntry.get(),
ts=self.ds3.globalattributes['time_step'],default=-1,match='exact')
plt_cf = self.cf['Plots'][str(nFig)]
if 'Type' in plt_cf.keys():
if str(plt_cf['Type']).lower() =='xy':
self.do_progress(text='Plotting L3 XY ...')
qcplot.plotxy(self.cf,nFig,plt_cf,self.ds3,self.ds3,si,ei)
else:
self.do_progress(text='Plotting L3 QC ...')
SeriesList = ast.literal_eval(plt_cf['Variables'])
qcplot.plottimeseries(self.cf,nFig,self.ds3,self.ds3,si,ei)
else:
self.do_progress(text='Plotting L3 QC ...')
qcplot.plottimeseries(self.cf,nFig,self.ds3,self.ds3,si,ei)
self.do_progress(text='Finished plotting L3')
logging.info(' Finished plotting L3, check the GUI')
def do_plotL3L4(self):
"""
Plot L3 (QA/QC and Corrected) and L4 (Gap Filled) data in blue and
red, respectively
Control File for do_l4qc function used.
If L4 Control File not loaded, requires control file selection.
"""
if 'ds3' not in dir(self) or 'ds4' not in dir(self):
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0:
self.do_progress(text='Waiting for input ...')
return
l3filename = qcio.get_infilenamefromcf(self.cf)
if not qcutils.file_exists(l3filename): self.do_progress(text='An error occurred, check the console ...'); return
self.ds3 = qcio.nc_read_series(l3filename)
if len(self.ds3.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds3; return
l4filename = qcio.get_outfilenamefromcf(self.cf)
self.ds4 = qcio.nc_read_series(l4filename)
if len(self.ds4.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del self.ds4; return
self.update_startenddate(str(self.ds3.series['DateTime']['Data'][0]),
str(self.ds3.series['DateTime']['Data'][-1]))
self.do_progress(text='Plotting L3 and L4 QC ...')
cfname = self.ds4.globalattributes['controlfile_name']
self.cf = qcio.get_controlfilecontents(cfname)
for nFig in self.cf['Plots'].keys():
si = qcutils.GetDateIndex(self.ds3.series['DateTime']['Data'],self.plotstartEntry.get(),
ts=self.ds3.globalattributes['time_step'],default=0,match='exact')
ei = qcutils.GetDateIndex(self.ds3.series['DateTime']['Data'],self.plotendEntry.get(),
ts=self.ds3.globalattributes['time_step'],default=-1,match='exact')
qcplot.plottimeseries(self.cf,nFig,self.ds3,self.ds4,si,ei)
self.do_progress(text='Finished plotting L4')
logging.info(' Finished plotting L4, check the GUI')
def do_plotL4L5(self):
"""
Plot L4 (gap filled drivers) and L5 (gap filled fluxes) data.
"""
pass
def do_plotL6_summary(self):
"""
Plot L6 summary.
"""
cf = qcio.load_controlfile(path='controlfiles')
if len(cf)==0:
self.do_progress(text='Waiting for input ...')
return
if "Options" not in cf: cf["Options"]={}
cf["Options"]["call_mode"] = "interactive"
l6filename = qcio.get_outfilenamefromcf(cf)
if not qcutils.file_exists(l6filename): self.do_progress(text='An error occurred, check the console ...'); return
ds6 = qcio.nc_read_series(l6filename)
if len(ds6.series.keys())==0: self.do_progress(text='An error occurred, check the console ...'); del ds6; return
self.update_startenddate(str(ds6.series['DateTime']['Data'][0]),
str(ds6.series['DateTime']['Data'][-1]))
self.do_progress(text='Plotting L6 summary ...')
qcgf.ImportSeries(cf,ds6)
qcrp.L6_summary(cf,ds6)
self.do_progress(text='Finished plotting L6 summary')
logging.info(' Finished plotting L6 summary, check the GUI')
def do_progress(self,text):
"""
Update progress message in QC Data GUI
"""
self.progress.destroy()
self.progress = tk.Label(self.org_frame, text=text)
self.progress.grid(row=8,column=0,columnspan=6,sticky="W")
self.update()
def do_quit(self):
"""
Close plot windows and quit QC Data GUI
"""
self.do_progress(text='Closing plot windows ...') # tell the user what we're doing
logging.info(' Closing plot windows ...')
matplotlib.pyplot.close('all')
self.do_progress(text='Quitting ...') # tell the user what we're doing
logging.info(' Quitting ...')
self.quit()
def do_savexL2(self):
"""
Calls qcio.nc_2xls
Exports data from the L2 netCDF file to Excel
Outputs L2 Excel file containing Data and Flag worksheets
"""
self.do_progress(text='Exporting L2 NetCDF -> Excel ...') # put up the progress message
# get the output filename
outfilename = qcio.get_outfilenamefromcf(self.cf)
# get the output list
outputlist = qcio.get_outputlistfromcf(self.cf,'xl')
qcio.nc_2xls(outfilename,outputlist=outputlist)
self.do_progress(text='Finished L2 Data Export') # tell the user we are done
logging.info(' Finished saving L2 data')
def do_savexL3(self):
"""
Calls qcio.nc_2xls
Exports data from the L3 netCDF file to Excel
Outputs L3 Excel file containing Data and Flag worksheets
"""
self.do_progress(text='Exporting L3 NetCDF -> Excel ...') # put up the progress message
# get the output filename
outfilename = qcio.get_outfilenamefromcf(self.cf)
# get the output list
outputlist = qcio.get_outputlistfromcf(self.cf,'xl')
qcio.nc_2xls(outfilename,outputlist=outputlist)
self.do_progress(text='Finished L3 Data Export') # tell the user we are done
logging.info(' Finished saving L3 data')
def do_xl2nc(self):
"""
Calls qcio.xl2nc
"""
logging.info(" Starting L1 processing ...")
self.do_progress(text='Loading control file ...')
self.cf = qcio.load_controlfile(path='controlfiles')
if len(self.cf)==0: self.do_progress(text='Waiting for input ...'); return
self.do_progress(text='Reading Excel file & writing to netCDF')
rcode = qcio.xl2nc(self.cf,"L1")
if rcode==1:
self.do_progress(text='Finished writing to netCDF ...')
logging.info(' Finished writing to netCDF ...')
else:
self.do_progress(text='An error occurred, check the console ...')
def update_startenddate(self,startstr,endstr):
"""
Read start and end timestamps from data and report in QC Data GUI
"""
self.filestartValue.destroy()
self.fileendValue.destroy()
self.filestartValue = tk.Label(self.org_frame,text=startstr)
self.filestartValue.grid(row=3,column=0,columnspan=3)
self.fileendValue = tk.Label(self.org_frame,text=endstr)
self.fileendValue.grid(row=3,column=3,columnspan=3)
self.update()
if __name__ == "__main__":
#log = qcutils.startlog('qc','logfiles/qc.log')
qcGUI = qcgui(None)
main_title = cfg.version_name+' Main GUI '+cfg.version_number
qcGUI.title(main_title)
qcGUI.mainloop()
qcGUI.destroy()
logging.info('QC: All done')
| gpl-3.0 |
kdebrab/pandas | pandas/core/reshape/util.py | 20 | 1915 | import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.compat import reduce
from pandas.core.index import Index
from pandas.core import common as com
def match(needles, haystack):
haystack = Index(haystack)
needles = Index(needles)
return haystack.get_indexer(needles)
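# Illustrative example (values made up):
#   match([1, 3], [1, 2, 3, 4]) -> array([0, 2])
# i.e. the position of each needle in the haystack; needles absent from the
# haystack map to -1, following the Index.get_indexer convention.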
def cartesian_product(X):
"""
Numpy version of itertools.product or pandas.compat.product.
Sometimes faster (for large inputs)...
Parameters
----------
X : list-like of list-likes
Returns
-------
product : list of ndarrays
Examples
--------
>>> cartesian_product([list('ABC'), [1, 2]])
[array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
array([1, 2, 1, 2, 1, 2])]
See also
--------
itertools.product : Cartesian product of input iterables. Equivalent to
nested for-loops.
pandas.compat.product : An alias for itertools.product.
"""
msg = "Input must be a list-like of list-likes"
if not is_list_like(X):
raise TypeError(msg)
for x in X:
if not is_list_like(x):
raise TypeError(msg)
if len(X) == 0:
return []
lenX = np.fromiter((len(x) for x in X), dtype=np.intp)
cumprodX = np.cumproduct(lenX)
a = np.roll(cumprodX, 1)
a[0] = 1
if cumprodX[-1] != 0:
b = cumprodX[-1] / cumprodX
else:
# if any factor is empty, the cartesian product is empty
b = np.zeros_like(cumprodX)
return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
np.product(a[i]))
for i, x in enumerate(X)]
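# Worked example of the bookkeeping above, for X = [list('ABC'), [1, 2]]:
#   lenX     = [3, 2]
#   cumprodX = [3, 6]
#   a        = [1, 3]    (cumprodX rolled right, first entry reset to 1)
#   b        = [2., 1.]  (cumprodX[-1] / cumprodX)
# so the first factor is repeated 2x and tiled 1x -> A A B B C C, and the
# second factor is repeated 1x and tiled 3x -> 1 2 1 2 1 2, matching the
# docstring example.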
def _compose2(f, g):
"""Compose 2 callables"""
return lambda *args, **kwargs: f(g(*args, **kwargs))
def compose(*funcs):
"""Compose 2 or more callables"""
assert len(funcs) > 1, 'At least 2 callables must be passed to compose'
return reduce(_compose2, funcs)
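# A small illustrative helper (hypothetical, not used elsewhere) showing that
# compose applies its callables right-to-left, like mathematical composition.
def _compose_example():
    def add_one(x):
        return x + 1

    def double(x):
        return x * 2

    # compose(f, g)(x) == f(g(x)), so this returns add_one(double(3)) == 7
    return compose(add_one, double)(3)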
| bsd-3-clause |
fergalbyrne/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_qt4agg.py | 70 | 4985 | """
Render to qt from agg
"""
from __future__ import division
import os, sys
import matplotlib
from matplotlib.figure import Figure
from backend_agg import FigureCanvasAgg
from backend_qt4 import QtCore, QtGui, FigureManagerQT, FigureCanvasQT,\
show, draw_if_interactive, backend_version, \
NavigationToolbar2QT
DEBUG = False
def new_figure_manager( num, *args, **kwargs ):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_qtagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass( *args, **kwargs )
canvas = FigureCanvasQTAgg( thisFig )
return FigureManagerQT( canvas, num )
class NavigationToolbar2QTAgg(NavigationToolbar2QT):
def _get_canvas(self, fig):
return FigureCanvasQTAgg(fig)
class FigureManagerQTAgg(FigureManagerQT):
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
print "Classic toolbar is not supported"
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2QTAgg(canvas, parent)
else:
toolbar = None
return toolbar
class FigureCanvasQTAgg( FigureCanvasQT, FigureCanvasAgg ):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def __init__( self, figure ):
if DEBUG: print 'FigureCanvasQtAgg: ', figure
FigureCanvasQT.__init__( self, figure )
FigureCanvasAgg.__init__( self, figure )
self.drawRect = False
self.rect = []
self.replot = True
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def resizeEvent( self, e ):
FigureCanvasQT.resizeEvent( self, e )
def drawRectangle( self, rect ):
self.rect = rect
self.drawRect = True
self.repaint( )
def paintEvent( self, e ):
"""
Draw to the Agg backend and then copy the image to the qt.drawable.
In Qt, all drawing should be done inside of here when a widget is
shown onscreen.
"""
#FigureCanvasQT.paintEvent( self, e )
if DEBUG: print 'FigureCanvasQtAgg.paintEvent: ', self, \
self.get_width_height()
# only replot data when needed
if type(self.replot) is bool: # might be a bbox for blitting
if self.replot:
FigureCanvasAgg.draw(self)
# matplotlib is in rgba byte order. QImage wants to put the bytes
# into argb format and is in a 4 byte unsigned int. Little endian
# system is LSB first and expects the bytes in reverse order
# (bgra).
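# Concretely, Format_ARGB32 stores each pixel as the 32-bit integer
# 0xAARRGGBB; on a little-endian machine those bytes sit in memory as
# B,G,R,A, which is why the agg buffer (natively R,G,B,A) is converted with
# tostring_bgra() here and with tostring_argb() on big-endian machines.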
if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian:
stringBuffer = self.renderer._renderer.tostring_bgra()
else:
stringBuffer = self.renderer._renderer.tostring_argb()
qImage = QtGui.QImage(stringBuffer, self.renderer.width,
self.renderer.height,
QtGui.QImage.Format_ARGB32)
p = QtGui.QPainter(self)
p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage))
# draw the zoom rectangle to the QPainter
if self.drawRect:
p.setPen( QtGui.QPen( QtCore.Qt.black, 1, QtCore.Qt.DotLine ) )
p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] )
p.end()
# we are blitting here
else:
bbox = self.replot
l, b, r, t = bbox.extents
w = int(r) - int(l)
h = int(t) - int(b)
t = int(b) + h
reg = self.copy_from_bbox(bbox)
stringBuffer = reg.to_string_argb()
qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)
pixmap = QtGui.QPixmap.fromImage(qImage)
p = QtGui.QPainter( self )
p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
p.end()
self.replot = False
self.drawRect = False
def draw( self ):
"""
Draw the figure when xwindows is ready for the update
"""
if DEBUG: print "FigureCanvasQtAgg.draw", self
self.replot = True
FigureCanvasAgg.draw(self)
self.update()
# Added following line to improve realtime pan/zoom on windows:
QtGui.qApp.processEvents()
def blit(self, bbox=None):
"""
Blit the region in bbox
"""
self.replot = bbox
l, b, w, h = bbox.bounds
t = b + h
self.update(l, self.renderer.height-t, w, h)
def print_figure(self, *args, **kwargs):
FigureCanvasAgg.print_figure(self, *args, **kwargs)
self.draw()
| agpl-3.0 |
xuewei4d/scikit-learn | conftest.py | 4 | 4011 | # Even if empty this file is useful so that when running from the root folder
# ./sklearn is added to sys.path by pytest. See
# https://docs.pytest.org/en/latest/pythonpath.html for more details. For
# example, this allows to build extensions in place and run pytest
# doc/modules/clustering.rst and use sklearn from the local folder rather than
# the one from site-packages.
import os
import platform
import sys
import pytest
from _pytest.doctest import DoctestItem
from sklearn.utils import _IS_32BIT
from sklearn.externals import _pilutil
from sklearn._min_dependencies import PYTEST_MIN_VERSION
from sklearn.utils.fixes import np_version, parse_version
if parse_version(pytest.__version__) < parse_version(PYTEST_MIN_VERSION):
raise ImportError('Your version of pytest is too old, you should have '
'at least pytest >= {} installed.'
.format(PYTEST_MIN_VERSION))
def pytest_addoption(parser):
parser.addoption("--skip-network", action="store_true", default=False,
help="skip network tests")
def pytest_collection_modifyitems(config, items):
for item in items:
# FeatureHasher is not compatible with PyPy
if (item.name.endswith(('_hash.FeatureHasher',
'text.HashingVectorizer'))
and platform.python_implementation() == 'PyPy'):
marker = pytest.mark.skip(
reason='FeatureHasher is not compatible with PyPy')
item.add_marker(marker)
# Known failure on with GradientBoostingClassifier on ARM64
elif (item.name.endswith('GradientBoostingClassifier')
and platform.machine() == 'aarch64'):
marker = pytest.mark.xfail(
reason=(
'know failure. See '
'https://github.com/scikit-learn/scikit-learn/issues/17797' # noqa
)
)
item.add_marker(marker)
# Skip tests which require internet if the flag is provided
if (config.getoption("--skip-network")
or int(os.environ.get("SKLEARN_SKIP_NETWORK_TESTS", "0"))):
skip_network = pytest.mark.skip(
reason="test requires internet connectivity")
for item in items:
if "network" in item.keywords:
item.add_marker(skip_network)
# numpy changed the str/repr formatting of numpy arrays in 1.14. We want to
# run doctests only for numpy >= 1.14.
skip_doctests = False
try:
if np_version < parse_version('1.14'):
reason = 'doctests are only run for numpy >= 1.14'
skip_doctests = True
elif _IS_32BIT:
reason = ('doctest are only run when the default numpy int is '
'64 bits.')
skip_doctests = True
elif sys.platform.startswith("win32"):
reason = ("doctests are not run for Windows because numpy arrays "
"repr is inconsistent across platforms.")
skip_doctests = True
except ImportError:
pass
if skip_doctests:
skip_marker = pytest.mark.skip(reason=reason)
for item in items:
if isinstance(item, DoctestItem):
item.add_marker(skip_marker)
elif not _pilutil.pillow_installed:
skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!")
for item in items:
if item.name in [
"sklearn.feature_extraction.image.PatchExtractor",
"sklearn.feature_extraction.image.extract_patches_2d"]:
item.add_marker(skip_marker)
def pytest_configure(config):
import sys
sys._is_pytest_session = True
# declare our custom markers to avoid PytestUnknownMarkWarning
config.addinivalue_line(
"markers",
"network: mark a test for execution if network available."
)
def pytest_unconfigure(config):
import sys
del sys._is_pytest_session
| bsd-3-clause |
buckiracer/data-science-from-scratch | RefMaterials/code-python3/recommender_systems.py | 12 | 6248 | import math, random
from collections import defaultdict, Counter
from linear_algebra import dot
users_interests = [
["Hadoop", "Big Data", "HBase", "Java", "Spark", "Storm", "Cassandra"],
["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"],
["Python", "scikit-learn", "scipy", "numpy", "statsmodels", "pandas"],
["R", "Python", "statistics", "regression", "probability"],
["machine learning", "regression", "decision trees", "libsvm"],
["Python", "R", "Java", "C++", "Haskell", "programming languages"],
["statistics", "probability", "mathematics", "theory"],
["machine learning", "scikit-learn", "Mahout", "neural networks"],
["neural networks", "deep learning", "Big Data", "artificial intelligence"],
["Hadoop", "Java", "MapReduce", "Big Data"],
["statistics", "R", "statsmodels"],
["C++", "deep learning", "artificial intelligence", "probability"],
["pandas", "R", "Python"],
["databases", "HBase", "Postgres", "MySQL", "MongoDB"],
["libsvm", "regression", "support vector machines"]
]
popular_interests = Counter(interest
for user_interests in users_interests
for interest in user_interests).most_common()
def most_popular_new_interests(user_interests, max_results=5):
suggestions = [(interest, frequency)
for interest, frequency in popular_interests
if interest not in user_interests]
return suggestions[:max_results]
#
# user-based filtering
#
def cosine_similarity(v, w):
return dot(v, w) / math.sqrt(dot(v, v) * dot(w, w))
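# A tiny worked check of cosine_similarity (hypothetical helper; relies on the
# dot() imported above).  Identical directions give 1.0 and interest vectors
# with no overlap give 0.0.
def _cosine_similarity_example():
    v, w = [1, 1, 0], [1, 0, 0]
    # dot(v, w) = 1, dot(v, v) = 2, dot(w, w) = 1  ->  1 / sqrt(2) ~= 0.707
    return cosine_similarity(v, w)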
unique_interests = sorted(list({ interest
for user_interests in users_interests
for interest in user_interests }))
def make_user_interest_vector(user_interests):
"""given a list of interests, produce a vector whose i-th element is 1
if unique_interests[i] is in the list, 0 otherwise"""
return [1 if interest in user_interests else 0
for interest in unique_interests]
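# Illustration with a hypothetical four-interest universe: if unique_interests
# were ['Big Data', 'C++', 'Python', 'R'], then
#   make_user_interest_vector(['Python', 'R']) -> [0, 0, 1, 1]
# (the real unique_interests is the sorted union of all interests above).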
user_interest_matrix = list(map(make_user_interest_vector, users_interests))
user_similarities = [[cosine_similarity(interest_vector_i, interest_vector_j)
for interest_vector_j in user_interest_matrix]
for interest_vector_i in user_interest_matrix]
def most_similar_users_to(user_id):
pairs = [(other_user_id, similarity) # find other
for other_user_id, similarity in # users with
enumerate(user_similarities[user_id]) # nonzero
if user_id != other_user_id and similarity > 0] # similarity
return sorted(pairs, # sort them
key=lambda pair: pair[1], # most similar
reverse=True) # first
def user_based_suggestions(user_id, include_current_interests=False):
# sum up the similarities
suggestions = defaultdict(float)
for other_user_id, similarity in most_similar_users_to(user_id):
for interest in users_interests[other_user_id]:
suggestions[interest] += similarity
# convert them to a sorted list
suggestions = sorted(suggestions.items(),
key=lambda pair: pair[1],
reverse=True)
# and (maybe) exclude already-interests
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
#
# Item-Based Collaborative Filtering
#
interest_user_matrix = [[user_interest_vector[j]
for user_interest_vector in user_interest_matrix]
for j, _ in enumerate(unique_interests)]
interest_similarities = [[cosine_similarity(user_vector_i, user_vector_j)
for user_vector_j in interest_user_matrix]
for user_vector_i in interest_user_matrix]
def most_similar_interests_to(interest_id):
similarities = interest_similarities[interest_id]
pairs = [(unique_interests[other_interest_id], similarity)
for other_interest_id, similarity in enumerate(similarities)
if interest_id != other_interest_id and similarity > 0]
return sorted(pairs,
key=lambda pair: pair[1],
reverse=True)
def item_based_suggestions(user_id, include_current_interests=False):
suggestions = defaultdict(float)
user_interest_vector = user_interest_matrix[user_id]
for interest_id, is_interested in enumerate(user_interest_vector):
if is_interested == 1:
similar_interests = most_similar_interests_to(interest_id)
for interest, similarity in similar_interests:
suggestions[interest] += similarity
suggestions = sorted(suggestions.items(),
key=lambda pair: pair[1],
reverse=True)
if include_current_interests:
return suggestions
else:
return [(suggestion, weight)
for suggestion, weight in suggestions
if suggestion not in users_interests[user_id]]
if __name__ == "__main__":
print("Popular Interests")
print(popular_interests)
print()
print("Most Popular New Interests")
print("already like:", ["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"])
print(most_popular_new_interests(["NoSQL", "MongoDB", "Cassandra", "HBase", "Postgres"]))
print()
print("already like:", ["R", "Python", "statistics", "regression", "probability"])
print(most_popular_new_interests(["R", "Python", "statistics", "regression", "probability"]))
print()
print("User based similarity")
print("most similar to 0")
print(most_similar_users_to(0))
print("Suggestions for 0")
print(user_based_suggestions(0))
print()
print("Item based similarity")
print("most similar to 'Big Data'")
print(most_similar_interests_to(0))
print()
print("suggestions for user 0")
print(item_based_suggestions(0))
| unlicense |
zorroblue/scikit-learn | examples/applications/plot_model_complexity_influence.py | 40 | 6385 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
# #############################################################################
# Routines
# Initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
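# The SGDClassifier complexity measure above is simply the number of non-zero
# entries of the (sparsified) coef_ matrix, so increasing l1_ratio -- more L1
# weight in the elastic-net penalty -- should drive this count down.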
# #############################################################################
# Main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True, 'tol': 1e-3},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
rxl194/18-327-wavelets-filter-banks | tools/Handout_examples.py | 2 | 7554 | ## Handout_examples.py
## This is my implementation of the Handout and Slide examples for the Lecture Notes of
## using Python libraries numpy, scipy
##
## The main reference that I'll use is
## Gilbert Strang, and Kevin Amaratunga. 18.327 Wavelets, Filter Banks and Applications, Spring 2003. (Massachusetts Institute of Technology: MIT OpenCourseWare), http://ocw.mit.edu (Accessed 19 Jun, 2015). License: Creative Commons BY-NC-SA
##
##
##
#####################################################################################
## Copyleft 2015, Ernest Yeung <[email protected]>
##
## 20150619
##
## This program, along with all its code, is free software;
## you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA
##
## Governing the ethics of using this program, I default to the Caltech Honor Code:
## ``No member of the Caltech community shall take unfair advantage of
## any other member of the Caltech community.''
##
## If you like what I'm doing and would like to help and contribute support,
## please take a look at my crowdfunding campaign at ernestyalumni.tilt.com
## and subscription-based Patreon
## read my mission statement and give your financial support,
## no matter how small or large,
## if you can
## and to keep checking my ernestyalumni.wordpress.com blog and
## various social media channels
## for updates as I try to keep putting out great stuff.
##
## Fund Science! Help my physics education outreach and research efforts at
## Open/Tilt or subscription Patreon - Ernest Yeung
##
## ernestyalumni.tilt.com
##
## Facebook : ernestyalumni
## gmail : ernestyalumni
## google : ernestyalumni
## linkedin : ernestyalumni
## Patreon : ernestyalumni
## Tilt/Open : ernestyalumni
## tumblr : ernestyalumni
## twitter : ernestyalumni
## youtube : ernestyalumni
## wordpress : ernestyalumni
##
##
################################################################################
##
##
##
##
####################
## MIT OCW 18.327
####################
###############
# Handout 1
###############
import numpy as np
import matplotlib.pyplot as plt
# Shout outs to ESCI 386 - Scientific Programming, Analysis and Visualization with Python
# Lesson 17 - Fourier Transforms; the lecture slides are good for expanding on the examples with Python
# http://snowball.millersville.edu/~adecaria/ESCI386P/esci386-lesson17-Fourier-Transforms.pdf
N = 100 # Number of data points
dt = 1.0 # Sampling period (in seconds)
time = dt*np.arange(0,N) # time coordinates
ht = np.zeros(N)
ht[0] = 0.5
ht[1] = 0.5
hhatf = np.fft.fft(ht)
freqs = np.fft.fftfreq(N,dt)
hhatf = np.fft.fftshift(hhatf) # Shift zero frequency to center
freqs = np.fft.fftshift(freqs) # Shift zero frequency to center
Fig0101, ax0101 = plt.subplots(3,1,sharex=True)
ax0101[0].plot( freqs, np.real( hhatf) ) # Plot Cosine terms
ax0101[0].set_ylabel(r'$Re[\widehat{h}(2\pi f)]$', size='x-large')
ax0101[1].plot( freqs, np.imag( hhatf) ) # Plot Sine terms
ax0101[1].set_ylabel(r'$Im[\widehat{h}(2\pi f)]$', size='x-large')
ax0101[2].plot( freqs, np.absolute( hhatf)**2 ) # Plot spectral power
ax0101[2].set_ylabel(r'$|\widehat{h}(2\pi f)|^2$', size='x-large')
ax0101[2].set_xlabel(r'$f$', size='x-large')
# plt.show()
# if you want this in radians (as I do)
T = 2.0*np.pi # Sampling period (in seconds)
omegas = np.fft.fftfreq(N,1./T) # rad./sec
omegas = np.fft.fftshift(omegas) # Shift zero frequency to center
Fig0101b, ax0101b = plt.subplots(3,1,sharex=True)
ax0101b[0].plot( omegas, np.real( hhatf) ) # Plot Cosine terms
ax0101b[0].set_ylabel(r'$Re[\widehat{h}(\omega)]$', size='x-large')
ax0101b[1].plot( omegas, np.imag( hhatf) ) # Plot Sine terms
ax0101b[1].set_ylabel(r'$Im[\widehat{h}(\omega)]$', size='x-large')
ax0101b[2].plot( omegas, np.absolute( hhatf)**2 ) # Plot spectral power
ax0101b[2].set_ylabel(r'$|\widehat{h}(\omega)|^2$', size='x-large')
ax0101b[2].set_xlabel(r'$\omega \, (rad/sec)$', size='x-large')
#Fig0101b.suptitle("Low pass Filter example", fontsize=10) # add a centered title to the figure
# plt.show()
Fig0101ba, axba = plt.subplots(1,1)
# axba.plot( omegas, np.arccos( np.real( hhatf)/np.absolute(hhatf) ) )
axba.plot( omegas, np.arctan2( np.imag( hhatf), np.real(hhatf)) )
axba.set_ylabel(r'$\phi(\omega)$',size='x-large')
axba.set_xlabel(r'$\omega \, (rad/sec)$', size='x-large')
# axba.title(0,0,"Low-pass filter phase")
#####
## High-pass filter example
#####
ht[1]=-0.5
hhatf = np.fft.fft(ht)
hhatf = np.fft.fftshift(hhatf) # Shift zero frequency to center
Fig0101c, ax0101c = plt.subplots(3,1,sharex=True)
ax0101c[0].plot( omegas, np.real( hhatf) ) # Plot Cosine terms
ax0101c[0].set_ylabel(r'$Re[\widehat{h}(\omega)]$', size='x-large')
ax0101c[1].plot( omegas, np.imag( hhatf) ) # Plot Sine terms
ax0101c[1].set_ylabel(r'$Im[\widehat{h}(\omega)]$', size='x-large')
ax0101c[2].plot( omegas, np.absolute( hhatf)**2 ) # Plot spectral power
ax0101c[2].set_ylabel(r'$|\widehat{h}(\omega)|^2$', size='x-large')
ax0101c[2].set_xlabel(r'$\omega \, (rad/sec)$', size='x-large')
#Fig0101c.suptitle("High-pass filter example", fontsize=10) # add a centered title to the figure
# plt.show()
Fig0101ca, axca = plt.subplots(1,1)
axca.plot( omegas, np.arctan2( np.imag( hhatf), np.real( hhatf)))
axca.set_ylabel(r'$\phi(\omega)$',size='x-large')
axca.set_xlabel(r'$\omega \, (rad/sec)$', size='x-large')
| mit |
youngmp/park_and_ermentrout_2016 | generate_figures.py | 1 | 45864 | """
Run to generate figures.
Requires TeX; you may need to install texlive-extra-utils on Linux.
The main() function at the end calls the preceding individual figure functions.
Figures are saved as both png and pdf.
Copyright (c) 2016, Youngmin Park, Bard Ermentrout
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import matplotlib
from matplotlib.ticker import MultipleLocator
import matplotlib.ticker as mticker
import matplotlib.pylab as mp
import matplotlib.gridspec as gridspec
from matplotlib import rc
rc('text', usetex=True)
rc('font', family='serif', serif=['Computer Modern Roman'])
#from matplotlib import rcParams
#matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
#matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{bm}"]
matplotlib.rcParams['text.latex.preamble'] = [r'\boldmath \usepackage{bm}']
matplotlib.rcParams.update({'figure.autolayout': True})
sizeOfFont = 20
fontProperties = {'weight' : 'bold', 'size' : sizeOfFont}
lamomfsize=40 #lambda omega figure size
import phase_model
import lambda_omega
import euler
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib import pyplot as plt
from scipy.integrate import odeint
# modified specgram (use my_specgram)
from specgram_mod import *
# default parms (trb2)
default_gm0=.3;default_gm1=.5;
default_eps=.0025;default_f=.5
# default parms (lamom2)
default_eps_lamom=0.0025;default_f_lamom=1.
default_a=1.;default_alpha=1.
default_beta=1.
def trb2_p_fig(gm0=default_gm0,gm1=default_gm1,eps=default_eps,f=default_f,partype='p'):
"""
two weakly coupled trab models, periodic slowly varying parameter figure
data files created using trb2simple.ode and trb2simple_just1.ode
"""
# initialize
#filename = "trb2_psi_maxn_qp"+str(filenum)
#filename = "trb2_psi_maxn_p1_ref.dat"
filename = "trb2_psi_maxn_p1_ref2.dat" # with reviewer's fix
#filename = "trb2_psi_maxn_p1_refined_2tables.dat"
dat = np.loadtxt(filename)
psi0=np.mean(dat[:,1][:int(5/.05)])
T=dat[:,0][-1]
N = len(dat[:,0])
t = np.linspace(0,T,N)
noisefile = None
# generate data for plots
sol = euler.ESolve(phase_model.happrox,psi0,t,args=(gm0,gm1,f,eps,partype,noisefile))
full_model = np.abs(np.mod(dat[:,1]+.5,1)-.5) # [0] to make regular row array
slow_phs_model = np.abs(np.mod(sol+.5,1)-.5)[:,0]
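    # (added note) dat[:,1] and sol hold the phase difference in units of the
    # period (psi in [0,1)); np.abs(np.mod(x+.5,1)-.5) folds it onto [0,0.5],
    # i.e. the distance to the nearest in-phase state, which is then scaled by
    # 2*pi when plotted below.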
# create plot object
fig, ax1 = plt.subplots()
fig.set_size_inches(10,5)
## plot data+theory
ax1.scatter(dat[:,0]/1000.,full_model*2*np.pi,s=.5,facecolor="gray")
ax1.plot(np.linspace(0,dat[:,0][-1]/1000.,N),slow_phs_model*2*np.pi,lw=5,color="#3399ff")
ax1.set_ylabel(r'$\bm{|\phi(t)|}$',fontsize=20)
ax1.set_xlabel(r'$\bm{t (s)}$',fontsize=20)
# set tick intervals
myLocatorx = mticker.MultipleLocator(2000/1000.)
#myLocatory = mticker.MultipleLocator(.5)
ax1.xaxis.set_major_locator(myLocatorx)
#ax1.yaxis.set_major_locator(myLocatory)
# make plot fit window
ax1.set_yticks(np.arange(0,0.5,.125)*2*np.pi)
x_label = [r"$0$", r"$\pi/4$", r"$\pi/2$", r"$3\pi/4$"]
#x_label = [r"$0$", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"]
ax1.set_yticklabels(x_label, fontsize=lamomfsize)
ax1.set_ylim(np.amin([full_model])*2*np.pi,np.amax([full_model])*2*np.pi)
ax1.set_xlim(dat[:,0][0]/1000.,dat[:,0][-1]/1000.)
## plot P param
ax2 = ax1.twinx()
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=20,color='red')
# slowly varying parameter
gm = gm0+(gm1-gm0)*np.cos(eps*f*t)
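    # (added note) with the defaults gm0=0.3, gm1=0.5, eps=0.0025, f=0.5 this
    # sweeps gm between 0.1 and 0.5 with period 2*pi/(eps*f) ~ 5027 ms, so the
    # parameter drifts on a timescale of seconds while the neurons spike on a
    # timescale of tens of milliseconds (the weak/slow separation used here).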
# set tick intervals
myLocatory2 = mticker.MultipleLocator(.1)
ax2.yaxis.set_major_locator(myLocatory2)
# make param plot fit window
ax2.set_xlim(dat[:,0][0]/1000.,dat[:,0][-1]/1000.)
ax2.set_ylim(np.amin(gm),np.amax(gm))
# plot param + stability line
ax2.plot(t/1000.,gm,lw=4,color="red",linestyle='--',dashes=(10,2))
ax2.plot([dat[:,0][0]/1000.,dat[:,0][-1]/1000.],[0.3,0.3],lw=2,color='red')
# set ticks to red
for tl in ax2.get_yticklabels():
tl.set_color('r')
# beautify
ax1.tick_params(labelsize=20,top='off')
ax1.tick_params(axis='x',pad=8)
ax2.tick_params(labelsize=20,top='off')
plt.gcf().subplots_adjust(bottom=0.15)
return fig
def trb2newpar_p_fig(gm0=default_gm0,gm1=default_gm1,eps=default_eps,f=default_f,partype='p'):
"""
two weakly coupled trab models, periodic slowly varying parameter figure, with parameters in interval [0.05,0.3]
data files created using trb2_new_params/trb2simple_newpar.ode
"""
# initialize
# no more switch from stable/unstable. There always exists a stable point
#filename = "trb2_new_params/trb2newpar_psi_p.dat" # no normalization by variance
filename = "trb2_new_params/trb2newpar_psi_p2.dat" # includes normalization by variance
dat = np.loadtxt(filename)
psi0=np.mean(dat[:,1][:int(5/.05)])
T=dat[:,0][-1]
N = len(dat[:,0])
dt = T/(1.*N)
t = np.linspace(0,T,N)
noisefile = None
# generate data for plots
sol = euler.ESolve(phase_model.happrox_newpar,psi0,t,args=(gm0,gm1,f,eps,partype,noisefile))
full_model = np.abs(np.mod(dat[:,1]+.5,1)-.5) # [0] to make regular row array
slow_phs_model = np.abs(np.mod(sol+.5,1)-.5)[:,0]
# create plot object
fig, ax1 = plt.subplots()
fig.set_size_inches(10,5)
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
## plot data+theory
ax1.scatter(dat[:,0]/1000.,full_model*2*np.pi,s=.5,facecolor="gray")
ax1.plot(np.linspace(0,dat[:,0][-1]/1000.,N),slow_phs_model*2*np.pi,lw=5,color="#3399ff")
myLocatorx = mticker.MultipleLocator(2000/1000.)
#myLocatory = mticker.MultipleLocator(.5)
ax1.xaxis.set_major_locator(myLocatorx)
#ax1.yaxis.set_major_locator(myLocatory)
ax1.set_yticks(np.arange(0,0.5+.125,.125)*2*np.pi)
x_label = [r"$0$", r"$\pi/4$", r"$\pi/2$", r"$3\pi/4$", r"$\pi"]
#x_label = [r"$0$", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"]
ax1.set_yticklabels(x_label, fontsize=lamomfsize)
ax1.set_ylabel(r'$\bm{|\phi(t)|}$',fontsize=20)
ax1.set_xlabel(r'$\bm{t (s)}$',fontsize=20)
# make plot fit window
ax1.set_ylim(np.amin([full_model])*2*np.pi,np.amax(full_model)*2*np.pi)
ax1.set_xlim(dat[:,0][0]/1000.,dat[:,0][-1]/1000.)
## plot P param
ax2 = ax1.twinx()
gm = gm0+(gm1-gm0)*np.cos(eps*f*t)
ax2.set_xlim(dat[:,0][0]/1000.,dat[:,0][-1]/1000.)
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=20,color='red')
ax2.plot(t/1000.,gm,lw=4,color="red",linestyle='--',dashes=(10,2))
myLocatory2 = mticker.MultipleLocator(.05)
ax2.yaxis.set_major_locator(myLocatory2)
#ax2.plot([dat[:,0][0],dat[:,0][-1]],[0.3,0.3],lw=2,color='red')
for tl in ax2.get_yticklabels():
tl.set_color('r')
# beautify
ax1.tick_params(labelsize=20,top='off')
ax1.tick_params(axis='x',pad=8)
ax2.tick_params(labelsize=20,top='off')
plt.gcf().subplots_adjust(bottom=0.15)
return fig
def trb2_qp_fig(gm0=default_gm0,gm1=default_gm1,eps=default_eps,f=default_f,partype='qp'):
"""
two weakly coupled trab models, quasi-periodic slowly varying parameter figure
data files created using trb2simple.ode and trb2simple_just1.ode
"""
# initialize
#filename = "trb2_psi_maxn_qp"+str(filenum)
#filename = "trb2_psi_maxn_qp_ref.dat"
filename = "trb2_psi_maxn_qp_ref2.dat" # with reviewer fix
dat = np.loadtxt(filename)
psi0=np.mean(dat[:,1][:int(5/.05)])
T=dat[:,0][-1]
N = len(dat[:,0])
t = np.linspace(0,T,N)
noisefile = None
# generate data for plots
sol = euler.ESolve(phase_model.happrox,psi0,t,args=(gm0,gm1,f,eps,partype,noisefile))
full_model = np.abs(np.mod(dat[:,1]+.5,1)-.5) # [0] to make regular row array
slow_phs_model = np.abs(np.mod(sol+.5,1)-.5)[:,0]
# create plot object
rc('font', weight='bold')
fig, ax1 = plt.subplots()
fig.set_size_inches(10,5)
# plot data+theory
ax1.scatter(dat[:,0]/1000.,full_model*2*np.pi,s=.5,facecolor="gray")
ax1.plot(np.linspace(0,dat[:,0][-1]/1000.,N),slow_phs_model*2*np.pi,lw=5,color="#3399ff")
ax1.set_ylabel(r'$\bm{|\phi(t)|}$',fontsize=20)
ax1.set_xlabel(r'$\bm{t (s)}$',fontsize=20)
#myLocatorx = mticker.MultipleLocator(5000/1000.)
myLocatory = mticker.MultipleLocator(.5)
#ax1.xaxis.set_major_locator(myLocatorx)
ax1.yaxis.set_major_locator(myLocatory)
ax1.set_yticks(np.arange(0,0.5+.125,.125)*2*np.pi)
x_label = [r"$0$", r"$\pi/4$", r"$\pi/2$", r"$3\pi/4$", r"$\pi"]
#x_label = [r"$0$", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"]
ax1.set_yticklabels(x_label, fontsize=lamomfsize)
# make plot fit window
ax1.set_ylim(np.amin([full_model])*2*np.pi,np.amax(full_model)*2*np.pi)
ax1.set_xlim(dat[:,0][0]/1000.,dat[:,0][-1]/1000.)
## plot QP param
ax2 = ax1.twinx()
gm = gm0+((gm1-gm0)/2)*(np.cos(eps*f*t)+np.cos(np.sqrt(2)*eps*f*t))
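    # (added note) the two drive frequencies eps*f and sqrt(2)*eps*f are
    # incommensurate, so gm(t) never exactly repeats: this is the quasi-periodic
    # forcing referred to in the docstring (mean gm0, range [2*gm0-gm1, gm1]).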
ax2.plot(t/1000.,gm,lw=4,color="red",linestyle='--',dashes=(10,2))
ax2.plot([dat[:,0][0]/1000.,dat[:,0][-1]/1000.],[0.3,0.3],lw=2,color='red')
myLocatory2 = mticker.MultipleLocator(.1)
ax2.yaxis.set_major_locator(myLocatory2)
ax2.set_xlim(dat[:,0][0]/1000.,dat[:,0][-1]/1000.)
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=20,color='red')
for tl in ax2.get_yticklabels():
tl.set_color('r')
# beautify
ax1.tick_params(labelsize=20,top='off')
ax1.tick_params(axis='x',pad=8)
ax2.tick_params(labelsize=20,top='off')
plt.gcf().subplots_adjust(bottom=0.15)
return fig
def trb2_s_fig(filenum=4,gm0=default_gm0,gm1=default_gm1,eps=default_eps,f=default_f,partype='s'):
"""
two weakly coupled trab models, stochastic "slowly" varying parameter figure
data files created using
trb2simple.ode
trb2simple_just1.ode
generateou.ode
"""
# initialize
#filename = "trb2_psi_maxn_s"+str(filenum)+".dat"
#filename = "trb2_psi_maxn_s1.dat"
#filename = "trb2_psi_maxn_s"+str(filenum)+"_mu1k.dat"
filename = "trb2_psi_maxn_s"+str(filenum)+"_mu1k2.dat" # with reviewer edit
dat = np.loadtxt(filename)
psi0=np.mean(dat[:,1][:int(5/.05)])
T=dat[:,0][-1]
N = len(dat[:,0])
dt = T/(1.*N)
t = np.linspace(0,T,N)
#noisefile = np.loadtxt("ounormed"+str(filenum)+".tab")
noisefile = np.loadtxt("ounormed"+str(filenum)+"_mu1k.tab")
# generate data for plots
sol = euler.ESolve(phase_model.happrox,psi0,t,args=(gm0,gm1,f,eps,partype,noisefile))
full_model = np.abs(np.mod(dat[:,1]+.5,1)-.5) # [0] to make regular row array
slow_phs_model = np.abs(np.mod(sol+.5,1)-.5)[:,0]
# create plot object
fig = plt.figure()
fig.set_size_inches(10,7.5)
gs = gridspec.GridSpec(2,3)
ax1 = plt.subplot(gs[:1,:])
# plot data+theory
ax1.scatter(dat[:,0]/1000.,full_model*2*np.pi,s=.5,facecolor="gray")
ax1.plot(np.linspace(0,dat[:,0][-1]/1000.,N),slow_phs_model*2*np.pi,lw=4,color="#3399ff")
ax1.set_ylabel(r'$\bm{|\phi(t)|}$',fontsize=20)
ax1.set_yticks(np.arange(0,0.5+.125,.125)*2*np.pi)
x_label = [r"$0$", r"$\pi/4$", r"$\pi/2$", r"$3\pi/4$", r"$\pi$"]
#x_label = [r"$0$", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"]
ax1.set_yticklabels(x_label, fontsize=lamomfsize)
# make plot fit window
ax1.set_ylim(np.amin(full_model)*2*np.pi,np.amax(full_model)*2*np.pi)#np.amax(full_model))
ax1.set_xlim(dat[:,0][0]/1000.,dat[:,0][-1]/1000.)
#myLocatory = mticker.MultipleLocator(.5)
#ax1.yaxis.set_major_locator(myLocatory)
## plot s param
ax2 = plt.subplot(gs[1,:])
s_N = len(noisefile[3:])
ax2.plot(np.linspace(0,dat[:,0][-1]/1000.,s_N),(gm0+(gm1-gm0)*noisefile[3:]),lw=1,color="red")
ax2.plot([dat[:,0][0]/1000.,dat[:,0][-1]/1000.],[0.3,0.3],lw=3,color='red',linestyle='--',dashes=(10,2))
myLocatorx = mticker.MultipleLocator(2000/1000.)
ax2.xaxis.set_major_locator(myLocatorx)
ax2.set_xlim(dat[:,0][0]/1000.,dat[:,0][-1]/1000.)
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=20,color='red')
myLocatory2 = mticker.MultipleLocator(.1)
ax2.yaxis.set_major_locator(myLocatory2)
ax2.set_xlabel(r'$\bm{t (s)}$',fontsize=20)
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax1.tick_params(labelsize=20,
top='off',
right='off')
ax1.xaxis.set_ticklabels([])
#ax2.set_xticks([])
#ax2.set_yticks([])
ax2.tick_params(labelsize=20,
top='off',
right='off')
ax2.tick_params(axis='x',pad=8)
ax2.set_frame_on(False)
return fig
def lamom2_p_fig(q0,q1,eps=default_eps_lamom,
f=default_f_lamom,a=default_a,alpha=default_alpha,
beta=default_beta,partype='p'):
"""
two weakly coupled lambda-omega models, periodic slowly varying parameter figure
the model is simulated in this function. calls functions from lambda_omega.py
"""
# initialize
#filename = "trb2_psi_maxn_qp"+str(filenum)
trueperiod = 2*np.pi
T = trueperiod*2000
dt = 0.05
N = int(T/dt)
t = np.linspace(0,T,N)
noisefile = None
initc = [2/np.sqrt(2),2/np.sqrt(2),-2/np.sqrt(2),2/np.sqrt(2)]
# generate data for plots
lcsolcoupled = odeint(lambda_omega.lamom_coupled,initc,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
phi1init = np.arctan2(initc[1],initc[0])
phi2init = np.arctan2(initc[3],initc[2])
# compute hodd
# get theory phase
phi_theory = odeint(lambda_omega.Hodd,
phi2init-phi1init,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
theta1 = np.arctan2(lcsolcoupled[:,1],lcsolcoupled[:,0])
theta2 = np.arctan2(lcsolcoupled[:,3],lcsolcoupled[:,2])
phi_exp = np.mod(theta2-theta1+np.pi,2*np.pi)-np.pi
phi_theory = np.mod(phi_theory+np.pi,2*np.pi)-np.pi
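    # (added note) np.mod(x+pi, 2*pi)-pi wraps the phase difference onto
    # [-pi, pi) so the full simulation and the reduced phase model can be
    # compared on the same branch.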
# create plot object
fig, ax1 = plt.subplots()
fig.set_size_inches(10,5)
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
## plot data+theory
ax1.plot(t,phi_exp,lw=5,color='black')
ax1.plot(t,phi_theory,lw=5,color="#3399ff",ls='dashdot',dashes=(10,5))
if q0 == 0.9:
ax1.set_ylabel(r'$\bm{\phi(t)}$',fontsize=lamomfsize)
ax1.set_yticks(np.arange(0,0.5+.125,.125)*2*np.pi)
x_label = [r"$0$", r"$\pi/4$", r"$\pi/2$", r"$3\pi/4$", r"$\pi$"]
#x_label = [r"$0$", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"]
ax1.set_yticklabels(x_label, fontsize=lamomfsize)
padding = 0.1
ax1.set_ylim(-0.1,np.pi+0.1)
#ax1.set_xlabel(r'$\bm{t}$',fontsize=20)
#xtick_locs = np.arange(0,T+2000,2000,dtype='int')
#ytick_locs = np.arange(0,np.pi+0.5,0.5)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
#plt.xticks(xtick_locs, [r"" % x for x in xtick_locs])
#plt.yticks(ytick_locs, [r"$\mathbf{%s}$" % x for x in ytick_locs])
#fig = plt.figure(figsize=(15,7.5))
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
# make plot fit window
#ax1.set_ylim(np.amin([full_model]),np.amax(full_model))
#ax1.set_xlim(dat[:,0][0],dat[:,0][-1])
## plot P param
ax2 = ax1.twinx()
q = q0+q1*np.cos(eps*f*t)
# dumb hack to get bold right-side axis labels # used boldmath instead
#minval=np.amin(q);maxval=np.amax(q);increment=(maxval-minval)/8.
#ytick_loc2 = np.arange(minval,maxval+increment,increment)
#ytick_lab2 = []
# http://stackoverflow.com/questions/6649597/python-decimal-places-putting-floats-into-a-string
#for val in ytick_loc2:
# ytick_lab2.append(r'\boldmath ${0:.2f}$'.format(val))
#ax2.set_yticks(ytick_loc2)
#ax2.set_yticklabels(ytick_lab2)
ax2.set_xlim(0,T)
ax2.set_ylim(np.amin(q),np.amax(q))
if q0 == 1.1:
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=lamomfsize,color='red')
ax2.plot(t,q,lw=4,color="red",linestyle='--',dashes=(10,2))
ax2.plot([t[0],t[-1]],[1,1],lw=2,color='red')
for tl in ax2.get_yticklabels():
tl.set_color('r')
# beautify
ax1.tick_params(labelsize=lamomfsize,top='off')
ax2.tick_params(labelsize=lamomfsize,top='off')
plt.gcf().subplots_adjust(bottom=0.15)
#axes.set_xticks([])
#axes.set_yticks([])
#axes.set_frame_on(False)
ax1.set_xticks([])
#ax1.set_yticks([])
#ax1.set_frame_on(False)
#ax1.tick_params(labelsize=16)
#ax2.set_xticks([])
#ax2.set_yticks([])
#ax2.set_frame_on(False)
#ax2.tick_params(labelsize=16)
return fig
def lamom2_qp_fig(q0,q1,eps=default_eps_lamom,
f=default_f_lamom,a=default_a,alpha=default_alpha,
beta=default_beta,partype='qp'):
"""
two weakly coupled lambda-omega models, quasi-periodic slowly varying parameter figure
the model is simulated in this function. calls functions from lambda_omega.py
"""
# initialize
#filename = "trb2_psi_maxn_qp"+str(filenum)
trueperiod = 2*np.pi
T = trueperiod*2000
dt = 0.05
N = int(T/dt)
t = np.linspace(0,T,N)
noisefile = None
initc = [2/np.sqrt(2),2/np.sqrt(2),-2/np.sqrt(2),2/np.sqrt(2)]
# generate data for plots
lcsolcoupled = odeint(lambda_omega.lamom_coupled,initc,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
phi1init = np.arctan2(initc[1],initc[0])
phi2init = np.arctan2(initc[3],initc[2])
# compute hodd
# get theory phase
phi_theory = odeint(lambda_omega.Hodd,
phi2init-phi1init,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
theta1 = np.arctan2(lcsolcoupled[:,1],lcsolcoupled[:,0])
theta2 = np.arctan2(lcsolcoupled[:,3],lcsolcoupled[:,2])
phi_exp = np.mod(theta2-theta1+np.pi,2*np.pi)-np.pi
phi_theory = np.mod(phi_theory+np.pi,2*np.pi)-np.pi
# create plot object
fig, ax1 = plt.subplots()
fig.set_size_inches(10,5)
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
## plot data+theory
ax1.plot(t,phi_exp,lw=5,color='black')
ax1.plot(t,phi_theory,lw=5,color="#3399ff",ls='dashdot',dashes=(10,5))
if q0 == 0.9:
ax1.set_ylabel(r'$\bm{\phi(t)}$',fontsize=lamomfsize)
ax1.set_xlabel(r'$\bm{t}$',fontsize=lamomfsize)
ax1.xaxis.set_major_locator(MultipleLocator(4000))
ax1.set_yticks(np.arange(0,0.5+.125,.125)*2*np.pi)
x_label = [r"$0$", r"$\pi/4$", r"$\pi/2$", r"$3\pi/4$", r"$\pi$"]
#x_label = [r"$0$", r"$\frac{\pi}{4}$", r"$\frac{\pi}{2}$", r"$\frac{3\pi}{4}$", r"$\pi$"]
ax1.set_yticklabels(x_label, fontsize=lamomfsize)
ax1.set_ylim(-0.1,np.pi+0.1)
#xtick_locs = np.arange(0,T+2000,2000,dtype='int')
#ytick_locs = np.arange(0,np.pi+0.5,0.5)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
#plt.xticks(xtick_locs, [r"" % x for x in xtick_locs])
#plt.yticks(ytick_locs, [r"$\mathbf{%s}$" % x for x in ytick_locs])
#fig = plt.figure(figsize=(15,7.5))
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
# make plot fit window
#ax1.set_ylim(np.amin([full_model]),np.amax(full_model))
#ax1.set_xlim(dat[:,0][0],dat[:,0][-1])
## plot P param
ax2 = ax1.twinx()
q = q0+(q1/2.)*(np.cos(eps*f*t)+np.cos(np.sqrt(2)*eps*f*t))
# dumb hack to get bold right-side axis labels
#minval=np.amin(q);maxval=np.amax(q);increment=(maxval-minval)/8.
#ytick_loc2 = np.arange(minval,maxval+increment,increment)
#ytick_lab2 = []
# http://stackoverflow.com/questions/6649597/python-decimal-places-putting-floats-into-a-string
#for val in ytick_loc2:
# ytick_lab2.append(r'\boldmath ${0:.2f}$'.format(val))
#ax2.set_yticks(ytick_loc2)
#ax2.set_yticklabels(ytick_lab2)
ax2.set_xlim(0,T)
ax2.set_ylim(np.amin(q),np.amax(q))
if q0 == 1.1:
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=lamomfsize,color='red')
ax2.plot(t,q,lw=4,color="red",linestyle='--',dashes=(10,2))
ax2.plot([t[0],t[-1]],[1,1],lw=2,color='red')
for tl in ax2.get_yticklabels():
tl.set_color('r')
# beautify
ax1.tick_params(labelsize=lamomfsize,top='off')
ax2.tick_params(labelsize=lamomfsize,top='off')
plt.gcf().subplots_adjust(bottom=0.15)
#axes.set_xticks([])
#axes.set_yticks([])
#axes.set_frame_on(False)
#ax1.set_xticks([])
#ax1.set_yticks([])
#ax1.set_frame_on(False)
#ax1.tick_params(labelsize=16)
#ax2.set_xticks([])
#ax2.set_yticks([])
#ax2.set_frame_on(False)
#ax2.tick_params(labelsize=16)
return fig
def lamom2_s_fig(q0,q1,filenum,eps=default_eps_lamom,
f=default_f_lamom,a=default_a,alpha=default_alpha,
beta=default_beta,partype='s'):
"""
two weakly coupled lambda-omega models, stochastic "slowly" varying parameter figure
the model is simulated in this function. calls functions from lambda_omega.py
filenum: seed
"""
# initialize
#filename = "trb2_psi_maxn_s"+str(filenum)+".dat"
#filename = "trb2_psi_maxn_s1.dat"
dt=.05
noisefile = np.loadtxt("ounormed"+str(filenum)+"_mu1k.tab")
total = noisefile[2]
    t = np.linspace(0,total,int(total/dt))
initc = [2/np.sqrt(2),2/np.sqrt(2),-2/np.sqrt(2),2/np.sqrt(2)]
# generate data for plots
lcsolcoupled = euler.ESolve(lambda_omega.lamom_coupled,initc,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
phi1init = np.arctan2(initc[1],initc[0])
phi2init = np.arctan2(initc[3],initc[2])
# compute Hodd
# get theory phase
phi_theory = euler.ESolve(lambda_omega.Hodd,
phi2init-phi1init,t,args=(a,alpha,beta,eps,q0,q1,f,dt,partype,noisefile))
theta1 = np.arctan2(lcsolcoupled[:,1],lcsolcoupled[:,0])
theta2 = np.arctan2(lcsolcoupled[:,3],lcsolcoupled[:,2])
phi_exp = np.mod(theta2-theta1+np.pi,2*np.pi)-np.pi
phi_theory = np.mod(phi_theory+np.pi,2*np.pi)-np.pi
# create plot object
fig = plt.figure()
gs = gridspec.GridSpec(2,3)
#ax1 = plt.subplot2grid((3,3),(0,0),colspan=3,rowspan=2)
#ax2 = plt.subplot2grid((3,3),(2,0),colspan=3)
ax1 = plt.subplot(gs[:1,:])
# bold tick labels
ax1.set_yticks(np.arange(0,0.5+.125,.125)*2*np.pi)
x_label = [r"$0$", r"$\pi/4$", r"$\pi/2$", r"$3\pi/4$", r"$\pi$"]
ax1.set_yticklabels(x_label, fontsize=lamomfsize)
#ytick_locs = np.arange(np.amin(phi_theory),np.amax(phi_theory),
# (np.amax(phi_theory)-np.amin(phi_theory))/8.)
#plt.yticks(ytick_locs, [r"$\mathbf{%1.1f}$" % x for x in ytick_locs])
ax2 = plt.subplot(gs[1,:])
#fig, axarr = plt.subplots(2, sharex=True)
#axarr[0] = plt.subplot2grid(
fig.set_size_inches(10,7.5)
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
# plot data+theory
ax1.plot(t,phi_exp,lw=5,color="black")
ax1.plot(t,phi_theory,lw=5,color="#3399ff",ls='dashdot',dashes=(10,5))
if q0 == .9:
ax1.set_ylabel(r'$\bm{\phi(t)}$',fontsize=lamomfsize)
#ax1.yaxis.set_major_locator(MultipleLocator(0.4))
# make plot fit window
#ax1.set_ylim(np.amin(full_model),0.3)#np.amax(full_model))
#ax1.set_xlim(dat[:,0][0],dat[:,0][-1])
ax1.set_xlim(0,total)
ax1.set_ylim(-0.1,np.pi+0.1)
# plot s param
q = q0+(q1)*noisefile[3:]
print 'mean =',np.mean(q),'for seed='+str(filenum)
#ax2 = plt.subplots(2,1,1)
#ax2 = ax1.twinx()
s_N = len(noisefile[3:])
s_N_half = s_N#int(s_N/2.)
ax2.plot(np.linspace(0,t[-1],s_N),q,lw=1,color="red")
ax2.plot([t[0],t[-1]],[1,1],lw=3,color='red',linestyle='--',dashes=(10,2))
#ax2.set_xlim(dat[:,0][0],dat[:,0][-1])
if q0 == .9:
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=lamomfsize,color='red')
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax2.set_xlabel(r'$\bm{t}$',fontsize=lamomfsize)
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax2.yaxis.set_major_locator(MultipleLocator(0.4))
ax2.xaxis.set_major_locator(MultipleLocator(4000))
ax2.set_xlim(0,total)
#xtick_locs = np.arange(t[0], t[-1], 2000,dtype='int')
#minval=np.amin(q);maxval=np.amax(q)
#ytick_locs = np.arange(minval,maxval,(maxval-minval)/8.)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
#plt.yticks(ytick_locs, [r"$\mathbf{%1.1f}$" % x for x in ytick_locs])
#axes.set_xticks([])
#axes.set_yticks([])
#axes.set_frame_on(False)
ax1.set_xticks([])
#ax1.set_yticks([])
#ax1.set_frame_on(False)
ax1.tick_params(labelsize=lamomfsize,
top='off',
right='off')
#ax2.set_xticks([])
#ax2.set_yticks([])
ax2.tick_params(labelsize=lamomfsize,
top='off',
right='off')
ax2.set_frame_on(False)
return fig
def lo_inhom():
"""
weakly coupled lambda-omega with slight frequency difference
data generated in XPP
"""
phi_full_0025=np.loadtxt('phi-full-0025.dat')
phi_reduce_0025=np.loadtxt('phi-reduce-0025.dat')
phi_full_025=np.loadtxt('phi-full-025.dat')
phi_reduce_025=np.loadtxt('phi-reduce-025.dat')
# create plot object
fig = plt.figure()
fig.set_size_inches(10,7.5)
gs = gridspec.GridSpec(2,3)
ax1 = plt.subplot(gs[:1,:])
# plot data+theory for eps=.025
ax1.plot([0,phi_full_025[-1,0]],[np.pi,np.pi],color='gray',lw=1.7)
ax1.plot([0,phi_full_025[-1,0]],[0,0],color='gray',lw=1.7)
ax1.plot([0,phi_full_025[-1,0]],[2*np.pi,2*np.pi],color='gray',lw=1.7)
ax1.plot(phi_full_025[:,0],phi_full_025[:,1],lw=3,color="black")
ax1.plot(phi_reduce_025[:,0],phi_reduce_025[:,1],lw=2,color="#3399ff",ls='dashdot',dashes=(10,1))
# bold axis labels
min1=np.amin(phi_full_025[:,1]);max1=np.amax(phi_full_025[:,1])
padding1 = (max1-min1)/16.
xtick_locs1 = np.arange(phi_full_025[0,0],phi_full_025[-1,0], 2000,dtype='int')
#ytick_locs1 = np.arange(min1,max1,np.pi/2)#padding1*2)
ax1.set_yticks(np.arange(0,1+.25,.25)*2*np.pi)
x_label = [r"$0$", r"$\frac{\pi}{2}$", r"$\pi$", r"$\frac{3\pi}{2}$", r"$2\pi$"]
ax1.set_yticklabels(x_label, fontsize=20)
plt.xticks(xtick_locs1, [r"$\mathbf{%s}$" % x for x in xtick_locs1])
#plt.yticks(ytick_locs1, [r"$\mathbf{%1.1f}$" % x for x in ytick_locs1])
# make plot fit window
ax1.set_ylim(min1-padding1,max1+padding1)#np.amax(full_model))
# axis labels
ax1.set_ylabel(r'$\bm{\phi(t)}$',fontsize=20)
ax1.set_xlabel(r'$\bm{t}$',fontsize=20)
ax2 = plt.subplot(gs[1,:])
# plot data+theory for eps=.0025
ax2.plot([0,phi_full_0025[-1,0]],[np.pi,np.pi],color='gray',lw=1.7)
ax2.plot([0,phi_full_0025[-1,0]],[0,0],color='gray',lw=1.7)
ax2.plot([0,phi_full_0025[-1,0]],[2*np.pi,2*np.pi],color='gray',lw=1.7)
ax2.plot(phi_full_0025[:,0],phi_full_0025[:,1],lw=3,color="black")
ax2.plot(phi_reduce_0025[:,0],phi_reduce_0025[:,1],lw=2,color="#3399ff",ls='dashdot',dashes=(10,2))
# bold tick labels
min2=np.amin(phi_full_0025[:,1]);max2=np.amax(phi_full_0025[:,1])
padding2 = (max2-min2)/16.
xtick_locs2 = np.arange(phi_full_0025[0,0],phi_full_0025[-1,0], 20000,dtype='int')
#ytick_locs2 = np.arange(min2,max2,2*padding2)
ax2.set_yticks(np.arange(0,1+.25,.25)*2*np.pi)
x_label = [r"$0$", r"$\frac{\pi}{2}$", r"$\pi$", r"$\frac{3\pi}{2}$", r"$2\pi$"]
ax2.set_yticklabels(x_label, fontsize=20)
plt.xticks(xtick_locs2, [r"$\mathbf{%s}$" % x for x in xtick_locs2])
#plt.yticks(ytick_locs2, [r"$\mathbf{%1.1f}$" % x for x in ytick_locs2])
# make plot fit window
ax2.set_ylim(min2-padding2,max2+padding2)#np.amax(full_model))
# axis labels
ax2.set_ylabel(r'$\bm{\phi(t)}$',fontsize=20)
ax2.set_xlabel(r'$\bm{t}$',fontsize=20)
#axes.set_xticks([])
#axes.set_yticks([])
#axes.set_frame_on(False)
#ax1.set_xticks([])
#ax1.set_yticks([])
#ax1.set_frame_on(False)
ax1.tick_params(labelsize=20,
top='off',
right='off')
#ax2.set_xticks([])
#ax2.set_yticks([])
ax2.tick_params(labelsize=20,
top='off',
right='off')
#ax2.set_frame_on(False)
return fig
def trb2_prc_hodd():
"""
comparison of traub model PRCs for different parameter values + Fourier approximation
at gm=0.1 and gm=0.5
"""
adj1 = np.loadtxt('trb2_adjoint.gm_0.1.dat')
adj5 = np.loadtxt('trb2_adjoint.gm_0.5.dat')
hfun1 = np.loadtxt('trb2_hfun.gm_0.1.dat')
hfun5 = np.loadtxt('trb2_hfun.gm_0.5.dat')
# create plot object
fig = plt.figure()
fig.set_size_inches(10,7.5)
gs = gridspec.GridSpec(2,3)
ax1 = plt.subplot(gs[:1,:])
# plot adjoint gm=.1,gm=.5
ax1.plot(np.linspace(0,2*np.pi,len(adj1[:,1])),adj1[:,1],lw=6,color="blue")
ax1.plot(np.linspace(0,2*np.pi,len(adj5[:,1])),adj5[:,1],lw=6,color="red")#,ls='dashdot',dashes=(10,3))
ax1.text(.55*2*np.pi,1.2,r'$\bm{q=0.5}$',fontsize=24)
ax1.text(.18*2*np.pi,.25,r'$\bm{q=0.1}$',fontsize=24)
# text label for gm
#ax1.text()
# bold axis labels
min1=np.round(np.amin(adj5[:,1]),1);max1=np.round(np.amax(adj5[:,1]),1)
padding1 = (max1-min1)/16.
#padding_alt = np.round((max1-min1)/5.,decimals=1)
#xtick_locs1 = np.linspace(0,1,6)#,dtype='int')
#ytick_locs1 = np.arange(min1,max1+padding1,padding1*4)
#ytick_locs1 = np.arange(min1,max1+padding_alt,padding_alt)
#plt.xticks(xtick_locs1, [r"$\mathbf{%s}$" % x for x in xtick_locs1])
#plt.yticks(ytick_locs1, [r"$\mathbf{%1.1f}$" % x for x in ytick_locs1])
#plt.yticks(ytick_locs1, [r"$\mathbf{%1.1f}$" % x for x in ytick_locs1])
# make plot fit window
ax1.set_ylim(min1-padding1,max1+padding1)#np.amax(full_model))
ax1.set_xlim(0,2*np.pi)
# axis labels
ax1.set_ylabel(r'$\bm{Z}$',fontsize=20)
ax1.set_xlabel(r'$\bm{\phi}$',fontsize=20)
ax1.set_xticks(np.arange(0,1+.25,.25)*2*np.pi)
x_label = [r"$0$", r"$\frac{\pi}{2}$", r"$\pi$", r"$\frac{3\pi}{2}$", r"$2\pi$"]
ax1.set_xticklabels(x_label, fontsize=20)
ax2 = plt.subplot(gs[1,:])
# actual hfunctions
hodd1 = -(np.flipud(hfun1[:,1])-hfun1[:,1])/2.
hodd5 = -(np.flipud(hfun5[:,1])-hfun5[:,1])/2.
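    # (added note) Hodd(phi) = (H(phi) - H(-phi))/2 is the odd part of the
    # interaction function; since H is periodic, H(-phi) is obtained by
    # flipping the sampled H, hence the flipud. Phase-locked states of the
    # coupled pair sit at zeros of Hodd, with stability set by the sign of the
    # slope at the crossing (under the sign convention used in phase_model.py).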
# approx. hfunctions # call from phase model
b11=.7213877067760438022;b15=-6.24915908247
b21=0.738313204983;b25=1.43126232962
phi = np.linspace(0,2*np.pi,30)
happroxgm1=2*(b11*np.sin(phi)+b21*np.sin(2*phi))
happroxgm5=2*(b15*np.sin(phi)+b25*np.sin(2*phi))
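    # (added note) happroxgm1/happroxgm5 are two-term Fourier sine truncations
    # of Hodd, 2*(b1*sin(phi) + b2*sin(2*phi)), with coefficients b11, b21
    # (q=0.1) and b15, b25 (q=0.5) presumably copied from phase_model.py; they
    # are plotted with a minus sign below to match the sign convention of the
    # numerically computed hodd curves.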
# gm1
ax2.plot(np.linspace(0,2*np.pi,len(hfun1[:,1])),hodd1,lw=7,color="blue")
ax2.plot(phi,-happroxgm1,lw=5,color='black',ls='dashed',dashes=(5,2))
ax2.plot(phi,-happroxgm1,lw=3,color='cyan',ls='dashed',dashes=(5,2))
ax2.text(.1*2*np.pi,-6,r'$\bm{q=0.1}$',fontsize=24)
#gm 5
#ax2.plot(np.linspace(0,1,len(hfun5[:,1])),hodd5,lw=7,color="red",ls='dashdot',dashes=(10,3))
ax2.plot(np.linspace(0,2*np.pi,len(hfun5[:,1])),hodd5,lw=7,color="red")
ax2.plot(phi,-happroxgm5,lw=5,color='black',ls='dashed',dashes=(5,2))
ax2.plot(phi,-happroxgm5,lw=3,color='#ffd80a',ls='dashed',dashes=(5,2))
ax2.text(.4*2*np.pi,11,r'$\bm{q=0.5}$',fontsize=24)
#ax2.plot(phi,-happroxgm1,lw=3,color='#3399ff',marker='s',markersize=10)
#ax2.plot(phi,-happroxgm5,lw=3,color='#ff9999',marker='D',markersize=10)
"""
plot horizontal zero line + zero intersections
"""
# get idx of zero crossings of Hodd for q=0.1:
zero_crossings = np.where(np.diff(np.sign(hodd1)))[0]
# horizontal line at Hodd=0
ax2.plot([0,2*np.pi],[0,0],color='gray',zorder=-3,lw=3)
    xx = np.linspace(0,2*np.pi,len(hfun1[:,1]))
if len(zero_crossings) > 0:
for idx in zero_crossings:
# plot zero crossings (above horizontal line in zorder)
ax2.scatter(xx[idx],hodd1[idx],s=300,zorder=-2,facecolor='black',edgecolor='black')
# bold tick labels
min2=np.round(np.amin(hodd5));max2=np.round(np.amax(hodd5))
padding2 = (max2-min2)/16.
#padding2_alt = (max2-min2)/5.
#xtick_locs2 = np.linspace(0,1,6)#,dtype='int')
#ytick_locs2 = np.arange(min2,max2+padding2_alt,padding2_alt)
#plt.xticks(xtick_locs2, [r"$\mathbf{%s}$" % x for x in xtick_locs2])
#plt.yticks(ytick_locs2, [r"$\mathbf{%1.1f}$" % x for x in ytick_locs2])
# make plot fit window
ax2.set_ylim(min2-padding2,max2+padding2)#np.amax(full_model))
ax2.set_xlim(0,2*np.pi)
# axis labels
ax2.set_ylabel(r'$\bm{H_{odd}(\phi)}$',fontsize=20)
ax2.set_xlabel(r'$\bm{\phi}$',fontsize=20)
ax2.set_xticks(np.arange(0,1+.25,.25)*2*np.pi)
ax2.set_xticklabels(x_label, fontsize=20)
#axes.set_xticks([])
#axes.set_yticks([])
#axes.set_frame_on(False)
#ax1.set_xticks([])
#ax1.set_yticks([])
#ax1.set_frame_on(False)
ax1.tick_params(labelsize=20,
axis='x',pad=10)
ax1.tick_params(labelsize=20,
top='off',
right='off',
axis='both')
#ax2.set_xticks([])
#ax2.set_yticks([])
ax2.tick_params(labelsize=20,
axis='x',pad=10)
ax2.tick_params(labelsize=20,
top='off',
right='off',
axis='both')
#ax2.set_frame_on(False)
return fig
def trb50_specgram():
"""
spectrogram of 50 weakly coupled traub models.
"""
dt = 0.1
#x = np.loadtxt('vtot-stot-gmod-v25.dat') # all signals
x = np.loadtxt('gooddata50.dat') # all signals
t = x[:,0]/1000-2 # convert to units of s
vtot = x[:,1] # total voltage signal
stot = x[:,2] # total syn signal
g = x[:,3] # param
#t = np.linspace(0,100,100/dt)
#vtot = sin(t*20*np.pi*dt)
NFFT = 4096 # the length of the windowing segments (units of ms/dt)
no = 4000
    Fs = int(1000.0/dt) # sampling frequency in Hz (dt is in ms)
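    # (added note) with dt = 0.1 ms, Fs = 10 kHz; NFFT = 4096 samples is a
    # ~410 ms window (frequency resolution Fs/NFFT ~ 2.4 Hz), and noverlap =
    # 4000 gives a hop of 96 samples (9.6 ms) between spectrogram columns.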
#fig = plt.figure(figsize=(15,7.5))
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
fig = mp.figure()
fig.set_size_inches(10,7.5)
# plot Vtot
ax1 = mp.subplot(211)
ax1.set_title('')
ax1.set_ylabel(r'$\textbf{Membrane Potential}$',fontsize=20)
#ax1.set_xticks([])
mp.plot(t,vtot)
#xtick_locs = range(5000, 20000, 2000)
ytick_locs = np.arange(-85,-40,5)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
plt.yticks(ytick_locs, [r"$\mathbf{%s}$" % x for x in ytick_locs])
# plot param
ax2 = ax1.twinx()
#ax3 = mp.subplot(313)
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=20,color='red')
#ax2.set_xlabel(r'\textbf{t (s)}')
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax2.plot(t,g,lw=5,color='red')
# dumb hack to get bold right-side axis labels
#minval=np.amin(g);maxval=np.amax(g);increment=(maxval-minval)/8.
ytick_loc2 = np.arange(0,.6,.1)#np.arange(minval,maxval+increment,increment)
ytick_lab2 = []
# http://stackoverflow.com/questions/6649597/python-decimal-places-putting-floats-into-a-string
for val in ytick_loc2:
ytick_lab2.append(r'\boldmath ${0:.1f}$'.format(val))
ax2.set_yticks(ytick_loc2)
ax2.set_yticklabels(ytick_lab2)
# plot spectrogram
ax3 = mp.subplot(212, sharex=ax1)
minfreq=15;maxfreq=120
Pxx, freqs, bins, im = my_specgram(vtot, NFFT=NFFT, Fs=Fs, noverlap=no,minfreq=minfreq,maxfreq=maxfreq)#,
#cmap=cm.gist_heat)
#ax2.specgram(vtot, NFFT=NFFT, Fs=Fs, noverlap=no)#,
ax3.set_ylabel(r'$\textbf{Frequency}$', fontsize=20)
ax3.set_ylim(minfreq,maxfreq)
# colorbar
cbar = fig.colorbar(im, orientation='horizontal',shrink=.8,pad=.25)
cbar.set_label(r'$\textbf{Intensity}$')
# bold x,y-ticks
xtick_locs3 = np.arange(0,14,2)
ytick_locs3 = np.arange(20,140,20)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
plt.xticks(xtick_locs3, [r"$\mathbf{%s}$" % x for x in xtick_locs3])
plt.yticks(ytick_locs3, [r"$\mathbf{%s}$" % x for x in ytick_locs3])
ax3.set_xlabel(r'$\textbf{Time (Seconds)}$',fontsize=20)
# beautify
ax1.tick_params(labelsize=20,top='off',labelbottom='off')
ax2.tick_params(labelsize=20,top='off')
ax3.tick_params(labelsize=20,top='off')
return fig
def trb50_op():
"""
order parameter of 50 weakly coupled traub models
"""
fig = mp.figure()
fig.set_size_inches(10,5)
dat = np.loadtxt('gm-op.dat')
t=dat[:,0];op=dat[:,1];gm=dat[:,2]
mp.plot(t,op,color='black',lw=3)
mp.plot(t,gm,color='red',lw=3)
mp.text(25,.53,'$\mathbf{q(t)}$',color='red',fontsize=20)
mp.xlim(t[0],t[-1])
mp.xlabel(r'\textbf{Time (Seconds)}',fontsize=20)
mp.ylabel(r'\textbf{Order Parameter}',fontsize=20)
xtick_locs = np.arange(20,75,10)
ytick_locs = np.arange(0,1.02,0.2)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
plt.yticks(ytick_locs, [r"$\mathbf{%s}$" % x for x in ytick_locs])
mp.tick_params(labelsize=20,top='off')
#ax1 = mp.subplot(111)
#ax1.plot(t,op)
#ax2 = ax1.twinx()
#ax2.plot(t,gm,color='red')
return fig
def trb50_specgram_op():
"""
Combined specgram, order parameter fig
"""
dt = 0.1
#x = np.loadtxt('vtot-stot-gmod-v25.dat') # all signals
x = np.loadtxt('gooddata50.dat') # all signals
t = x[:,0]/1000-2 # convert to units of s
vtot = x[:,1] # total voltage signal
stot = x[:,2] # total syn signal
g = x[:,3] # param
#t = np.linspace(0,100,100/dt)
#vtot = sin(t*20*np.pi*dt)
NFFT = 4096 # the length of the windowing segments (units of ms/dt)
no = 4000
    Fs = int(1000.0/dt) # sampling frequency in Hz (dt is in ms)
#fig = plt.figure(figsize=(15,7.5))
#axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
fig = plt.figure()
fig.set_size_inches(10,12.5)
# plot Vtot
ax1 = fig.add_subplot(311)
ax1.set_title('')
ax1.set_ylabel(r'$\textbf{Membrane Potential}$',fontsize=20)
#ax1.set_xticks([])
ax1.plot(t,vtot)
#xtick_locs = range(5000, 20000, 2000)
#ytick_locs = np.arange(-85,-40,5)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
#plt.yticks(ytick_locs, [r"$\mathbf{%s}$" % x for x in ytick_locs])
sublabelsize=25 # subfigure label (a),(b),(c) font size
#from matplotlib.font_manager import FontProperties
ax1.text(-1.7,-40,r'$\textbf{(a)}$',fontsize=sublabelsize)
# plot param
ax2 = ax1.twinx()
#ax3 = mp.subplot(313)
ax2.set_ylabel(r'$\bm{q(t)}$',fontsize=20,color='red')
#ax2.set_xlabel(r'\textbf{t (s)}')
for tl in ax2.get_yticklabels():
tl.set_color('r')
ax2.plot(t,g,lw=5,color='red')
# dumb hack to get bold right-side axis labels
#minval=np.amin(g);maxval=np.amax(g);increment=(maxval-minval)/8.
#ytick_loc2 = np.arange(0,.6,.1)#np.arange(minval,maxval+increment,increment)
#ytick_lab2 = []
# http://stackoverflow.com/questions/6649597/python-decimal-places-putting-floats-into-a-string
#for val in ytick_loc2:
# ytick_lab2.append(r'\boldmath ${0:.1f}$'.format(val))
#ax2.set_yticks(ytick_loc2)
#ax2.set_yticklabels(ytick_lab2)
# plot spectrogram
ax3 = fig.add_subplot(312, sharex=ax1)
minfreq=15;maxfreq=120
Pxx, freqs, bins, im = my_specgram(vtot, NFFT=NFFT, Fs=Fs, noverlap=no,minfreq=minfreq,maxfreq=maxfreq)#,
#cmap=cm.gist_heat)
#ax2.specgram(vtot, NFFT=NFFT, Fs=Fs, noverlap=no)#,
ax3.set_ylabel(r'$\textbf{Frequency}$', fontsize=20)
ax3.set_ylim(minfreq,maxfreq)
# colorbar
cbar = fig.colorbar(im, orientation='horizontal',shrink=.8,pad=.25)
cbar.set_label(r'$\textbf{Intensity}$',size=15)
#print dir(cbar)
cbar.ax.tick_params(labelsize=20)
# bold x,y-ticks
#xtick_locs3 = np.arange(0,14,2)
#ytick_locs3 = np.arange(20,140,20)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
#plt.xticks(xtick_locs3, [r"$\mathbf{%s}$" % x for x in xtick_locs3])
#plt.yticks(ytick_locs3, [r"$\mathbf{%s}$" % x for x in ytick_locs3])
ax3.set_xlabel(r'$\textbf{Time (Seconds)}$',fontsize=20)
ax3.text(-1.7,120,r'$\textbf{(b)}$',fontsize=sublabelsize)
## plot OP
ax4 = fig.add_subplot(313)
dat = np.loadtxt('gm-op.dat')
# convert units of ms/eps to Seconds
t=dat[:,0]*1./(0.0025*1000)
op=dat[:,1];gm=dat[:,2]
ax4.plot(t,op,color='black',lw=3)
ax4.plot(t,gm,color='red',lw=3)
ax4.text(9.7,.53,r'$\bm{q(t)}$',color='red',fontsize=20)
ax4.set_xlim(t[0],t[-1])
ax4.set_xlabel(r'\textbf{Time (Seconds)}',fontsize=20)
ax4.set_ylabel(r'\textbf{Order Parameter}',fontsize=20)
ax4.text(5,1,r'$\textbf{(c)}$',fontsize=sublabelsize)
#xtick_locs = np.arange(20,75,10)
#ytick_locs = np.arange(0,1.02,0.2)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
#plt.yticks(ytick_locs, [r"$\mathbf{%s}$" % x for x in ytick_locs])
#ax1 = mp.subplot(111)
#ax1.plot(t,op)
#ax2 = ax1.twinx()
#ax2.plot(t,gm,color='red')
# beautify
ax1.tick_params(labelsize=20,top='off',labelbottom='off')
ax2.tick_params(labelsize=20,top='off')
ax3.tick_params(labelsize=20,top='off')
ax4.tick_params(labelsize=20,top='off')
return fig
def fd_diagram():
"""
f-d parameter space.
"""
fd_wn0 = np.loadtxt('fd_wn0.dat')
fd_wn1 = np.loadtxt('fd_wn1.dat')
fig = plt.figure()
fig.set_size_inches(10,7.5)
# plot Vtot
ax1 = fig.add_subplot(111)
#ax1.set_title(r'\textbf{(a)}',x=-.1,y=1.08)
ax1.set_xlabel(r'$\textbf{d}$',fontsize=20)
ax1.set_ylabel(r'$\textbf{f}$',fontsize=20)
#ax1.set_xticks([])
ax1.scatter(fd_wn0[:,0],fd_wn0[:,1],marker='+',color='red')
ax1.scatter(fd_wn1[:,0],fd_wn1[:,1],marker='x',color='green')
ax1.set_xlim(0,.15)
ax1.set_ylim(0,2)
#xtick_locs = range(5000, 20000, 2000)
#ytick_locs = np.arange(-85,-40,5)
#plt.xticks(xtick_locs, [r"$\mathbf{%s}$" % x for x in xtick_locs])
#plt.yticks(ytick_locs, [r"$\mathbf{%s}$" % x for x in ytick_locs])
sublabelsize=25 # subfigure label (a),(b),(c) font size
#from matplotlib.font_manager import FontProperties
#ax1.text(-1.7,-40,r'$\textbf{(a)}$',fontsize=sublabelsize)
#ax1.tick_params(labelsize=20,top='off',labelbottom='off')
return fig
def generate_figure(function, args, filenames, title="", title_pos=(0.5,0.95)):
# workaround for python bug where forked processes use the same random
# filename.
#tempfile._name_sequence = None;
fig = function(*args)
#fig.text(title_pos[0], title_pos[1], title, ha='center')
if type(filenames) == list:
for name in filenames:
if name.split('.')[-1] == 'ps':
fig.savefig(name, orientation='landscape')
else:
fig.savefig(name)
else:
        if filenames.split('.')[-1] == 'ps':
fig.savefig(filenames,orientation='landscape')
else:
fig.savefig(filenames)
def main():
figures = [
#(trb2newpar_p_fig, [.175,.3,default_eps,default_f,'p'], ['trb2newpar_p.png']),
#(trb2_p_fig, [], ['trb2_p_fig.png']),
#(trb2_qp_fig, [], ['trb2_qp_fig.png']),
#(trb2_s_fig, [], ['trb2_s4_fig.png']),
#(lamom2_p_fig, [0.9,1.], ['lamom2_p_fig1.pdf','lamom2_p_fig1.eps']),
#(lamom2_p_fig, [1.1,1.], ['lamom2_p_fig2.pdf','lamom2_p_fig2.eps']),
#(lamom2_qp_fig, [0.9,1.], ['lamom2_qp_fig1.pdf','lamom2_qp_fig1.eps']),
#(lamom2_qp_fig, [1.1,1.], ['lamom2_qp_fig2.pdf','lamom2_qp_fig2.eps']),
#(lamom2_s_fig, [0.9,1.,1], ['lamom2_s1_fig1.pdf','lamom2_s1_fig1.eps']),
#(lamom2_s_fig, [0.85,1.,2], ['lamom2_s2_fig1.pdf','lamom2_s2_fig1.eps']),
#(lo_inhom,[],['lo-inhom.pdf']),
#(trb2_prc_hodd,[],['trb2_prc_hodd.pdf']),
#(trb50_specgram,[],['trb50_specgram.pdf']),
#(trb50_op,[],['trb50_op.pdf']),
#(trb50_specgram_op,[],['network3_ymp.pdf']),
#(fd_diagram,[],['fd_diagram.pdf','fd_diagram.eps']),
]
for fig in figures:
generate_figure(*fig)
if __name__ == "__main__":
main()
| bsd-2-clause |
lukeiwanski/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 23 | 77821 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
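  # (Added commentary) predict_proba returns rows of [P(y=0), P(y=1)]; the
  # first example was trained towards the fractional label 0.7, so its row
  # should be close to [.3, .7], and the second (label 0) close to [1., 0.],
  # which is what the assertAllClose above verifies.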
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
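  # (Added commentary) A quick check of those numbers: if the trained model
  # predicts class 1 for every row (which the weighting pushes it towards),
  # only the first training row (label 1, weight 100) is counted as correct,
  # so the weighted accuracy is 100 / (100 + 3 + 2 + 2) ~= 0.93, whereas the
  # unweighted accuracy of the same predictions would be 0.25.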
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
    maintenance_cost = feature_column_lib.real_valued_column(
        'maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
    # input_fn is the same as the one in testSdcaOptimizerRealValuedFeatures,
    # except that the two 1-dimensional dense features have been replaced by a
    # single 2-dimensional feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeaturesOOVWithNoOOVBuckets(self):
"""LinearClassifier with SDCAOptimizer with OOV features (-1 IDs)."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
# 'GB' is out of the vocabulary.
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_keys(
'country', keys=['US', 'CA', 'MK', 'IT', 'CN'])
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearClassifier with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id',
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
print('all scores = {}'.format(scores))
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
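  # (Added commentary) Why 0.25: the only feature is the constant x=1, so the
  # model can only fit a constant, and under squared loss the best constant is
  # the label mean (1 + 0 + 0 + 0) / 4 = 0.25. That gives the residuals 0.75
  # and 0.25 used in the expected-loss calculation above.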
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
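  # (Added commentary) The training weights are all 1.0, so the fitted
  # constant is still the label mean 0.25; the evaluation merely reweights the
  # residuals, giving (7 * 0.75**2 + 3 * 0.25**2) / 10 = 0.4125 as noted above.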
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearRegressor with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0,
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
All of the instances in this input only have the bias feature, and a
1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
          # place_holder is an empty column which is always 0 (absent), because
          # the linear estimator requires at least one feature column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
          [[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
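  # (Added commentary) A quick way to see the docstring's "centered" numbers:
  # fitting each group's label mean requires bias + w_a = 0.4 and
  # bias + w_b = 0.2, whose symmetric solution is bias=0.3, w_a=0.1, w_b=-0.1.
  # Because the bias is currently regularized like any other column
  # (b/29339026), the solution shifts to roughly bias=0.2, w_a=0.2, w_b=0.0,
  # which is what the assertions above expect.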
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
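  # (Added commentary) Rough check of that comment, assuming the Poisson head
  # uses the full negative log-likelihood including the log(y!) term: for
  # label y=10 and prediction p ~= 9.9998,
  #   loss = p - y * log(p) + log(y!) ~= 10.0 - 23.03 + 15.10 ~= 2.08,
  # so a loss just under 2.1 corresponds to a prediction very close to 10.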
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
    maintenance_cost = feature_column_lib.real_valued_column(
        'maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ferdinandvanwyk/gs2_analysis | films.py | 2 | 16388 | import os
import sys
import gc
# Third Party
import numpy as np
from netCDF4 import Dataset
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
import pyfilm as pf
plt.rcParams.update({'figure.autolayout': True})
mpl.rcParams['axes.unicode_minus'] = False
# Local
from run import Run
import plot_style
import field_helper as field
plot_style.white()
def phi_film(run, should_normalize):
"""
Create film of electrostatic potential.
Parameters
----------
run : object
        Instance of the Run class describing a given simulation
    should_normalize : bool
        If True, normalize the field (via field.normalize) before plotting
    """
run.read_phi()
if should_normalize:
field.normalize(run.phi)
contours = field.calculate_contours(run.phi)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'phi',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$\varphi$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.phi, plot_options=plot_options,
options=options)
run.phi = None
gc.collect()
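# (Added commentary) A minimal usage sketch -- the path below is a placeholder
# and Run takes the same command-line argument as in the __main__ block at the
# bottom of this file:
#
#     run = Run('/path/to/run_dir')
#     phi_film(run, should_normalize=True)
#
# The remaining *_film functions follow the same pattern: read or derive the
# field, optionally normalize it, compute contour levels (for the 2D films),
# and hand the (r, z, field) arrays to pyfilm together with per-frame titles.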
def ntot_film(run, should_normalize):
"""
Create film of density fluctuations.
"""
run.read_ntot()
if should_normalize:
field.normalize(run.ntot_i)
field.normalize(run.ntot_e)
# Ion density film
contours = field.calculate_contours(run.ntot_i)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'ntot_i',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$\delta n_i / n_r$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.ntot_i, plot_options=plot_options,
options=options)
run.ntot_i = None
gc.collect()
# Electron density film
contours = field.calculate_contours(run.ntot_e)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'ntot_e',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$\delta n_e / n_r$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.ntot_e, plot_options=plot_options,
options=options)
run.ntot_e = None
gc.collect()
def upar_film(run, should_normalize):
"""
Make film of parallel velocity.
"""
run.read_upar()
if should_normalize:
field.normalize(run.upar_i)
field.normalize(run.upar_e)
# Ion upar film
contours = field.calculate_contours(run.upar_i)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'upar_i',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$u_{i, \parallel}$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.upar_i, plot_options=plot_options,
options=options)
run.upar_i = None
gc.collect()
# Electron upar film
contours = field.calculate_contours(run.upar_e)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'upar_e',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$u_{e, \parallel}$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.upar_e, plot_options=plot_options,
options=options)
run.upar_e = None
gc.collect()
def v_exb_film(run, should_normalize):
"""
    Make film of the E x B velocity.
"""
run.calculate_v_exb()
if should_normalize:
field.normalize(run.v_exb)
    # E x B velocity film
contours = field.calculate_contours(run.v_exb)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'v_exb',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$v_{E \times B}$ (m/s)',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.v_exb, plot_options=plot_options,
options=options)
run.v_exb = None
gc.collect()
def tpar_film(run, should_normalize):
"""
Make film of parallel temperature.
"""
run.read_tpar()
if should_normalize:
field.normalize(run.tpar_i)
field.normalize(run.tpar_e)
contours = field.calculate_contours(run.tpar_i)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'tpar_i',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$\delta T_{i, \parallel} / T_r$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.tpar_i, plot_options=plot_options,
options=options)
run.tpar_i = None
gc.collect()
contours = field.calculate_contours(run.tpar_e)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'tpar_e',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$\delta T_{e, \parallel} / T_r$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.tpar_e, plot_options=plot_options,
options=options)
run.tpar_e = None
gc.collect()
def tperp_film(run, should_normalize):
"""
Make film of perpendicular temperature.
"""
run.read_tperp()
if should_normalize:
field.normalize(run.tperp_i)
field.normalize(run.tperp_e)
contours = field.calculate_contours(run.tperp_i)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'tperp_i',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$\delta T_{i, \perp} / T_r$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.tperp_i, plot_options=plot_options,
options=options)
run.tperp_i = None
gc.collect()
contours = field.calculate_contours(run.tperp_e)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'tperp_e',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$\delta T_{e, \perp} / T_r$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.tperp_e, plot_options=plot_options,
options=options)
run.tperp_e = None
gc.collect()
def heat_flux_film(run, should_normalize):
"""
Make film of local heat flux as a function of x and y.
"""
run.calculate_q()
if should_normalize:
field.normalize(run.q)
contours = field.calculate_contours(run.q)
plot_options = {'levels':contours, 'cmap':'seismic'}
options = {'file_name':'q_i',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'aspect':'equal',
'xlabel':r'$R (m)$',
'ylabel':r'$Z (m)$',
'cbar_ticks':5,
'cbar_label':r'$Q_{i}(x, y) / Q_{gB}$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_2d(run.r, run.z, run.q, plot_options=plot_options,
options=options)
run.q = None
gc.collect()
def radial_heat_flux_film(run, should_normalize):
"""
Make film of the radial heat flux.
"""
run.calculate_q()
run.q_rad = np.mean(run.q, axis=2)
plot_options = {}
options = {'file_name':'q_i_rad',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'xlabel':r'$R (m)$',
'ylabel':r'$\left<Q_{i}(x)\right>_y / Q_{gB}$',
'ylim':0,
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_1d(run.r, run.q_rad, plot_options=plot_options,
options=options)
run.q = None
run.q_rad = None
gc.collect()
def v_zf_film(run, should_normalize):
"""
Make film of zonal flow velocity as a function of x and t.
"""
run.calculate_v_zf()
plot_options = {}
options = {'file_name':'v_zf',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'xlabel':r'$R (m)$',
'ylabel':r'$v_{ZF} / v_{th,i}$',
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_1d(run.r, run.v_zf, plot_options=plot_options,
options=options)
run.v_zf = None
gc.collect()
def zf_shear_film(run, should_normalize):
"""
    Make film of zonal flow velocity shear as a function of x and t.
"""
run.calculate_zf_shear()
plot_options = {}
options = {'file_name':'zf_shear',
'film_dir':run.run_dir + 'analysis/moments',
'frame_dir':run.run_dir + 'analysis/moments/film_frames',
'xlabel':r'$R (m)$',
'ylabel':r"$v'_{ZF} / v_{th,i}$",
'bbox_inches':'tight',
'fps':30}
options['title'] = []
for it in range(run.nt):
options['title'].append(r'Time = {0:04d} $\mu s$'.format(
int(np.round((run.t[it]-run.t[0])*1e6))))
pf.make_film_1d(run.r, run.zf_shear, plot_options=plot_options,
options=options)
    run.v_zf = None
    run.zf_shear = None
gc.collect()
if __name__ == '__main__':
run = Run(sys.argv[1])
try:
case_id = str(sys.argv[2])
except IndexError:
print('Which field do you want to make a film of?')
print('1 : phi')
print('2 : ntot')
print('3 : upar')
print('4 : v_exb')
print('5 : zonal flow velocity')
print('6 : zf velocity shear')
print('7 : tpar')
print('8 : tperp')
print('9 : heat flux')
print('10 : radial heat flux')
print('all : all moments')
case_id = str(input())
try:
user_answer = str(sys.argv[3])
except IndexError:
print('Normalize field? y/n')
user_answer = str(input())
if user_answer == 'y' or user_answer == 'Y':
should_normalize = True
elif user_answer == 'n' or user_answer == 'N':
should_normalize = False
else:
sys.exit('Wrong option.')
if case_id == '1':
phi_film(run, should_normalize)
elif case_id == '2':
ntot_film(run, should_normalize)
elif case_id == '3':
upar_film(run, should_normalize)
elif case_id == '4':
v_exb_film(run, should_normalize)
elif case_id == '5':
v_zf_film(run, should_normalize)
elif case_id == '6':
zf_shear_film(run, should_normalize)
elif case_id == '7':
tpar_film(run, should_normalize)
elif case_id == '8':
tperp_film(run, should_normalize)
elif case_id == '9':
heat_flux_film(run, should_normalize)
elif case_id == '10':
radial_heat_flux_film(run, should_normalize)
elif case_id == 'all':
phi_film(run, should_normalize)
ntot_film(run, should_normalize)
upar_film(run, should_normalize)
v_exb_film(run, should_normalize)
v_zf_film(run, should_normalize)
zf_shear_film(run, should_normalize)
tpar_film(run, should_normalize)
tperp_film(run, should_normalize)
heat_flux_film(run, should_normalize)
radial_heat_flux_film(run, should_normalize)
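# Illustrative invocation (script name and run directory are placeholders, not
# taken from this file): assuming this module is saved as make_film.py, running
#     python make_film.py /path/to/run/ 4 y
# would be equivalent to answering the prompts above with '4' (v_exb) and 'y'
# (normalize), producing the normalized E x B velocity film.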
| gpl-2.0 |
evgchz/scikit-learn | sklearn/utils/tests/test_utils.py | 23 | 6045 | import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import pinv2
from sklearn.utils.testing import (assert_equal, assert_raises, assert_true,
assert_almost_equal, assert_array_equal,
SkipTest)
from sklearn.utils import check_random_state
from sklearn.utils import deprecated
from sklearn.utils import resample
from sklearn.utils import safe_mask
from sklearn.utils import column_or_1d
from sklearn.utils import safe_indexing
from sklearn.utils import shuffle
from sklearn.utils.extmath import pinvh
from sklearn.utils.mocking import MockDataFrame
def test_make_rng():
"""Check the check_random_state utility function behavior"""
assert_true(check_random_state(None) is np.random.mtrand._rand)
assert_true(check_random_state(np.random) is np.random.mtrand._rand)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(42).randint(100) == rng_42.randint(100))
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(rng_42) is rng_42)
rng_42 = np.random.RandomState(42)
assert_true(check_random_state(43).randint(100) != rng_42.randint(100))
assert_raises(ValueError, check_random_state, "some invalid seed")
def test_resample_noarg():
"""Border case not worth mentioning in doctests"""
assert_true(resample() is None)
def test_deprecated():
"""Test whether the deprecated decorator issues appropriate warnings"""
# Copied almost verbatim from http://docs.python.org/library/warnings.html
# First a function...
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated()
def ham():
return "spam"
spam = ham()
assert_equal(spam, "spam") # function must remain usable
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
# ... then a class.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
@deprecated("don't use this")
class Ham(object):
SPAM = 1
ham = Ham()
assert_true(hasattr(ham, "SPAM"))
assert_equal(len(w), 1)
assert_true(issubclass(w[0].category, DeprecationWarning))
assert_true("deprecated" in str(w[0].message).lower())
def test_resample_value_errors():
"""Check that invalid arguments yield ValueError"""
assert_raises(ValueError, resample, [0], [0, 1])
assert_raises(ValueError, resample, [0, 1], [0, 1], n_samples=3)
assert_raises(ValueError, resample, [0, 1], [0, 1], meaning_of_life=42)
def test_safe_mask():
random_state = check_random_state(0)
X = random_state.rand(5, 4)
X_csr = sp.csr_matrix(X)
mask = [False, False, True, True, True]
mask = safe_mask(X, mask)
assert_equal(X[mask].shape[0], 3)
mask = safe_mask(X_csr, mask)
assert_equal(X_csr[mask].shape[0], 3)
def test_pinvh_simple_real():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=np.float64)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_pinvh_nonpositive():
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float64)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_almost_equal(a_pinv, a_pinvh)
def test_pinvh_simple_complex():
a = (np.array([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
+ 1j * np.array([[10, 8, 7], [6, 5, 4], [3, 2, 1]]))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_column_or_1d():
EXAMPLES = [
("binary", ["spam", "egg", "spam"]),
("binary", [0, 1, 0, 1]),
("continuous", np.arange(10) / 20.),
("multiclass", [1, 2, 3]),
("multiclass", [0, 1, 2, 2, 0]),
("multiclass", [[1], [2], [3]]),
("multilabel-indicator", [[0, 1, 0], [0, 0, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("multiclass-multioutput", [[1, 1], [2, 2], [3, 1]]),
("multiclass-multioutput", [[5, 1], [4, 2], [3, 1]]),
("multiclass-multioutput", [[1, 2, 3]]),
("continuous-multioutput", np.arange(30).reshape((-1, 3))),
]
for y_type, y in EXAMPLES:
if y_type in ["binary", 'multiclass', "continuous"]:
assert_array_equal(column_or_1d(y), np.ravel(y))
else:
assert_raises(ValueError, column_or_1d, y)
def test_safe_indexing():
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
inds = np.array([1, 2])
X_inds = safe_indexing(X, inds)
X_arrays = safe_indexing(np.array(X), inds)
assert_array_equal(np.array(X_inds), X_arrays)
assert_array_equal(np.array(X_inds), np.array(X)[inds])
def test_safe_indexing_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("Pandas not found")
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = pd.DataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_safe_indexing_mock_pandas():
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X_df = MockDataFrame(X)
inds = np.array([1, 2])
X_df_indexed = safe_indexing(X_df, inds)
X_indexed = safe_indexing(X_df, inds)
assert_array_equal(np.array(X_df_indexed), X_indexed)
def test_shuffle_on_ndim_equals_three():
def to_tuple(A): # to make the inner arrays hashable
return tuple(tuple(tuple(C) for C in B) for B in A)
A = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) # A.shape = (2,2,2)
S = set(to_tuple(A))
shuffle(A) # shouldn't raise a ValueError for dim = 3
assert_equal(set(to_tuple(A)), S)
| bsd-3-clause |
aleksandr-bakanov/astropy | astropy/wcs/wcsapi/low_level_api.py | 3 | 14978 | import os
import abc
import numpy as np
__all__ = ['BaseLowLevelWCS', 'validate_physical_types']
class BaseLowLevelWCS(metaclass=abc.ABCMeta):
"""
Abstract base class for the low-level WCS interface.
This is described in `APE 14: A shared Python interface for World Coordinate
Systems <https://doi.org/10.5281/zenodo.1188875>`_.
"""
@property
@abc.abstractmethod
def pixel_n_dim(self):
"""
The number of axes in the pixel coordinate system.
"""
@property
@abc.abstractmethod
def world_n_dim(self):
"""
The number of axes in the world coordinate system.
"""
@property
@abc.abstractmethod
def world_axis_physical_types(self):
"""
An iterable of strings describing the physical type for each world axis.
These should be names from the VO UCD1+ controlled Vocabulary
(http://www.ivoa.net/documents/latest/UCDlist.html). If no matching UCD
type exists, this can instead be ``"custom:xxx"``, where ``xxx`` is an
arbitrary string. Alternatively, if the physical type is
unknown/undefined, an element can be `None`.
"""
@property
@abc.abstractmethod
def world_axis_units(self):
"""
An iterable of strings given the units of the world coordinates for each
axis.
The strings should follow the `IVOA VOUnit standard
<http://ivoa.net/documents/VOUnits/>`_ (though as noted in the VOUnit
specification document, units that do not follow this standard are still
allowed, but just not recommended).
"""
@abc.abstractmethod
def pixel_to_world_values(self, *pixel_arrays):
"""
Convert pixel coordinates to world coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays as
input, and pixel coordinates should be zero-based. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays in units given by
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Note that pixel coordinates are
assumed to be 0 at the center of the first pixel in each dimension. If a
pixel is in a region where the WCS is not defined, NaN can be returned.
The coordinates should be specified in the ``(x, y)`` order, where for
an image, ``x`` is the horizontal coordinate and ``y`` is the vertical
coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def array_index_to_world_values(self, *index_arrays):
"""
Convert array indices to world coordinates.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values` except that
the indices should be given in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`).
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def world_to_pixel_values(self, *world_arrays):
"""
Convert world coordinates to pixel coordinates.
This method takes `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` scalars or arrays as
input in units given by `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_units`. Returns
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` scalars or arrays. Note that pixel
coordinates are assumed to be 0 at the center of the first pixel in each
dimension. If a world coordinate does not have a matching pixel
coordinate, NaN can be returned. The coordinates should be returned in
the ``(x, y)`` order, where for an image, ``x`` is the horizontal
coordinate and ``y`` is the vertical coordinate.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@abc.abstractmethod
def world_to_array_index_values(self, *world_arrays):
"""
Convert world coordinates to array indices.
This is the same as `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_to_pixel_values` except that
the indices should be returned in ``(i, j)`` order, where for an image
``i`` is the row and ``j`` is the column (i.e. the opposite order to
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_to_world_values`). The indices should be
returned as rounded integers.
If `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` is ``1``, this
method returns a single scalar or array, otherwise a tuple of scalars or
arrays is returned.
"""
@property
@abc.abstractmethod
def world_axis_object_components(self):
"""
A list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim` elements giving information
on constructing high-level objects for the world coordinates.
Each element of the list is a tuple with three items:
* The first is a name for the world object this world array
corresponds to, which *must* match the string names used in
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`. Note that names might
appear twice because two world arrays might correspond to a single
world object (e.g. a celestial coordinate might have both “ra” and
“dec” arrays, which correspond to a single sky coordinate object).
* The second element is either a string keyword argument name or a
positional index for the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes`.
* The third argument is a string giving the name of the property
to access on the corresponding class from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_classes` in
order to get numerical values. Alternatively, this argument can be a
          callable Python object that takes a high-level coordinate object and
returns the numerical values suitable for passing to the low-level
WCS transformation methods.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
<https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
@property
@abc.abstractmethod
def world_axis_object_classes(self):
"""
A dictionary giving information on constructing high-level objects for
the world coordinates.
Each key of the dictionary is a string key from
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components`, and each value is a
tuple with three elements or four elements:
* The first element of the tuple must be a class or a string specifying
the fully-qualified name of a class, which will specify the actual
Python object to be created.
        * The second element should be a tuple specifying the positional
arguments required to initialize the class. If
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_object_components` specifies that the
          world coordinates should be passed as a positional argument, then this
tuple should include `None` placeholders for the world coordinates.
* The third tuple element must be a dictionary with the keyword
arguments required to initialize the class.
* Optionally, for advanced use cases, the fourth element (if present)
should be a callable Python object that gets called instead of the
class and gets passed the positional and keyword arguments. It should
return an object of the type of the first element in the tuple.
Note that we don't require the classes to be Astropy classes since there
is no guarantee that Astropy will have all the classes to represent all
kinds of world coordinates. Furthermore, we recommend that the output be
kept as human-readable as possible.
The classes used here should have the ability to do conversions by
passing an instance as the first argument to the same class with
different arguments (e.g. ``Time(Time(...), scale='tai')``). This is
a requirement for the implementation of the high-level interface.
The second and third tuple elements for each value of this dictionary
can in turn contain either instances of classes, or if necessary can
contain serialized versions that should take the same form as the main
classes described above (a tuple with three elements with the fully
qualified name of the class, then the positional arguments and the
keyword arguments). For low-level API objects implemented in Python, we
recommend simply returning the actual objects (not the serialized form)
for optimal performance. Implementations should either always or never
use serialized classes to represent Python objects, and should indicate
which of these they follow using the
`~astropy.wcs.wcsapi.BaseLowLevelWCS.serialized_classes` attribute.
See the document
`APE 14: A shared Python interface for World Coordinate Systems
        <https://doi.org/10.5281/zenodo.1188875>`_ for examples.
"""
# The following three properties have default fallback implementations, so
# they are not abstract.
@property
def array_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(row, column)``
order (the convention for arrays in Python).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
"""
return None
@property
def pixel_shape(self):
"""
The shape of the data that the WCS applies to as a tuple of length
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim` in ``(x, y)``
order (where for an image, ``x`` is the horizontal coordinate and ``y``
is the vertical coordinate).
If the WCS is valid in the context of a dataset with a particular
shape, then this property can be used to store the shape of the
data. This can be used for example if implementing slicing of WCS
objects. This is an optional property, and it should return `None`
if a shape is not known or relevant.
If you are interested in getting a shape that is comparable to that of
a Numpy array, you should use
`~astropy.wcs.wcsapi.BaseLowLevelWCS.array_shape` instead.
"""
return None
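        # For a hypothetical 2-D image with 100 rows and 200 columns,
        # ``array_shape`` would be (100, 200) while ``pixel_shape`` would be
        # (200, 100).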
@property
def pixel_bounds(self):
"""
The bounds (in pixel coordinates) inside which the WCS is defined,
as a list with `~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`
``(min, max)`` tuples.
The bounds should be given in ``[(xmin, xmax), (ymin, ymax)]``
order. WCS solutions are sometimes only guaranteed to be accurate
within a certain range of pixel values, for example when defining a
WCS that includes fitted distortions. This is an optional property,
and it should return `None` if a shape is not known or relevant.
"""
return None
@property
def pixel_axis_names(self):
"""
An iterable of strings describing the name for each pixel axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized.
"""
return [''] * self.pixel_n_dim
@property
def world_axis_names(self):
"""
An iterable of strings describing the name for each world axis.
If an axis does not have a name, an empty string should be returned
(this is the default behavior for all axes if a subclass does not
override this property). Note that these names are just for display
purposes and are not standardized. For standardized axis types, see
`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_axis_physical_types`.
"""
return [''] * self.world_n_dim
@property
def axis_correlation_matrix(self):
"""
Returns an (`~astropy.wcs.wcsapi.BaseLowLevelWCS.world_n_dim`,
`~astropy.wcs.wcsapi.BaseLowLevelWCS.pixel_n_dim`) matrix that
indicates using booleans whether a given world coordinate depends on a
given pixel coordinate.
This defaults to a matrix where all elements are `True` in the absence
of any further information. For completely independent axes, the
diagonal would be `True` and all other entries `False`.
"""
return np.ones((self.world_n_dim, self.pixel_n_dim), dtype=bool)
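        # Illustrative sketch: for a hypothetical 2-D image WCS whose two world
        # axes each depend only on their own pixel axis, a subclass would
        # instead return
        #     np.array([[True, False],
        #               [False, True]])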
@property
def serialized_classes(self):
"""
Indicates whether Python objects are given in serialized form or as
actual Python objects.
"""
return False
def _as_mpl_axes(self):
"""
Compatibility hook for Matplotlib and WCSAxes. With this method, one can
do::
from astropy.wcs import WCS
import matplotlib.pyplot as plt
wcs = WCS('filename.fits')
fig = plt.figure()
ax = fig.add_axes([0.15, 0.1, 0.8, 0.8], projection=wcs)
...
and this will generate a plot with the correct WCS coordinates on the
axes.
"""
from astropy.visualization.wcsaxes import WCSAxes
return WCSAxes, {'wcs': self}
UCDS_FILE = os.path.join(os.path.dirname(__file__), 'data', 'ucds.txt')
with open(UCDS_FILE) as f:
VALID_UCDS = set([x.strip() for x in f.read().splitlines()[1:]])
def validate_physical_types(physical_types):
"""
Validate a list of physical types against the UCD1+ standard
"""
for physical_type in physical_types:
if (physical_type is not None and
physical_type not in VALID_UCDS and
not physical_type.startswith('custom:')):
raise ValueError(f"Invalid physical type: {physical_type}")
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/cluster/tests/test_k_means.py | 8 | 25170 | """Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# non centered, sparse centers to check the consistency of the dense and
# sparse code paths
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
rnd = np.random.RandomState(0)
X = rnd.normal(size=(40, 2))
X = (X * 10).astype(np.uint8)
km = KMeans(n_init=1).fit(X)
pred_x = assert_warns(RuntimeWarning, km.predict, X)
assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
# pure numpy implementation as easily auditable reference gold
# implementation
rng = np.random.RandomState(42)
noisy_centers = centers + rng.normal(size=centers.shape)
labels_gold = - np.ones(n_samples, dtype=np.int)
mindist = np.empty(n_samples)
mindist.fill(np.infty)
for center_id in range(n_clusters):
dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
labels_gold[dist < mindist] = center_id
mindist = np.minimum(dist, mindist)
inertia_gold = mindist.sum()
assert_true((mindist >= 0.0).all())
assert_true((labels_gold != -1).all())
# perform label assignment using the dense array input
x_squared_norms = (X ** 2).sum(axis=1)
labels_array, inertia_array = _labels_inertia(
X, x_squared_norms, noisy_centers)
assert_array_almost_equal(inertia_array, inertia_gold)
assert_array_equal(labels_array, labels_gold)
# perform label assignment using the sparse CSR input
x_squared_norms_from_csr = row_norms(X_csr, squared=True)
labels_csr, inertia_csr = _labels_inertia(
X_csr, x_squared_norms_from_csr, noisy_centers)
assert_array_almost_equal(inertia_csr, inertia_gold)
assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
"""Check that dense and sparse minibatch update give the same results"""
rng = np.random.RandomState(42)
old_centers = centers + rng.normal(size=centers.shape)
new_centers = old_centers.copy()
new_centers_csr = old_centers.copy()
counts = np.zeros(new_centers.shape[0], dtype=np.int32)
counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)
x_squared_norms = (X ** 2).sum(axis=1)
x_squared_norms_csr = row_norms(X_csr, squared=True)
buffer = np.zeros(centers.shape[1], dtype=np.double)
buffer_csr = np.zeros(centers.shape[1], dtype=np.double)
# extract a small minibatch
X_mb = X[:10]
X_mb_csr = X_csr[:10]
x_mb_squared_norms = x_squared_norms[:10]
x_mb_squared_norms_csr = x_squared_norms_csr[:10]
# step 1: compute the dense minibatch update
old_inertia, incremental_diff = _mini_batch_step(
X_mb, x_mb_squared_norms, new_centers, counts,
buffer, 1, None, random_reassign=False)
assert_greater(old_inertia, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels, new_inertia = _labels_inertia(
X_mb, x_mb_squared_norms, new_centers)
assert_greater(new_inertia, 0.0)
assert_less(new_inertia, old_inertia)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers - old_centers) ** 2)
assert_almost_equal(incremental_diff, effective_diff)
# step 2: compute the sparse minibatch update
old_inertia_csr, incremental_diff_csr = _mini_batch_step(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
buffer_csr, 1, None, random_reassign=False)
assert_greater(old_inertia_csr, 0.0)
# compute the new inertia on the same batch to check that it decreased
labels_csr, new_inertia_csr = _labels_inertia(
X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
assert_greater(new_inertia_csr, 0.0)
assert_less(new_inertia_csr, old_inertia_csr)
# check that the incremental difference computation is matching the
# final observed value
effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
assert_almost_equal(incremental_diff_csr, effective_diff)
# step 3: check that sparse and dense updates lead to the same results
assert_array_equal(labels, labels_csr)
assert_array_almost_equal(new_centers, new_centers_csr)
assert_almost_equal(incremental_diff, incremental_diff_csr)
assert_almost_equal(old_inertia, old_inertia_csr)
assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
# check that the number of clusters centers and distinct labels match
# the expectation
centers = km.cluster_centers_
assert_equal(centers.shape, (n_clusters, n_features))
labels = km.labels_
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(km.inertia_, 0.0)
# check error on dataset being too small
assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
km = KMeans(init="k-means++", n_clusters=n_clusters,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_check_fitted():
km = KMeans(n_clusters=n_clusters, random_state=42)
assert_raises(AttributeError, km._check_fitted)
def test_k_means_new_centers():
# Explore the part of the code where a new center is reassigned
X = np.array([[0, 0, 1, 1],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0]])
labels = [0, 1, 2, 1, 1, 2]
bad_centers = np.array([[+0, 1, 0, 0],
[.2, 0, .2, .2],
[+0, 0, 0, 0]])
km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
random_state=1)
for this_X in (X, sp.coo_matrix(X)):
km.fit(this_X)
this_labels = km.labels_
# Reorder the labels so that the first instance is in cluster 0,
# the second in cluster 1, ...
this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
np.testing.assert_array_equal(this_labels, labels)
def _has_blas_lib(libname):
from numpy.distutils.system_info import get_info
return libname in get_info('blas_opt').get('libraries', [])
@if_not_mac_os()
def test_k_means_plus_plus_init_2_jobs():
if _has_blas_lib('openblas'):
raise SkipTest('Multi-process bug with OpenBLAS (see issue #636)')
km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
random_state=42).fit(X)
_check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    # check that an error is raised if the precompute_distances flag is not
# supported
km = KMeans(precompute_distances="wrong")
assert_raises(ValueError, km.fit, X)
def test_k_means_plus_plus_init_sparse():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_random_init():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X)
_check_fitted_model(km)
def test_k_means_random_init_sparse():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42)
km.fit(X_csr)
_check_fitted_model(km)
def test_k_means_plus_plus_init_not_precomputed():
km = KMeans(init="k-means++", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_random_init_not_precomputed():
km = KMeans(init="random", n_clusters=n_clusters, random_state=42,
precompute_distances=False).fit(X)
_check_fitted_model(km)
def test_k_means_perfect_init():
km = KMeans(init=centers.copy(), n_clusters=n_clusters, random_state=42,
n_init=1)
km.fit(X)
_check_fitted_model(km)
def test_mb_k_means_plus_plus_init_dense_array():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X)
_check_fitted_model(mb_k_means)
def test_mb_kmeans_verbose():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
mb_k_means.fit(X)
finally:
sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
random_state=42)
mb_k_means.fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_init_with_large_k():
mb_k_means = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # Check that a warning is raised, as the number of clusters is larger
# than the init_size
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_random_init_dense_array():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_random_init_sparse_csr():
# increase n_init to make random init stable enough
mb_k_means = MiniBatchKMeans(init="random", n_clusters=n_clusters,
random_state=42, n_init=10).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_perfect_init_dense_array():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=10)
assert_warns(RuntimeWarning, mb_k_means.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
random_state=42, n_init=1).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_minibatch_sensible_reassign_fit():
# check if identical initial clusters are reassigned
# also a regression test for when there are more desired reassignments than
# samples.
zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
verbose=10, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
# do the same with batch-size > X.shape[0] (regression test)
mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
random_state=42, verbose=10, init="random")
mb_k_means.fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
cluster_std=1., random_state=42)
zeroed_X[::2, :] = 0
mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, verbose=10,
init="random")
for i in range(100):
mb_k_means.partial_fit(zeroed_X)
# there should not be too many exact zero cluster centers
assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
# Give a perfect initialization, but a large reassignment_ratio,
# as a result all the centers should be reassigned and the model
    # should no longer be good
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
random_state=42)
mb_k_means.fit(this_X)
score_before = mb_k_means.score(this_X)
try:
old_stdout = sys.stdout
sys.stdout = StringIO()
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1, verbose=True)
finally:
sys.stdout = old_stdout
assert_greater(score_before, mb_k_means.score(this_X))
# Give a perfect initialization, with a small reassignment_ratio,
# no center should be reassigned
for this_X in (X, X_csr):
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
init=centers.copy(),
random_state=42, n_init=1)
mb_k_means.fit(this_X)
clusters_before = mb_k_means.cluster_centers_
# Turn on verbosity to smoke test the display code
_mini_batch_step(this_X, (X ** 2).sum(axis=1),
mb_k_means.cluster_centers_,
mb_k_means.counts_,
np.zeros(X.shape[1], np.double),
False, distances=np.zeros(X.shape[0]),
random_reassign=True, random_state=42,
reassignment_ratio=1e-15)
assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
# Test for the case that the number of clusters to reassign is bigger
# than the batch_size
n_samples = 550
rnd = np.random.RandomState(42)
X = rnd.uniform(size=(n_samples, 10))
# Check that the fit works if n_clusters is bigger than the batch_size.
# Run the test with 550 clusters and 550 samples, because it turned out
    # that these values ensure that the number of clusters to reassign
# is always bigger than the batch_size
n_clusters = 550
MiniBatchKMeans(n_clusters=n_clusters,
batch_size=100,
init_size=n_samples,
random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
def test_init(X, k, random_state):
return centers
# Small test to check that giving the wrong number of centers
# raises a meaningful error
assert_raises(ValueError,
MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)
# Now check that the fit actually works
mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
random_state=42).fit(X_csr)
_check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)
# use the partial_fit API for online learning
for X_minibatch in np.array_split(X, 10):
km.partial_fit(X_minibatch)
# compute the labeling on the complete dataset
labels = km.predict(X)
assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
batch_size=10, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
_check_fitted_model(mb_k_means)
def test_minibatch_tol():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
random_state=42, tol=.01).fit(X)
_check_fitted_model(mb_k_means)
def test_minibatch_set_init_size():
mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
init_size=666, random_state=42,
n_init=1).fit(X)
assert_equal(mb_k_means.init_size, 666)
assert_equal(mb_k_means.init_size_, n_samples)
_check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
km = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_mini_match_k_means_invalid_init():
km = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
assert_raises(ValueError, km.fit, X)
def test_k_means_copyx():
"""Check if copy_x=False returns nearly equal X after de-centering."""
my_X = X.copy()
km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
km.fit(my_X)
_check_fitted_model(km)
# check if my_X is centered
assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
"""Check k_means with a bad initialization does not yield a singleton
Starting with bad centers that are quickly ignored should not
result in a repositioning of the centers to the center of mass that
    would lead to collapsed centers, which in turn makes the clustering
    dependent on numerical instabilities.
"""
my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
km.fit(my_X)
    # centers must not have collapsed
assert_equal(len(np.unique(km.labels_)), 3)
centers = km.cluster_centers_
assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
km = KMeans(n_clusters=n_clusters, random_state=42)
km.fit(X)
# sanity check: predict centroid labels
pred = km.predict(km.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = km.predict(X)
assert_array_equal(pred, km.labels_)
# re-predict labels for training set using fit_predict
pred = km.fit_predict(X)
assert_array_equal(pred, km.labels_)
def test_score():
km1 = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
s1 = km1.fit(X).score(X)
km2 = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
s2 = km2.fit(X).score(X)
assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
# sanity check: re-predict labeling for training set samples
pred = mb_k_means.predict(X)
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
n_init=10).fit(X_csr)
# sanity check: re-predict labeling for training set samples
assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)
# sanity check: predict centroid labels
pred = mb_k_means.predict(mb_k_means.cluster_centers_)
assert_array_equal(pred, np.arange(n_clusters))
    # check that models trained on sparse input also work for dense input at
# predict time
assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
X_int = np.array(X_list, dtype=np.int32)
X_int_csr = sp.csr_matrix(X_int)
init_int = X_int[:2]
fitted_models = [
KMeans(n_clusters=2).fit(X_list),
KMeans(n_clusters=2).fit(X_int),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
# mini batch kmeans is very unstable on such a small dataset hence
# we use many inits
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_list),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int),
MiniBatchKMeans(n_clusters=2, batch_size=2,
init=init_int, n_init=1).fit(X_int_csr),
]
expected_labels = [0, 1, 1, 0, 0, 1]
scores = np.array([v_measure_score(expected_labels, km.labels_)
for km in fitted_models])
assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
km = KMeans(n_clusters=n_clusters)
km.fit(X)
X_new = km.transform(km.cluster_centers_)
for c in range(n_clusters):
assert_equal(X_new[c, c], 0)
for c2 in range(n_clusters):
if c != c2:
assert_greater(X_new[c, c2], 0)
def test_fit_transform():
X1 = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
X2 = KMeans(n_clusters=3, random_state=51).fit_transform(X)
assert_array_equal(X1, X2)
def test_n_init():
"""Check that increasing the number of init increases the quality"""
n_runs = 5
n_init_range = [1, 5, 10]
inertia = np.zeros((len(n_init_range), n_runs))
for i, n_init in enumerate(n_init_range):
for j in range(n_runs):
km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
random_state=j).fit(X)
inertia[i, j] = km.inertia_
inertia = inertia.mean(axis=1)
failure_msg = ("Inertia %r should be decreasing"
" when n_init is increasing.") % list(inertia)
for i in range(len(n_init_range) - 1):
assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
# test calling the k_means function directly
# catch output
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
verbose=True)
finally:
sys.stdout = old_stdout
centers = cluster_centers
assert_equal(centers.shape, (n_clusters, n_features))
labels = labels
assert_equal(np.unique(labels).shape[0], n_clusters)
# check that the labels assignment are perfect (up to a permutation)
assert_equal(v_measure_score(true_labels, labels), 1.0)
assert_greater(inertia, 0.0)
# check warning when centers are passed
assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
init=centers)
    # too many clusters desired
assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
| bsd-3-clause |
RLPAgroScience/ROIseries | tests/test_ROIseries.py | 1 | 8862 | import io
import ROIseries as rs
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from sklearn.pipeline import make_pipeline
from sklearn.metrics import confusion_matrix
import numpy as np
# ----------------------------------------------------------------------------------------------------------------------
# Fixtures
# note: as DataFrames are mutable do not use scope="module" to prevent interactions between tests
@pytest.fixture()
def df():
df1 = pd.DataFrame([[10, 11, 12, 13, 14], [70, 71, 72, 73, 74], [50, 51, 52, 53, 54]],
["ID_1", "ID_7", "ID_5"],
["Feature_1_2457350.0000000000", "Feature_1_2457362.0000000000",
"Feature_1_2457374.0000000000", "Feature_1_2457386.0000000000",
"Feature_1_2457398.0000000000"])
df2 = pd.DataFrame([[15, 16, 17, 18, 19], [75, 76, 77, 78, 79], [55, 56, 57, 58, 59]],
["ID_1", "ID_7", "ID_5"],
["Feature_2_2457350.0000000000", "Feature_2_2457362.0000000000",
"Feature_2_2457374.0000000000", "Feature_2_2457386.0000000000",
"Feature_2_2457398.0000000000"])
df = pd.concat([df1, df2], axis=1)
df.index.names = ['ID']
df.columns.names = ['Feature_Time']
return df
@pytest.fixture()
def time(df):
df_time = rs.feature_transformers.timeindex_from_colsuffix(df)
time = df_time.index.get_level_values('time')
return time
@pytest.fixture()
def df_trf():
csv = """,ID_1,ID_1,ID_1,ID_1,ID_1,ID_1,ID_7,ID_7,ID_7,ID_7,ID_7,ID_7,ID_5,ID_5,ID_5,ID_5,ID_5,ID_5
,Feature_1,Feature_1,Feature_1,Feature_2,Feature_2,Feature_2,Feature_1,Feature_1,Feature_1,Feature_2,Feature_2,Feature_2,Feature_1,Feature_1,Feature_1,Feature_2,Feature_2,Feature_2
,m1,m2,p1,m1,m2,p1,m1,m2,p1,m1,m2,p1,m1,m2,p1,m1,m2,p1
2015-11-23,10,NA,11,15,NA,16,70,NA,71,75,NA,76,50,NA,51,55,NA,56
2015-12-05,11,10,12,16,15,17,71,70,72,76,75,77,51,50,52,56,55,57
2015-12-17,12,11,13,17,16,18,72,71,73,77,76,78,52,51,53,57,56,58
2015-12-29,13,12,14,18,17,19,73,72,74,78,77,79,53,52,54,58,57,59
2016-01-10,14,13,NA,19,18,NA,74,73,NA,79,78,NA,54,53,NA,59,58,NA
"""
df_trf = pd.read_csv(io.StringIO(csv), index_col=[0], header=[0, 1, 2])
df_trf.index = pd.DatetimeIndex(df_trf.index, name='time', freq='12D')
df_trf.columns.names = ['ID', 'feature', 'trf_label']
rs.sub_routines.sort_index_columns_inplace(df_trf)
df_trf = df_trf.stack('ID')
return df_trf
@pytest.fixture()
def metrics():
s_1_true = [True, True, False, False, False, False, False, False, False, True, True, True, True, True]
s_1_pred = [True, True, False, False, False, False, False, False, True, False, False, False, False, False]
s_1_tn, s_1_fp, s_1_fn, s_1_tp = confusion_matrix(s_1_true, s_1_pred).ravel()
s_1_n = len(s_1_true)
s_2_true = [True, True, True, True, False, False, False, False, False, False, False, False, False, False, False, True, True, True, True, True, True, True]
s_2_pred = [True, True, True, True, False, False, False, False, False, False, False, False, True, True, True, False, False, False, False, False, False, False]
s_2_tn, s_2_fp, s_2_fn, s_2_tp = confusion_matrix(s_2_true, s_2_pred).ravel()
s_2_n = len(s_2_true)
y_true = pd.Series(np.array(s_1_true + s_2_true),
index=pd.Index(np.array(['s_1'] * len(s_1_true) + ['s_2'] * len(s_2_true)), name='strata'))
y_pred = np.array(s_1_pred + s_2_pred)
metrics = {'y_true':y_true, 'y_pred':y_pred,
"s_1_tn":s_1_tn, "s_1_fp":s_1_fp, "s_1_fn":s_1_fn, "s_1_tp":s_1_tp, "s_1_n":s_1_n,
"s_2_tn":s_2_tn, "s_2_fp":s_2_fp, "s_2_fn":s_2_fn, "s_2_tp":s_2_tp, "s_2_n":s_2_n}
return metrics
# ----------------------------------------------------------------------------------------------------------------------
# Tests
def test_timeindex_from_colsuffix_SideEffects(df):
df_copy = df.copy()
_ = rs.feature_transformers.timeindex_from_colsuffix(df)
assert_frame_equal(df, df_copy)
def test_timeindex_from_colsuffix_datetime(df):
result = rs.feature_transformers.timeindex_from_colsuffix(df)
assert type(result.index) == pd.core.indexes.datetimes.DatetimeIndex
def test_reltime_from_absdate_freq(time):
reltime, freq = rs.feature_transformers.reltime_from_absdate(time)
assert freq == '12D'
def test_reltime_from_absdate_reltime(time):
reltime, freq = rs.feature_transformers.reltime_from_absdate(time)
assert reltime.equals(pd.Index([0.0, 1.0, 2.0, 3.0, 4.0], dtype='float64', name='reltime'))
def test_trf_SideEffects(df, df_trf):
df_copy = df.copy()
df_time = rs.feature_transformers.timeindex_from_colsuffix(df).stack('ID')
shift_dict = dict(zip(["m2", "m1", "p1"], [-1, 0, 1]))
t1 = rs.feature_transformers.TAFtoTRF(shift_dict, "ID")
p1 = make_pipeline(t1)
result = p1.fit_transform(df_time)
assert_frame_equal(df, df_copy)
def test_trf_result(df, df_trf):
df_time = rs.feature_transformers.timeindex_from_colsuffix(df)
rs.sub_routines.sort_index_columns_inplace(df_time)
df_time = df_time.stack('ID')
shift_dict = dict(zip(["m2", "m1", "p1"], [-1, 0, 1]))
t1 = rs.feature_transformers.TAFtoTRF(shift_dict, 'ID')
p1 = make_pipeline(t1)
result = p1.fit_transform(df_time)
# integers are converted to float during shifting to allow NaN values, which is expected behaviour
assert_frame_equal(result, df_trf, check_dtype=False)
def test_doy_circular():
""" doy_circular should return evenly distributed euclidean 2D distances across (leap) years """
doy_circular = rs.feature_transformers.doy_circular(pd.date_range('2015-01-01', '2016-12-31'))
doy_sin_diff = np.diff(doy_circular['doy_sin']) ** 2
doy_cos_diff = np.diff(doy_circular['doy_cos']) ** 2
distance = np.sqrt(doy_sin_diff + doy_cos_diff)
# leap / no leap years have a slightly different distance between days, which is expected:
leap_diff = ((1 / 365) - (1 / 366)) * np.pi
# figure out the significant number of digits: position of the first decimal place where leap_diff is not 0
sign_digit = (np.where(np.array([int((10 ** i) * leap_diff) for i in range(1, 10)])))[0][0]
# assert that there is only one unique distances (considering the sign_digits)
assert len(np.unique(np.round(distance, sign_digit))) == 1
def test_errors_per_stratum_count(metrics):
m = metrics
# n_errors, n_samples = ([s_1_fp + s_1_fn, s_2_fp + s_2_fn], [len(s_1_true), len(s_2_true)])
n_errors = rs.scoring_metrics.errors_per_stratum_count(m["y_true"], m["y_pred"], "strata")
assert n_errors == np.mean([m["s_1_fp"] + m["s_1_fn"], m["s_2_fp"] + m["s_2_fn"]])
def test_errors_per_stratum_count_normalize(metrics):
m = metrics
normalize_denominator = 7 # e.g. days/week
fraction_s_1 = m['s_1_n'] / normalize_denominator
fraction_s_2 = m['s_2_n'] / normalize_denominator
normalized_errors = np.mean([(m["s_1_fp"] + m["s_1_fn"]) / fraction_s_1,
(m["s_2_fp"] + m["s_2_fn"]) / fraction_s_2])
n_errors = rs.scoring_metrics.errors_per_stratum_count(m["y_true"], m["y_pred"], "strata",
normalize_denominator=normalize_denominator)
assert n_errors == normalized_errors
def test_idx_corners():
size = 3
arr = np.zeros((size, size))
directions_results = {
'up_right': np.array([
[1, 1, 1],
[0, 1, 1],
[0, 0, 1]]),
'down_right': np.array([
[0, 0, 1],
[0, 1, 1],
[1, 1, 1]]),
'down_left': np.array([
[1, 0, 0],
[1, 1, 0],
[1, 1, 1]]),
'up_left': np.array([
[1, 1, 1],
[1, 1, 0],
[1, 0, 0]])
}
for direction, result in directions_results.items():
temp = arr.copy()
temp[rs.sub_routines.idx_corners(size, direction)] = 1
np.testing.assert_array_equal(temp, result)
def test_drop_correlated():
ten = np.arange(10)
ten_reverse = ten[::-1]
ten_drop = ten.copy()
ten_drop[3:6] = [5, 4, 3]
arr = (np.array([ten, ten_reverse, ten_drop])).transpose()
df = pd.DataFrame(np.concatenate([arr, arr], axis=1))
transformer = rs.feature_transformers.DropCorrelated(df.corr(), 0.9, absolute_correlation=False)
result = transformer.fit_transform(df)
pd.testing.assert_frame_equal(result, pd.DataFrame(np.array([ten, ten_reverse]).transpose()))
transformer = rs.feature_transformers.DropCorrelated(df.corr(), 0.9, absolute_correlation=True)
result = transformer.fit_transform(df)
pd.testing.assert_frame_equal(result, pd.DataFrame(np.array([ten]).transpose()))
| agpl-3.0 |
asurve/incubator-systemml | scripts/perftest/python/google_docs/stats.py | 15 | 3540 | #!/usr/bin/env python3
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
import argparse
import os
import pprint
from os.path import join
import matplotlib.pyplot as plt
from gdocs_utils import auth
# Dict
# {algo_name: [{'algo_1.0': t1}, {'algo_2.0': t2}]}
def get_formatted_data(sheet_data):
"""
    Read all the data from google sheets and transform it into a dictionary
    that can be used for plotting later.
"""
algo_dict = {}
for i in sheet_data:
inn_count = 0
data = []
for key, val in i.items():
inn_count += 1
if inn_count < 3:
data.append(key)
data.append(val)
if inn_count == 2:
t1, v1, _, v2 = data
if len(str(v2)) > 0:
if v1 not in algo_dict:
algo_dict[v1] = [{t1: v2}]
else:
algo_dict[v1].append({t1: v2})
inn_count = 0
data = []
return algo_dict
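# Illustrative sketch (hypothetical sheet contents): columns are read in pairs,
# so a record such as {'algo_1.0': 'Kmeans', 'time_1.0': 42.1, ...} would be
# folded into
#     {'Kmeans': [{'algo_1.0': 42.1}]}
# i.e. algorithm name -> list of {version-like column header: runtime}.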
def plot(x, y, xlab, ylab, title):
"""
Save plots to the current folder based on the arguments
"""
CWD = os.getcwd()
PATH = join(CWD, title)
width = .35
plt.bar(x, y, color="red", width=width)
plt.xticks(x)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.savefig(PATH + '.png')
print('Plot {} generated'.format(title))
return plt
# Example Usage
# ./stats.py --auth ../key/client_json.json --exec-type singlenode
if __name__ == '__main__':
execution_mode = ['hybrid_spark', 'singlenode']
cparser = argparse.ArgumentParser(description='System-ML Statistics Script')
cparser.add_argument('--auth', help='Location to read auth file',
required=True, metavar='')
cparser.add_argument('--exec-type', help='Execution mode', choices=execution_mode,
required=True, metavar='')
cparser.add_argument('--plot', help='Algorithm to plot', metavar='')
args = cparser.parse_args()
sheet = auth(args.auth, args.exec_type)
all_data = sheet.get_all_records()
plot_data = get_formatted_data(all_data)
if args.plot is not None:
print(plot_data[args.plot])
title = args.plot
ylab = 'Time in sec'
xlab = 'Version'
x = []
y = []
for i in plot_data[args.plot]:
version = list(i.keys())[0]
time = list(i.values())[0]
y.append(time)
x.append(version)
x = list(map(lambda x: float(x.split('_')[1]), x))
plot(x, y, xlab, ylab, title)
else:
pprint.pprint(plot_data, width=1) | apache-2.0 |
xyguo/scikit-learn | examples/neural_networks/plot_mnist_filters.py | 57 | 2195 | """
=====================================
Visualization of MLP weights on MNIST
=====================================
Sometimes looking at the learned coefficients of a neural network can provide
insight into the learning behavior. For example if weights look unstructured,
maybe some were not used at all, or if very large coefficients exist, maybe
regularization was too low or the learning rate too high.
This example shows how to plot some of the first layer weights in a
MLPClassifier trained on the MNIST dataset.
The input data consists of 28x28 pixel handwritten digits, leading to 784
features in the dataset. Therefore the first layer weight matrix has the shape
(784, hidden_layer_sizes[0]). We can therefore visualize a single column of
the weight matrix as a 28x28 pixel image.
To make the example run faster, we use very few hidden units, and train only
for a very short time. Training longer would result in weights with a much
smoother spatial appearance.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_mldata
from sklearn.neural_network import MLPClassifier
mnist = fetch_mldata("MNIST original")
# rescale the data, use the traditional train/test split
X, y = mnist.data / 255., mnist.target
X_train, X_test = X[:60000], X[60000:]
y_train, y_test = y[:60000], y[60000:]
# mlp = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
# algorithm='sgd', verbose=10, tol=1e-4, random_state=1)
mlp = MLPClassifier(hidden_layer_sizes=(50,), max_iter=10, alpha=1e-4,
algorithm='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
mlp.fit(X_train, y_train)
print("Training set score: %f" % mlp.score(X_train, y_train))
print("Test set score: %f" % mlp.score(X_test, y_test))
fig, axes = plt.subplots(4, 4)
# use global min / max to ensure all weights are shown on the same scale
vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()
for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()):
ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=.5 * vmin,
vmax=.5 * vmax)
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.15/_downloads/make_report.py | 5 | 1576 | """
================================
Make an MNE-Report with a Slider
================================
In this example, MEG evoked data are plotted in an html slider.
"""
# Authors: Teon Brooks <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
from mne.report import Report
from mne.datasets import sample
from mne import read_evokeds
from matplotlib import pyplot as plt
data_path = sample.data_path()
meg_path = data_path + '/MEG/sample'
subjects_dir = data_path + '/subjects'
evoked_fname = meg_path + '/sample_audvis-ave.fif'
###############################################################################
# Do standard folder parsing (this can take a couple of minutes):
report = Report(image_format='png', subjects_dir=subjects_dir,
info_fname=evoked_fname, subject='sample')
report.parse_folder(meg_path)
###############################################################################
# Add a custom section with an evoked slider:
# Load the evoked data
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(None, 0), verbose=False)
evoked.crop(0, .2)
times = evoked.times[::4]
# Create a list of figs for the slider
figs = list()
for t in times:
figs.append(evoked.plot_topomap(t, vmin=-300, vmax=300, res=100,
show=False))
plt.close(figs[-1])
report.add_slider_to_section(figs, times, 'Evoked Response',
image_format='svg')
# to save report
# report.save('foobar.html', True)
| bsd-3-clause |
bert9bert/statsmodels | statsmodels/tsa/base/tests/test_datetools.py | 4 | 3286 | from datetime import datetime
from pandas import DatetimeIndex
import numpy.testing as npt
from statsmodels.tsa.base.datetools import (
date_parser, date_range_str, dates_from_str, dates_from_range)
from pandas import DatetimeIndex, PeriodIndex
def test_regex_matching_month():
t1 = "1999m4"
t2 = "1999:m4"
t3 = "1999:mIV"
t4 = "1999mIV"
result = datetime(1999, 4, 30)
npt.assert_equal(date_parser(t1), result)
npt.assert_equal(date_parser(t2), result)
npt.assert_equal(date_parser(t3), result)
npt.assert_equal(date_parser(t4), result)
def test_regex_matching_quarter():
t1 = "1999q4"
t2 = "1999:q4"
t3 = "1999:qIV"
t4 = "1999qIV"
result = datetime(1999, 12, 31)
npt.assert_equal(date_parser(t1), result)
npt.assert_equal(date_parser(t2), result)
npt.assert_equal(date_parser(t3), result)
npt.assert_equal(date_parser(t4), result)
def test_dates_from_range():
results = [datetime(1959, 3, 31, 0, 0),
datetime(1959, 6, 30, 0, 0),
datetime(1959, 9, 30, 0, 0),
datetime(1959, 12, 31, 0, 0),
datetime(1960, 3, 31, 0, 0),
datetime(1960, 6, 30, 0, 0),
datetime(1960, 9, 30, 0, 0),
datetime(1960, 12, 31, 0, 0),
datetime(1961, 3, 31, 0, 0),
datetime(1961, 6, 30, 0, 0),
datetime(1961, 9, 30, 0, 0),
datetime(1961, 12, 31, 0, 0),
datetime(1962, 3, 31, 0, 0),
datetime(1962, 6, 30, 0, 0)]
dt_range = dates_from_range('1959q1', '1962q2')
npt.assert_(results == dt_range)
# test with starting period not the first with length
results = results[2:]
dt_range = dates_from_range('1959q3', length=len(results))
npt.assert_(results == dt_range)
# check month
results = [datetime(1959, 3, 31, 0, 0),
datetime(1959, 4, 30, 0, 0),
datetime(1959, 5, 31, 0, 0),
datetime(1959, 6, 30, 0, 0),
datetime(1959, 7, 31, 0, 0),
datetime(1959, 8, 31, 0, 0),
datetime(1959, 9, 30, 0, 0),
datetime(1959, 10, 31, 0, 0),
datetime(1959, 11, 30, 0, 0),
datetime(1959, 12, 31, 0, 0),
datetime(1960, 1, 31, 0, 0),
datetime(1960, 2, 28, 0, 0),
datetime(1960, 3, 31, 0, 0),
datetime(1960, 4, 30, 0, 0),
datetime(1960, 5, 31, 0, 0),
datetime(1960, 6, 30, 0, 0),
datetime(1960, 7, 31, 0, 0),
datetime(1960, 8, 31, 0, 0),
datetime(1960, 9, 30, 0, 0),
datetime(1960, 10, 31, 0, 0),
               datetime(1960, 11, 30, 0, 0),
               datetime(1960, 12, 31, 0, 0),
datetime(1961, 1, 31, 0, 0),
datetime(1961, 2, 28, 0, 0),
datetime(1961, 3, 31, 0, 0),
datetime(1961, 4, 30, 0, 0),
datetime(1961, 5, 31, 0, 0),
datetime(1961, 6, 30, 0, 0),
datetime(1961, 7, 31, 0, 0),
datetime(1961, 8, 31, 0, 0),
datetime(1961, 9, 30, 0, 0),
datetime(1961, 10, 31, 0, 0)]
    dt_range = dates_from_range("1959m3", length=len(results))
    npt.assert_(results == dt_range)
| bsd-3-clause |
deepfield/ibis | ibis/clickhouse/client.py | 1 | 12156 | import re
import numpy as np
import pandas as pd
from collections import OrderedDict
import ibis.common as com
import ibis.expr.types as ir
import ibis.expr.schema as sch
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
from ibis.config import options
from ibis.compat import zip as czip, parse_version
from ibis.client import Query, Database, DatabaseEntity, SQLClient
from ibis.clickhouse.compiler import ClickhouseDialect, build_ast
from ibis.util import log
from ibis.sql.compiler import DDL
from clickhouse_driver.client import Client as _DriverClient
fully_qualified_re = re.compile(r"(.*)\.(?:`(.*)`|(.*))")
base_typename_re = re.compile(r"(\w+)")
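# Hedged illustration (added): for a name like "db.`events`" the pattern above is
# expected to yield groups ('db', 'events', None), and ('db', None, 'events') for
# the unquoted form "db.events"; ClickhouseTable._match_name() below relies on this.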
_clickhouse_dtypes = {
'Null': dt.Null,
'Nothing': dt.Null,
'UInt8': dt.UInt8,
'UInt16': dt.UInt16,
'UInt32': dt.UInt32,
'UInt64': dt.UInt64,
'Int8': dt.Int8,
'Int16': dt.Int16,
'Int32': dt.Int32,
'Int64': dt.Int64,
'Float32': dt.Float32,
'Float64': dt.Float64,
'String': dt.String,
'FixedString': dt.String,
'Date': dt.Date,
'DateTime': dt.Timestamp
}
_ibis_dtypes = {v: k for k, v in _clickhouse_dtypes.items()}
_ibis_dtypes[dt.String] = 'String'
class ClickhouseDataType(object):
__slots__ = 'typename', 'nullable'
def __init__(self, typename, nullable=False):
m = base_typename_re.match(typename)
base_typename = m.groups()[0]
if base_typename not in _clickhouse_dtypes:
raise com.UnsupportedBackendType(typename)
self.typename = base_typename
self.nullable = nullable
def __str__(self):
if self.nullable:
return 'Nullable({})'.format(self.typename)
else:
return self.typename
def __repr__(self):
return '<Clickhouse {}>'.format(str(self))
@classmethod
def parse(cls, spec):
# TODO(kszucs): spare parsing, depends on clickhouse-driver#22
if spec.startswith('Nullable'):
return cls(spec[9:-1], nullable=True)
else:
return cls(spec)
def to_ibis(self):
return _clickhouse_dtypes[self.typename](nullable=self.nullable)
@classmethod
def from_ibis(cls, dtype, nullable=None):
typename = _ibis_dtypes[type(dtype)]
if nullable is None:
nullable = dtype.nullable
return cls(typename, nullable=nullable)
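# Hedged usage sketch (added for illustration; values assume the mapping tables above):
#   ClickhouseDataType.parse('Nullable(Int32)')      # typename 'Int32', nullable=True
#   str(ClickhouseDataType('Int32', nullable=True))  # -> 'Nullable(Int32)'
#   ClickhouseDataType('DateTime').to_ibis()         # -> an ibis Timestamp dtype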
@dt.dtype.register(ClickhouseDataType)
def clickhouse_to_ibis_dtype(clickhouse_dtype):
return clickhouse_dtype.to_ibis()
class ClickhouseDatabase(Database):
pass
class ClickhouseQuery(Query):
def _external_tables(self):
tables = []
for name, df in self.extra_options.get('external_tables', {}).items():
if not isinstance(df, pd.DataFrame):
raise TypeError('External table is not an instance of pandas '
'dataframe')
schema = sch.infer(df)
chtypes = map(ClickhouseDataType.from_ibis, schema.types)
structure = list(zip(schema.names, map(str, chtypes)))
tables.append(dict(name=name,
data=df.to_dict('records'),
structure=structure))
return tables
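    # Hedged illustration (added): each entry built above is expected to look like
    #   {'name': 'ext', 'structure': [('id', 'Int64'), ('val', 'Float64')],
    #    'data': [{'id': 1, 'val': 0.5}, ...]}
    # which appears to be the shape clickhouse_driver accepts for external tables.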
def execute(self):
cursor = self.client._execute(
self.compiled_sql,
external_tables=self._external_tables()
)
result = self._fetch(cursor)
return self._wrap_result(result)
def _fetch(self, cursor):
data, colnames, _ = cursor
if not len(data):
# handle empty resultset
return pd.DataFrame([], columns=colnames)
df = pd.DataFrame.from_dict(
OrderedDict(zip(colnames, data))
)
return self.schema().apply_to(df)
class ClickhouseTable(ir.TableExpr, DatabaseEntity):
"""References a physical table in Clickhouse"""
@property
def _qualified_name(self):
return self.op().args[0]
@property
def _unqualified_name(self):
return self._match_name()[1]
@property
def _client(self):
return self.op().args[2]
def _match_name(self):
m = fully_qualified_re.match(self._qualified_name)
if not m:
raise com.IbisError('Cannot determine database name from {0}'
.format(self._qualified_name))
db, quoted, unquoted = m.groups()
return db, quoted or unquoted
@property
def _database(self):
return self._match_name()[0]
def invalidate_metadata(self):
self._client.invalidate_metadata(self._qualified_name)
def metadata(self):
"""
Return parsed results of DESCRIBE FORMATTED statement
Returns
-------
meta : TableMetadata
"""
return self._client.describe_formatted(self._qualified_name)
describe_formatted = metadata
@property
def name(self):
return self.op().name
def _execute(self, stmt):
return self._client._execute(stmt)
def insert(self, obj, **kwargs):
from .identifiers import quote_identifier
schema = self.schema()
assert isinstance(obj, pd.DataFrame)
assert set(schema.names) >= set(obj.columns)
columns = ', '.join(map(quote_identifier, obj.columns))
query = 'INSERT INTO {table} ({columns}) VALUES'.format(
table=self._qualified_name, columns=columns)
# convert data columns with datetime64 pandas dtype to native date
# because clickhouse-driver 0.0.10 does arithmetic operations on it
obj = obj.copy()
for col in obj.select_dtypes(include=[np.datetime64]):
if isinstance(schema[col], dt.Date):
obj[col] = obj[col].dt.date
data = obj.to_dict('records')
return self._client.con.process_insert_query(query, data, **kwargs)
class ClickhouseDatabaseTable(ops.DatabaseTable):
pass
class ClickhouseClient(SQLClient):
"""An Ibis client interface that uses Clickhouse"""
database_class = ClickhouseDatabase
query_class = ClickhouseQuery
dialect = ClickhouseDialect
table_class = ClickhouseDatabaseTable
table_expr_class = ClickhouseTable
def __init__(self, *args, **kwargs):
self.con = _DriverClient(*args, **kwargs)
def _build_ast(self, expr, context):
return build_ast(expr, context)
@property
def current_database(self):
# might be better to use driver.Connection instead of Client
return self.con.connection.database
def log(self, msg):
log(msg)
def close(self):
"""Close Clickhouse connection and drop any temporary objects"""
self.con.disconnect()
def _execute(self, query, external_tables=(), results=True):
if isinstance(query, DDL):
query = query.compile()
self.log(query)
response = self.con.process_ordinary_query(
query, columnar=True, with_column_types=True,
external_tables=external_tables
)
if not results:
return response
data, columns = response
colnames, typenames = czip(*columns)
coltypes = list(map(ClickhouseDataType.parse, typenames))
return data, colnames, coltypes
def _fully_qualified_name(self, name, database):
if bool(fully_qualified_re.search(name)):
return name
database = database or self.current_database
return '{0}.`{1}`'.format(database, name)
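    # e.g. (illustration, added): _fully_qualified_name('events', None) gives
    # 'default.`events`' when the current database is 'default'; a name that is
    # already qualified, such as 'db.`events`', is returned unchanged.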
def list_tables(self, like=None, database=None):
"""
List tables in the current (or indicated) database. Like the SHOW
TABLES command in the clickhouse-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all tables starting with 'foo'
database : string, default None
If not passed, uses the current/default database
Returns
-------
tables : list of strings
"""
statement = 'SHOW TABLES'
if database:
statement += " FROM `{0}`".format(database)
if like:
m = fully_qualified_re.match(like)
if m:
database, quoted, unquoted = m.groups()
like = quoted or unquoted
return self.list_tables(like=like, database=database)
statement += " LIKE '{0}'".format(like)
data, _, _ = self.raw_sql(statement, results=True)
return data[0]
def set_database(self, name):
"""
Set the default database scope for client
"""
self.con.database = name
def exists_database(self, name):
"""
Checks if a given database exists
Parameters
----------
name : string
Database name
Returns
-------
if_exists : boolean
"""
return len(self.list_databases(like=name)) > 0
def list_databases(self, like=None):
"""
List databases in the Clickhouse cluster.
Like the SHOW DATABASES command in the clickhouse-shell.
Parameters
----------
like : string, default None
e.g. 'foo*' to match all tables starting with 'foo'
Returns
-------
databases : list of strings
"""
statement = 'SELECT name FROM system.databases'
if like:
statement += " WHERE name LIKE '{0}'".format(like)
data, _, _ = self.raw_sql(statement, results=True)
return data[0]
def get_schema(self, table_name, database=None):
"""
Return a Schema object for the indicated table and database
Parameters
----------
table_name : string
May be fully qualified
database : string, default None
Returns
-------
schema : ibis Schema
"""
qualified_name = self._fully_qualified_name(table_name, database)
query = 'DESC {0}'.format(qualified_name)
data, _, _ = self.raw_sql(query, results=True)
colnames, coltypes = data[:2]
coltypes = list(map(ClickhouseDataType.parse, coltypes))
return sch.schema(colnames, coltypes)
@property
def client_options(self):
return self.con.options
def set_options(self, options):
self.con.set_options(options)
def reset_options(self):
# Must nuke all cursors
raise NotImplementedError
def exists_table(self, name, database=None):
"""
Determine if the indicated table or view exists
Parameters
----------
name : string
database : string, default None
Returns
-------
if_exists : boolean
"""
return len(self.list_tables(like=name, database=database)) > 0
def _ensure_temp_db_exists(self):
        name = options.clickhouse.temp_db
if not self.exists_database(name):
self.create_database(name, force=True)
def _get_table_schema(self, tname):
return self.get_schema(tname)
def _get_schema_using_query(self, query):
_, colnames, coltypes = self._execute(query)
return sch.schema(colnames, coltypes)
def _exec_statement(self, stmt, adapter=None):
query = ClickhouseQuery(self, stmt)
result = query.execute()
if adapter is not None:
result = adapter(result)
return result
def _table_command(self, cmd, name, database=None):
qualified_name = self._fully_qualified_name(name, database)
return '{0} {1}'.format(cmd, qualified_name)
@property
def version(self):
self.con.connection.force_connect()
try:
server = self.con.connection.server_info
vstring = '{}.{}.{}'.format(server.version_major,
server.version_minor,
server.revision)
except Exception:
self.con.connection.disconnect()
raise
else:
return parse_version(vstring)
| apache-2.0 |
massmutual/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
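# Illustrative note (added): when include_self resolves to True the query reuses the
# fitted data (X._fit_X), so each sample counts as its own first neighbor and A[i, i]
# is nonzero in 'connectivity' mode; when False the query is None and the estimator
# excludes each sample from its own neighborhood.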
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
dgies/incubator-airflow | airflow/contrib/plugins/metastore_browser/main.py | 62 | 5773 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import json
from flask import Blueprint, request
from flask_admin import BaseView, expose
import pandas as pd
from airflow.hooks.hive_hooks import HiveMetastoreHook, HiveCliHook
from airflow.hooks.mysql_hook import MySqlHook
from airflow.hooks.presto_hook import PrestoHook
from airflow.plugins_manager import AirflowPlugin
from airflow.www import utils as wwwutils
METASTORE_CONN_ID = 'metastore_default'
METASTORE_MYSQL_CONN_ID = 'metastore_mysql'
PRESTO_CONN_ID = 'presto_default'
HIVE_CLI_CONN_ID = 'hive_default'
DEFAULT_DB = 'default'
DB_WHITELIST = None
DB_BLACKLIST = ['tmp']
TABLE_SELECTOR_LIMIT = 2000
# Keeping pandas from truncating long strings
pd.set_option('display.max_colwidth', -1)
# Creating a flask admin BaseView
class MetastoreBrowserView(BaseView, wwwutils.DataProfilingMixin):
@expose('/')
def index(self):
sql = """
SELECT
a.name as db, db_location_uri as location,
count(1) as object_count, a.desc as description
FROM DBS a
JOIN TBLS b ON a.DB_ID = b.DB_ID
GROUP BY a.name, db_location_uri, a.desc
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
df.db = (
'<a href="/admin/metastorebrowserview/db/?db=' +
df.db + '">' + df.db + '</a>')
table = df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
escape=False,
na_rep='',)
return self.render(
"metastore_browser/dbs.html", table=table)
@expose('/table/')
def table(self):
table_name = request.args.get("table")
m = HiveMetastoreHook(METASTORE_CONN_ID)
table = m.get_table(table_name)
return self.render(
"metastore_browser/table.html",
table=table, table_name=table_name, datetime=datetime, int=int)
@expose('/db/')
def db(self):
db = request.args.get("db")
m = HiveMetastoreHook(METASTORE_CONN_ID)
tables = sorted(m.get_tables(db=db), key=lambda x: x.tableName)
return self.render(
"metastore_browser/db.html", tables=tables, db=db)
@wwwutils.gzipped
@expose('/partitions/')
def partitions(self):
schema, table = request.args.get("table").split('.')
sql = """
SELECT
a.PART_NAME,
a.CREATE_TIME,
c.LOCATION,
c.IS_COMPRESSED,
c.INPUT_FORMAT,
c.OUTPUT_FORMAT
FROM PARTITIONS a
JOIN TBLS b ON a.TBL_ID = b.TBL_ID
JOIN DBS d ON b.DB_ID = d.DB_ID
JOIN SDS c ON a.SD_ID = c.SD_ID
WHERE
b.TBL_NAME like '{table}' AND
d.NAME like '{schema}'
ORDER BY PART_NAME DESC
""".format(**locals())
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@wwwutils.gzipped
@expose('/objects/')
def objects(self):
where_clause = ''
if DB_WHITELIST:
dbs = ",".join(["'" + db + "'" for db in DB_WHITELIST])
where_clause = "AND b.name IN ({})".format(dbs)
if DB_BLACKLIST:
dbs = ",".join(["'" + db + "'" for db in DB_BLACKLIST])
where_clause = "AND b.name NOT IN ({})".format(dbs)
sql = """
SELECT CONCAT(b.NAME, '.', a.TBL_NAME), TBL_TYPE
FROM TBLS a
JOIN DBS b ON a.DB_ID = b.DB_ID
WHERE
a.TBL_NAME NOT LIKE '%tmp%' AND
a.TBL_NAME NOT LIKE '%temp%' AND
b.NAME NOT LIKE '%tmp%' AND
b.NAME NOT LIKE '%temp%'
{where_clause}
LIMIT {LIMIT};
""".format(where_clause=where_clause, LIMIT=TABLE_SELECTOR_LIMIT)
h = MySqlHook(METASTORE_MYSQL_CONN_ID)
d = [
{'id': row[0], 'text': row[0]}
for row in h.get_records(sql)]
return json.dumps(d)
@wwwutils.gzipped
@expose('/data/')
def data(self):
table = request.args.get("table")
sql = "SELECT * FROM {table} LIMIT 1000;".format(table=table)
h = PrestoHook(PRESTO_CONN_ID)
df = h.get_pandas_df(sql)
return df.to_html(
classes="table table-striped table-bordered table-hover",
index=False,
na_rep='',)
@expose('/ddl/')
def ddl(self):
table = request.args.get("table")
sql = "SHOW CREATE TABLE {table};".format(table=table)
h = HiveCliHook(HIVE_CLI_CONN_ID)
return h.run_cli(sql)
v = MetastoreBrowserView(category="Plugins", name="Hive Metadata Browser")
# Creating a flask blueprint to integrate the templates and static folder
bp = Blueprint(
"metastore_browser", __name__,
template_folder='templates',
static_folder='static',
static_url_path='/static/metastore_browser')
# Defining the plugin class
class MetastoreBrowserPlugin(AirflowPlugin):
name = "metastore_browser"
flask_blueprints = [bp]
admin_views = [v]
| apache-2.0 |
hanteng/country-groups | scripts/_construct_data_CPLP.py | 1 | 4047 | # -*- coding: utf-8 -*-
# Discrimination knows no bounds; turning back is the shore. Keys rise and fall, feelings real and illusory.
# Correction: 0->ASEAN, 49-> GB, 52 ->KR
import os.path, glob
import requests
from lxml.html import fromstring, tostring, parse
from io import StringIO, BytesIO
import codecs
import pandas as pd
import json
XML_encoding="utf-8"
# Data source
URL_ = "http://www.cplp.org/id-2597.aspx"
URL_country_names_template = "https://raw.githubusercontent.com/hanteng/country-names/master/data/CLDR_country_name_{locale}.tsv"
URL_country_names = URL_country_names_template.format(locale= 'en')
# Xpath extraction
_xpath='//*[@id="subPageMenu"]/li/a/img/@alt'
## Output Lists
PE = 'CPLP'
path_data = u'../data'
outputfn1 = os.path.join(path_data, "PE_org.json")
outputfn2 = os.path.join(path_data, "CLDR_UN_region.tsv")
def url_request (url):
r = requests.get(url)
if r.status_code == 200:
#r.raw.decode_content = True
return r
else:
        print ("Downloading the data from {0} failed. Please check Internet connections.".format(url))
return None
def url_local_request (url):
fn_local = os.path.join(path_data, PE+ ".htm")
print (fn_local) #debug
try:
tree = parse(fn_local)
except:
r = url_request (url)
XML_src=r.content
with codecs.open(fn_local, "w", XML_encoding) as file:
file.write(XML_src.decode(XML_encoding))
#from lxml.html.clean import clean_html
#XML_src = clean_html(XML_src)
tree = fromstring(XML_src)
return tree
t = url_local_request(URL_)
list_country_names_Web = t.xpath(_xpath)
print (list_country_names_Web)
## Retrive data directly from unicode-cldr project hosted at github
print ("Retrieve country names data now ...")
locale = "en"
url = URL_country_names_template.format(locale=locale)
df_results = pd.read_csv(url, sep='\t', encoding='utf-8',
na_values=[], keep_default_na = False,
names = ['c','n'] , index_col='c',
)
## Construct dictionary for country/region names
c_names = df_results.to_dict()['n'] #http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_dict.html
c_names_inv = {v: k for k, v in c_names.items()}
## Country names fuzzy match
from fuzzywuzzy import process
choice=[]
for i, c_name_Web in enumerate(list_country_names_Web):
#found_candidates = [x for x in c_names_inv.keys() if fuzzy_match(x,c_name_Web)==True]
found_candidate = process.extract(c_name_Web, c_names_inv.keys(), limit=1)
found_candidate_c = c_names_inv[found_candidate[0][0]]
choice_item = [i, c_name_Web, found_candidate, found_candidate_c]
#print (choice_item)
choice.append(choice_item)
import ast
done = False
while not(done):
try:
# Note: Python 2.x users should use raw_input, the equivalent of 3.x's input
prn= [repr(x) for x in choice]
print ("\n\r".join(prn))
i = int(input("Please enter your corrections: Serial no (-1:None): "))
if i==-1:
print ("Done!")
            done = True
break
else:
if i in range(len(choice)):
c = input("Please enter your corrections: Country code (ISO-alpha2): ")
choice[i][3] = c
else:
print("Sorry, Please revise your input.")
except ValueError:
print("Sorry, I didn't understand that.")
#better try again... Return to the start of the loop
continue
list_country_codes_Web = [x[3] for x in choice]
print (list_country_codes_Web)
print (list_country_names_Web)
print ("==========")
PE_org = dict()
with codecs.open(outputfn1, encoding='utf-8', mode='r+') as fp:
lines=fp.readlines()
PE_org = json.loads(u"".join(lines))
print ("Before:", PE_org.keys())
d={PE: list_country_codes_Web}
print("Adding:",d)
PE_org.update(d)
print ("After:", PE_org.keys())
with codecs.open(outputfn1, encoding='utf-8', mode='w') as fp:
json.dump(PE_org, fp)
| gpl-3.0 |
GiggleLiu/nrg_mapping | nrgmap/tests/test_tick.py | 1 | 1796 | '''
Tests for tickers.
'''
from numpy import *
from numpy.testing import dec,assert_,assert_raises,assert_almost_equal,assert_allclose
from matplotlib.pyplot import *
from scipy import sparse as sps
from scipy.linalg import qr,eigvalsh,norm
import time,pdb,sys
from ..ticklib import *
from ..discretization import *
def random_function(k=5):
'''
Generate a random 1d function.
Parameters:
        :k: int, the order of the function (controls how much it fluctuates).
Return:
function,
'''
return poly1d(random.random(k)*10-5)
def test_tick():
'''test for ticks.'''
tick_types=['log','sclog','adaptive','linear','adaptive_linear','ed']
Lambda=1.7
N=20
wlist=get_wlist(w0=1e-8,Nw=2000,mesh_type='log',Gap=0,D=[-0.5,1])
pmask=wlist>0
ion()
rholist=abs(random_function()(wlist))
if ndim(rholist)>1:
rholist=sqrt((rholist*swapaxes(rholist,1,2)).sum(axis=(1,2)))
colors=['r','g','b','k','y','c']
plts=[]
for i,tick_type in enumerate(tick_types):
offset_y=i
ticker=get_ticker(tick_type,D=wlist[-1],N=N,Lambda=Lambda,Gap=0,wlist=wlist[pmask],rholist=rholist[pmask])
plt=scatter(ticker(arange(2,2+N+1)),offset_y*ones(N+1),edgecolor='none',color=colors[i],label=tick_type)
plts.append(plt)
#for negative branch
ticker_=get_ticker(tick_type,D=-wlist[0],N=N,Lambda=Lambda,Gap=0,wlist=-wlist[~pmask][::-1],rholist=rholist[~pmask][::-1])
plt=scatter(-ticker_(arange(2,2+N+1)),offset_y*ones(N+1),edgecolor='none',color=colors[i],label=tick_type)
        #consistency check
assert_allclose(ticker(arange(1,N+2)),[ticker(i) for i in range(1,N+2)])
legend(plts,tick_types,loc=2)
plot(wlist,rholist)
pdb.set_trace()
if __name__=='__main__':
test_tick()
| mit |
Cassianokunsch/MonitorTemperature | Codigo/setup.py | 2 | 1184 | import sys
from cx_Freeze import setup, Executable
shortcut_table = [
("DesktopShortcut", # Shortcut
"DesktopFolder", # Directory_
"Monitora Temperatura", # Name
"TARGETDIR", # Component_
"[TARGETDIR]Monitora Temperatura.exe",# Target
None, # Arguments
None, # Description
None, # Hotkey
None, # Icon
None, # IconIndex
None, # ShowCmd
'TARGETDIR' # WkDir
)
]
msi_data = {"Shortcut": shortcut_table}
bdist_msi_options = {'data': msi_data}
build_exe_options = {"packages": ["os", "pandas", "threading", "sys", "serial", "subprocess", "pyqtgraph"], "include_files": ["View\\", "Controle\\", "Util\\", "Model\\"]}
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup( name = "Monitora Temperatura",
version = "0.1",
description = "Monitora a temperatura da caixa.",
options = {"build_exe": build_exe_options,"bdist_msi": bdist_msi_options},
executables = [Executable("main_app.py", base=base)])
| gpl-3.0 |
mayblue9/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
    # Test that n_components cannot be changed between (partial_)fit calls.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    # Test that changing the number of features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
ibm-watson-iot/blockchain-samples | contracts/industry/carbon_trading.0.6/pyServer.py | 2 | 5380 | import sys #for arguments
from flask import Flask
from flask_cors import CORS, cross_origin
import pandas as pd #for creating DataFrame
import requests #querying for information
import json #for posting to get all the information from blockchain
from sklearn.ensemble import RandomForestRegressor #the regression model
import numpy as np #standard library to convert 1d array to 2d array
import json
app = Flask(__name__)
prediction = 0
@app.route("/", methods=['GET','OPTIONS'])
@cross_origin(origin='*')
def hello():
#sys.argv = ['analysis.py', 'Will']
#exec(open("analysis.py").read())
#bring in all the information
#getting request from the URL
url = 'https://bb01c9bc-8a22-4329-94cb-fa722fd3bdce_vp0.us.blockchain.ibm.com:443/chaincode'
body = {
"jsonrpc": "2.0",
"method": "query",
"params": {
"type": 1,
"chaincodeID":{
"name":"89684ecf448f90c8fcbf0232aab899aec47e9ac5530db4d6956fc0033a775c48aa73572253ebddfee29d449536fda6f8353570e81e026dee777832d002702521"
},
"ctorMsg": {
"function":"readAsset",
"args":["{\"assetID\":\""+"Will"+"\"}"]
},
"secureContext": "user_type1_fc806186e6"
},
"id":1234
}
bodyStr = json.dumps(body)
headerReq = {'Content-Type': 'application/json', 'Accept':'application/json'}
res = requests.post(url, bodyStr, headers=headerReq)
columns = ["Temperature Celsius", "Temperature Fahrenheit", "Wind Speed", "Wind Gust Speed", "Wind Degrees", "Precipitation", "Carbon Reading"] #column title
#DATAFRAME
df = pd.DataFrame(columns=columns)
df.fillna(0) # with 0s rather than NaNs
if "result" in res.json() and res.json()["result"]["status"] == "OK":
JsonResponse = json.loads(res.json()["result"]["message"])
#check if the fields exists in the response given back
if "reading" in JsonResponse:
#add all the fields to dataframe
sensorReading = JsonResponse["sensorWeatherHistory"]["sensorReading"]
precipitation = JsonResponse["sensorWeatherHistory"]["precipitation"]
tempCel = JsonResponse["sensorWeatherHistory"]["temperatureCelcius"]
tempFah = JsonResponse["sensorWeatherHistory"]["temperatureFahrenheit"]
windDegrees = JsonResponse["sensorWeatherHistory"]["windDegrees"]
windGustSp = JsonResponse["sensorWeatherHistory"]["windGustSpeed"]
windSp = JsonResponse["sensorWeatherHistory"]["windSpeed"]
#adding rows to dataframe
for i in range(len(sensorReading)):
df.loc[len(df)] = [tempCel[i],tempFah[i],windSp[i],windGustSp[i],windDegrees[i],precipitation[i],sensorReading[i]]
#convert dataframe to csv
df.to_csv("output.csv", sep=',', encoding='utf-8')
#trying to make an expected maximum likehood model
columnsPredict = [c for c in columns if c not in ["Carbon Reading"]]
#what are we trying to predict
target = "Carbon Reading"
# Initialize the model with some parameters.
model = RandomForestRegressor(n_estimators=100, min_samples_leaf=1, random_state=1)
# Fit the model to the data.
model.fit(df[columnsPredict], df[target])
# Make predictions.
#test = df.loc[len(df)-1][columnsPredict]
WeatherURL = 'http://api.wunderground.com/api/62493c160d2ce863/forecast10day/q/TX/Austin.json'
weatherRes = requests.get(WeatherURL)
try:
weather_res = weatherRes.json()
#make an dataFrame
testCol = ["Temperature Celsius", "Temperature Fahrenheit", "Wind Speed", "Wind Gust Speed", "Wind Degrees", "Precipitation"] #column title
#DATAFRAME
weatherDF = pd.DataFrame(columns=testCol)
weatherDF.fillna(0) # with 0s rather than NaNs
for i in weather_res["forecast"]["simpleforecast"]["forecastday"]:
cH = i["high"]["celsius"]
cL = i["low"]["celsius"]
fH = i["high"]["fahrenheit"]
fL = i["low"]["fahrenheit"]
if type(i["high"]["celsius"]) is str:
cH = float(cH)
cL = float(cL)
if type(i["high"]["fahrenheit"]) is str:
fH = float(fH)
fL = float(fL)
weatherDF.loc[len(weatherDF)] = [(cH + cL)/2, (fL + fH)/2, i["avewind"]["kph"], i["maxwind"]["kph"], (i["avewind"]["degrees"] + i["maxwind"]["degrees"])/2, i["qpf_allday"]["mm"]]
#test = np.array(df.loc[len(df)-1][columnsPredict]).reshape((1, -1))
predictions = model.predict(weatherDF)
#add all 10 day forecast values
totalValue = 0
for val in predictions:
totalValue = totalValue + val
print(totalValue)
prediction = totalValue
except ValueError:
print('error')
# data = request.get_json(force=True)
return json.dumps({"prediction": str(prediction)})
if __name__ == "__main__":
app.run(port=2000)
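# Hedged usage sketch (added): with the server running locally,
#   curl http://localhost:2000/
# should return a JSON body like {"prediction": "123.45"}; the exact number depends
# on the chaincode reading history and the 10-day forecast fetched above.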
| apache-2.0 |
ntbrewer/Pyspectr | build/lib/Pyspectr/plotter.py | 1 | 7546 | #!/usr/bin/env python3
"""K. Miernik 2012
[email protected]
Distributed under GNU General Public Licence v3
This module provides simple front-end to matplotlib
"""
import math
import numpy
import matplotlib.pyplot as plt
from matplotlib import cm, ticker
from Pyspectr.exceptions import GeneralError as GeneralError
class Plotter:
""" This class communicates with the matplotlib library
and plot the data
"""
def __init__(self, size):
"""Initialize the plot window, size defines the shape and size
of the figure
0 - None,
1 - 8x6,
11 (default) - 12x8,
        2 - 2 figs 8x6,
12 - 2 figs 12x8
"""
# Max bins in 2d histogram
self.max_2d_bin = 1024
# Font size of labels and ticks
self.font_size = 20
# Set this variable to False if you want to disable the legend
self.legend = True
# Change this variable to another cmap if you need different colors
self.cmap = cm.RdYlGn_r
# Some selected color maps, you can toggle with toggle_color_map
        self.color_maps = [cm.RdYlGn_r, cm.binary, cm.hot, cm.nipy_spectral]  # cm.spectral replaced by cm.nipy_spectral (dvm 2018-05-08)
if size == 0:
pass
if size == 1:
plt.figure(1, (8, 6))
elif size == 11:
plt.figure(1, (12, 8))
elif size == 2:
plt.figure(1, (8, 6))
plt.figure(2, (8, 6))
elif size == 12:
plt.figure(1, (12, 8))
plt.figure(2, (12, 8))
else:
plt.figure(1, (8, 6))
if size != 0:
plt.tick_params(axis='both', labelsize=self.font_size)
plt.grid()
plt.ion()
plt.show()
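    # Hedged usage sketch (added for illustration of the size codes documented above;
    # not part of the original API):
    #   p = Plotter(11)      # one 12x8 inch figure
    #   p.ylog()             # switch the y axis to a log scale
    #   p.color_map()        # toggle to the next preselected color map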
def clear(self):
"""Clear current plotting area"""
plt.clf()
plt.tick_params(axis='both', labelsize=self.font_size)
plt.grid()
def xlim(self, x_range):
"""Change X range of a current plot"""
plt.xlim(x_range)
def ylim(self, y_range):
"""Change Y range of a current plot"""
plt.ylim(y_range)
def ylog(self):
"""Change y scale to log"""
plt.yscale('log')
def ylin(self):
"""Change y scale to linear"""
plt.yscale('linear')
def plot1d(self, plot, xlim=None, ylim=None):
""" Plot 1D histogram
The mode defines the way the data are presented,
'histogram' is displayed with steps
'function' with continuus line
'errorbar' with yerrorbars
The norm (normalization factor) and bin_size are given
for the display purposes only. The histogram is not altered.
"""
histo = plot.histogram
if plot.mode == 'histogram':
plt.plot(histo.x_axis, histo.weights,
ls='steps-mid', label=histo.title)
elif plot.mode == 'function':
plt.plot(histo.x_axis, histo.weights,
ls='-', label=histo.title)
elif plot.mode == 'errorbar':
plt.errorbar(histo.x_axis, histo.weights,
yerr=histo.errors,
marker='o', ls='None', label=histo.title)
else:
raise GeneralError('Unknown plot mode {}'.format(plot.mode))
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if self.legend:
plt.legend(loc=0, numpoints=1, fontsize='small')
def plot1d_4panel(self, plot, ranges):
"""
Special 1D histogram plot. The plot is broken into 4 panels (stacked verically)
the ranges variable should be given in a (x0, x1, x2, x3, x4) format, where
xi defines the ranges of the subplots (x0-x1, x1-x2, x2-x3, x3-x4)
"""
for i, r in enumerate(ranges[:-1]):
x0 = r // plot.bin_size
x1 = ranges[i + 1] // plot.bin_size + 1
ax = plt.subplot(4, 1, i + 1)
ax.plot(plot.histogram.x_axis[x0:x1],
plot.histogram.weights[x0:x1],
ls='steps-mid')
ax.set_xlim((r, ranges[i + 1]))
ax.set_xlabel('E (keV)')
plt.tight_layout()
def plot2d(self, plot, xc=None, yc=None, logz=False):
"""Plot 2D histogram
xc is x range, yc is y range
"""
if plot.histogram.dim != 2:
raise GeneralError('plot2d function needs a 2D histogram!')
x = plot.histogram.x_axis
y = plot.histogram.y_axis
w = plot.histogram.weights
# x = plot.histogram.weights.nonzero()[0]
# y = plot.histogram.weights.nonzero()[1]
# w = plot.histogram.weights[x,y]
# xnz=plot.histogram.x_axis.nonzero()
# ynz=plot.histogram.y_axis.nonzero()
# wnz=plot.histogram.weights
# print(xnz,ynz,wnz)
if xc is not None:
x = x[xc[0]:xc[1]]
w = w[xc[0]:xc[1],:]
if yc is not None:
y = y[yc[0]:yc[1]]
w = w[:, yc[0]:yc[1]]
initial_nx = len(x)
initial_ny = len(y)
nx = len(x)
ny = len(y)
binx = 1
biny = 1
# Rebin data if larger than defined number of bins (max_2d_bin)
# This is needed due to the performance of matplotlib with large arrays
if nx > self.max_2d_bin:
binx = math.ceil(nx / self.max_2d_bin)
missing = binx * self.max_2d_bin - nx
if missing > 0:
addx = numpy.arange(plot.histogram.x_axis[-1] + 1,
plot.histogram.x_axis[-1] + missing + 1)
x = numpy.concatenate((x, addx))
nx = len(x)
z = numpy.zeros((missing, ny))
w = numpy.concatenate((w, z), axis=0)
x = numpy.reshape(x, (-1, binx))
x = x.mean(axis=1)
if ny > self.max_2d_bin:
biny = math.ceil(ny / self.max_2d_bin)
missing = biny * self.max_2d_bin - ny
if missing > 0:
addy = numpy.arange(plot.histogram.y_axis[-1] + 1,
plot.histogram.y_axis[-1] + missing + 1)
y = numpy.concatenate((y, addy))
z = numpy.zeros((nx, missing))
w = numpy.concatenate((w, z), axis=1)
y = numpy.reshape(y, (-1, biny))
y = y.mean(axis=1)
nx = len(x)
ny = len(y)
if nx != initial_nx or ny != initial_ny:
w = numpy.reshape(w, (nx, binx, ny, biny)).mean(3).mean(1)
w = numpy.transpose(w)
title = plot.histogram.title
# If logaritmic scale is used, mask values <= 0
if logz:
w = numpy.ma.masked_where(w <= 0, numpy.log10(w))
title += ' (log10)'
plt.title(title)
CS = plt.pcolormesh(x, y, w, cmap=self.cmap)
plt.xlim(xc)
plt.ylim(yc)
plt.colorbar()
def color_map(self, cmap=None):
"""
Change the color map to the cmap object, or toggle to the
next one from the preselected set,
"""
if cmap is None:
try:
self.cmap = self.color_maps[(self.color_maps.\
index(self.cmap) + 1) %
len(self.color_maps)]
except ValueError:
self.cmap = self.color_maps[0]
else:
self.cmap = cmap
| gpl-3.0 |
ishanic/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 72 | 25573 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
    # Test that fitting incrementally with warm start gives an ensemble of the
    # right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test that a warm-started second fit with smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
ClinicalGraphics/scikit-image | doc/examples/edges/plot_medial_transform.py | 11 | 2257 | """
===========================
Medial axis skeletonization
===========================
The medial axis of an object is the set of all points having more than one
closest point on the object's boundary. It is often called the **topological
skeleton**, because it is a 1-pixel wide skeleton of the object, with the same
connectivity as the original object.
Here, we use the medial axis transform to compute the width of the foreground
objects. As the function ``medial_axis`` (``skimage.morphology.medial_axis``)
returns the distance transform in addition to the medial axis (with the keyword
argument ``return_distance=True``), it is possible to compute the distance to
the background for all points of the medial axis with this function. This gives
an estimate of the local width of the objects.
For a skeleton with fewer branches, there exists another skeletonization
algorithm in ``skimage``: ``skimage.morphology.skeletonize``, that computes
a skeleton by iterative morphological thinnings.
"""
import numpy as np
from scipy import ndimage as ndi
from skimage.morphology import medial_axis
import matplotlib.pyplot as plt
def microstructure(l=256):
"""
Synthetic binary data: binary microstructure with blobs.
Parameters
----------
l: int, optional
linear size of the returned image
"""
n = 5
x, y = np.ogrid[0:l, 0:l]
mask = np.zeros((l, l))
generator = np.random.RandomState(1)
points = l * generator.rand(2, n**2)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndi.gaussian_filter(mask, sigma=l/(4.*n))
return mask > mask.mean()
data = microstructure(l=64)
# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)
# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax1.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax1.axis('off')
ax2.imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest')
ax2.contour(data, [0.5], colors='w')
ax2.axis('off')
fig.tight_layout()
plt.show()
| bsd-3-clause |
r9y9/librosa | docs/examples/plot_vocal_separation.py | 3 | 4303 | # -*- coding: utf-8 -*-
"""
================
Vocal separation
================
This notebook demonstrates a simple technique for separating vocals (and
other sporadic foreground signals) from accompanying instrumentation.
This is based on the "REPET-SIM" method of `Rafii and Pardo, 2012
<http://www.cs.northwestern.edu/~zra446/doc/Rafii-Pardo%20-%20Music-Voice%20Separation%20using%20the%20Similarity%20Matrix%20-%20ISMIR%202012.pdf>`_, but includes a couple of modifications and extensions:
- FFT windows overlap by 1/4, instead of 1/2
- Non-local filtering is converted into a soft mask by Wiener filtering.
This is similar in spirit to the soft-masking method used by `Fitzgerald, 2012
<http://arrow.dit.ie/cgi/viewcontent.cgi?article=1086&context=argcon>`_,
but is a bit more numerically stable in practice.
"""
# Code source: Brian McFee
# License: ISC
##################
# Standard imports
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
#############################################
# Load an example with vocals.
y, sr = librosa.load('audio/Cheese_N_Pot-C_-_16_-_The_Raps_Well_Clean_Album_Version.mp3', duration=120)
# And compute the spectrogram magnitude and phase
S_full, phase = librosa.magphase(librosa.stft(y))
#######################################
# Plot a 5-second slice of the spectrum
idx = slice(*librosa.time_to_frames([30, 35], sr=sr))
plt.figure(figsize=(12, 4))
librosa.display.specshow(librosa.amplitude_to_db(S_full[:, idx], ref=np.max),
y_axis='log', x_axis='time', sr=sr)
plt.colorbar()
plt.tight_layout()
###########################################################
# The wiggly lines above are due to the vocal component.
# Our goal is to separate them from the accompanying
# instrumentation.
#
# We'll compare frames using cosine similarity, and aggregate similar frames
# by taking their (per-frequency) median value.
#
# To avoid being biased by local continuity, we constrain similar frames to be
# separated by at least 2 seconds.
#
# This suppresses sparse/non-repetitive deviations from the average spectrum,
# and works well to discard vocal elements.
S_filter = librosa.decompose.nn_filter(S_full,
aggregate=np.median,
metric='cosine',
width=int(librosa.time_to_frames(2, sr=sr)))
# The output of the filter shouldn't be greater than the input
# if we assume signals are additive. Taking the pointwise minimum
# with the input spectrum forces this.
S_filter = np.minimum(S_full, S_filter)
##############################################
# The raw filter output can be used as a mask,
# but it sounds better if we use soft-masking.
# We can also use a margin to reduce bleed between the vocals and instrumentation masks.
# Note: the margins need not be equal for foreground and background separation
margin_i, margin_v = 2, 10
power = 2
mask_i = librosa.util.softmask(S_filter,
margin_i * (S_full - S_filter),
power=power)
mask_v = librosa.util.softmask(S_full - S_filter,
margin_v * S_filter,
power=power)
# Once we have the masks, simply multiply them with the input spectrum
# to separate the components
S_foreground = mask_v * S_full
S_background = mask_i * S_full
##########################################
# Plot the same slice, but separated into its foreground and background
# sphinx_gallery_thumbnail_number = 2
plt.figure(figsize=(12, 8))
plt.subplot(3, 1, 1)
librosa.display.specshow(librosa.amplitude_to_db(S_full[:, idx], ref=np.max),
y_axis='log', sr=sr)
plt.title('Full spectrum')
plt.colorbar()
plt.subplot(3, 1, 2)
librosa.display.specshow(librosa.amplitude_to_db(S_background[:, idx], ref=np.max),
y_axis='log', sr=sr)
plt.title('Background')
plt.colorbar()
plt.subplot(3, 1, 3)
librosa.display.specshow(librosa.amplitude_to_db(S_foreground[:, idx], ref=np.max),
y_axis='log', x_axis='time', sr=sr)
plt.title('Foreground')
plt.colorbar()
plt.tight_layout()
plt.show()
| isc |
kevin-intel/scikit-learn | examples/tree/plot_cost_complexity_pruning.py | 17 | 4620 | """
========================================================
Post pruning decision trees with cost complexity pruning
========================================================
.. currentmodule:: sklearn.tree
The :class:`DecisionTreeClassifier` provides parameters such as
``min_samples_leaf`` and ``max_depth`` to prevent a tree from overfitting. Cost
complexity pruning provides another option to control the size of a tree. In
:class:`DecisionTreeClassifier`, this pruning technique is parameterized by the
cost complexity parameter, ``ccp_alpha``. Greater values of ``ccp_alpha``
increase the number of nodes pruned. Here we only show the effect of
``ccp_alpha`` on regularizing the trees and how to choose a ``ccp_alpha``
based on validation scores.
See also :ref:`minimal_cost_complexity_pruning` for details on pruning.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.tree import DecisionTreeClassifier
# %%
# Total impurity of leaves vs effective alphas of pruned tree
# ---------------------------------------------------------------
# Minimal cost complexity pruning recursively finds the node with the "weakest
# link". The weakest link is characterized by an effective alpha, where the
# nodes with the smallest effective alpha are pruned first. To get an idea of
# what values of ``ccp_alpha`` could be appropriate, scikit-learn provides
# :func:`DecisionTreeClassifier.cost_complexity_pruning_path` that returns the
# effective alphas and the corresponding total leaf impurities at each step of
# the pruning process. As alpha increases, more of the tree is pruned, which
# increases the total impurity of its leaves.
X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = DecisionTreeClassifier(random_state=0)
path = clf.cost_complexity_pruning_path(X_train, y_train)
ccp_alphas, impurities = path.ccp_alphas, path.impurities
# %%
# In the following plot, the maximum effective alpha value is removed, because
# it is the trivial tree with only one node.
fig, ax = plt.subplots()
ax.plot(ccp_alphas[:-1], impurities[:-1], marker='o', drawstyle="steps-post")
ax.set_xlabel("effective alpha")
ax.set_ylabel("total impurity of leaves")
ax.set_title("Total Impurity vs effective alpha for training set")
# %%
# Next, we train a decision tree using the effective alphas. The last value
# in ``ccp_alphas`` is the alpha value that prunes the whole tree,
# leaving the tree, ``clfs[-1]``, with one node.
clfs = []
for ccp_alpha in ccp_alphas:
clf = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha)
clf.fit(X_train, y_train)
clfs.append(clf)
print("Number of nodes in the last tree is: {} with ccp_alpha: {}".format(
clfs[-1].tree_.node_count, ccp_alphas[-1]))
# %%
# For the remainder of this example, we remove the last element in
# ``clfs`` and ``ccp_alphas``, because it is the trivial tree with only one
# node. Here we show that the number of nodes and tree depth decreases as alpha
# increases.
clfs = clfs[:-1]
ccp_alphas = ccp_alphas[:-1]
node_counts = [clf.tree_.node_count for clf in clfs]
depth = [clf.tree_.max_depth for clf in clfs]
fig, ax = plt.subplots(2, 1)
ax[0].plot(ccp_alphas, node_counts, marker='o', drawstyle="steps-post")
ax[0].set_xlabel("alpha")
ax[0].set_ylabel("number of nodes")
ax[0].set_title("Number of nodes vs alpha")
ax[1].plot(ccp_alphas, depth, marker='o', drawstyle="steps-post")
ax[1].set_xlabel("alpha")
ax[1].set_ylabel("depth of tree")
ax[1].set_title("Depth vs alpha")
fig.tight_layout()
# %%
# Accuracy vs alpha for training and testing sets
# ----------------------------------------------------
# With ``ccp_alpha`` set to zero and the other parameters of
# :class:`DecisionTreeClassifier` kept at their defaults, the tree overfits, leading to
# a 100% training accuracy and 88% testing accuracy. As alpha increases, more
# of the tree is pruned, thus creating a decision tree that generalizes better.
# In this example, setting ``ccp_alpha=0.015`` maximizes the testing accuracy.
train_scores = [clf.score(X_train, y_train) for clf in clfs]
test_scores = [clf.score(X_test, y_test) for clf in clfs]
fig, ax = plt.subplots()
ax.set_xlabel("alpha")
ax.set_ylabel("accuracy")
ax.set_title("Accuracy vs alpha for training and testing sets")
ax.plot(ccp_alphas, train_scores, marker='o', label="train",
drawstyle="steps-post")
ax.plot(ccp_alphas, test_scores, marker='o', label="test",
drawstyle="steps-post")
ax.legend()
plt.show()
| bsd-3-clause |
rahuldan/sympy | sympy/physics/quantum/tests/test_circuitplot.py | 93 | 2065 | from sympy.physics.quantum.circuitplot import labeller, render_label, Mz, CreateOneQubitGate,\
CreateCGate
from sympy.physics.quantum.gate import CNOT, H, SWAP, CGate, S, T
from sympy.external import import_module
from sympy.utilities.pytest import skip
mpl = import_module('matplotlib')
def test_render_label():
assert render_label('q0') == r'$|q0\rangle$'
assert render_label('q0', {'q0': '0'}) == r'$|q0\rangle=|0\rangle$'
def test_Mz():
assert str(Mz(0)) == 'Mz(0)'
def test_create1():
Qgate = CreateOneQubitGate('Q')
assert str(Qgate(0)) == 'Q(0)'
def test_createc():
Qgate = CreateCGate('Q')
assert str(Qgate([1],0)) == 'C((1),Q(0))'
def test_labeller():
"""Test the labeller utility"""
assert labeller(2) == ['q_1', 'q_0']
assert labeller(3,'j') == ['j_2', 'j_1', 'j_0']
def test_cnot():
"""Test a simple cnot circuit. Right now this only makes sure the code doesn't
raise an exception, and some simple properties
"""
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
c = CircuitPlot(CNOT(1,0),2)
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == []
def test_ex1():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(CNOT(1,0)*H(1),2,labels=labeller(2))
assert c.ngates == 2
assert c.nqubits == 2
assert c.labels == ['q_1', 'q_0']
def test_ex4():
if not mpl:
skip("matplotlib not installed")
else:
from sympy.physics.quantum.circuitplot import CircuitPlot
c = CircuitPlot(SWAP(0,2)*H(0)* CGate((0,),S(1)) *H(1)*CGate((0,),T(2))\
*CGate((1,),S(2))*H(2),3,labels=labeller(3,'j'))
assert c.ngates == 7
assert c.nqubits == 3
assert c.labels == ['j_2', 'j_1', 'j_0']
| bsd-3-clause |
zasdfgbnm/tensorflow | tensorflow/examples/learn/iris_custom_decay_dnn.py | 43 | 3572 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with exponential decay."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
import tensorflow as tf
X_FEATURE = 'x' # Name of the input feature.
def my_model(features, labels, mode):
"""DNN with three hidden layers."""
# Create three fully connected layers respectively of size 10, 20, and 10.
net = features[X_FEATURE]
for units in [10, 20, 10]:
net = tf.layers.dense(net, units=units, activation=tf.nn.relu)
# Compute logits (1 per class).
logits = tf.layers.dense(net, 3, activation=None)
# Compute predictions.
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
# Compute loss.
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Create training op with exponentially decaying learning rate.
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
learning_rate = tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=100, decay_rate=0.001)
optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
# Compute evaluation metrics.
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = tf.estimator.Estimator(model_fn=my_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_train}, y=y_train, num_epochs=None, shuffle=True)
classifier.train(input_fn=train_input_fn, steps=1000)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={X_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
jakevdp/bokeh | bokeh/sampledata/daylight.py | 4 | 2482 | """Daylight hours from http://www.sunrisesunset.com """
import re
import datetime
import requests
from six.moves import xrange
from os.path import join, abspath, dirname
import pandas as pd
url = "http://sunrisesunset.com/calendar.asp"
r0 = re.compile("<[^>]+>| |[\r\n\t]")
r1 = re.compile(r"(\d+)(DST Begins|DST Ends)?Sunrise: (\d+):(\d\d)Sunset: (\d+):(\d\d)")
def fetch_daylight_hours(lat, lon, tz, dst, year):
"""Fetch daylight hours from sunrisesunset.com for a given location.
Parameters
----------
lat : float
Location's latitude.
lon : float
Location's longitude.
tz : int or float
Time zone offset from UTC. Use floats for half-hour time zones.
dst : int
Daylight saving type, e.g. 0 -> none, 1 -> North America, 2 -> Europe.
See sunrisesunset.com/custom.asp for other possible values.
year : int
Year (1901..2099).
"""
daylight = []
summer = 0 if lat >= 0 else 1
for month in xrange(1, 12+1):
args = dict(url=url, lat=lat, lon=lon, tz=tz, dst=dst, year=year, month=month)
response = requests.get("%(url)s?comb_city_info=_;%(lon)s;%(lat)s;%(tz)s;%(dst)s&month=%(month)s&year=%(year)s&time_type=1&wadj=1" % args)
entries = r1.findall(r0.sub("", response.text))
for day, note, sunrise_hour, sunrise_minute, sunset_hour, sunset_minute in entries:
if note == "DST Begins":
summer = 1
elif note == "DST Ends":
summer = 0
date = datetime.date(year, month, int(day))
sunrise = datetime.time(int(sunrise_hour), int(sunrise_minute))
sunset = datetime.time(int(sunset_hour), int(sunset_minute))
daylight.append([date, sunrise, sunset, summer])
return pd.DataFrame(daylight, columns=["Date", "Sunrise", "Sunset", "Summer"])
# daylight_warsaw_2013 = fetch_daylight_hours(52.2297, -21.0122, 1, 2, 2013)
# daylight_warsaw_2013.to_csv("bokeh/sampledata/daylight_warsaw_2013.csv", index=False)
def load_daylight_hours(file):
path = join(dirname(abspath(__file__)), file)
df = pd.read_csv(path, parse_dates=["Date", "Sunrise", "Sunset"])
df["Date"] = df.Date.map(lambda x: x.date())
df["Sunrise"] = df.Sunrise.map(lambda x: x.time())
df["Sunset"] = df.Sunset.map(lambda x: x.time())
return df
daylight_warsaw_2013 = load_daylight_hours("daylight_warsaw_2013.csv")
| bsd-3-clause |
gnina/scripts | affinity_search/ga_addrequests.py | 1 | 8462 | #!/usr/bin/env python
'''Train a random forest on model performance from an sql database and then
run a genetic algorithm to propose new, better models to run.
'''
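# Editorial note: an illustrative invocation of this script (host, password and
# database names below are placeholders, not real credentials):
#
#   python ga_addrequests.py --host db.example.org -p secret --db opt \
#       --pending_threshold 10 -n 5
#
# With -n 5, up to 5 new configurations are proposed, and each accepted config
# is inserted as five REQUESTED rows (one per cross-validation split).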
import sys, re, MySQLdb, argparse, os, json, subprocess
import pandas as pd
import makemodel
import numpy as np
from MySQLdb.cursors import DictCursor
from outputjson import makejson
from frozendict import frozendict
import sklearn
from sklearn.ensemble import *
from sklearn.preprocessing import *
from sklearn.feature_extraction import *
import deap
from deap import base, creator, gp, tools
from deap import algorithms
from deap import *
import multiprocessing
def getcursor(host,passwd,db):
'''create a connection and return a cursor;
doing this guards against dropped connections'''
conn = MySQLdb.connect (host = host,user = "opter",passwd=passwd,db=db)
conn.autocommit(True)
cursor = conn.cursor(DictCursor)
return cursor
def cleanparams(p):
'''standardize params that do not matter'''
modeldefaults = makemodel.getdefaults()
for i in range(1,6):
if p['conv%d_width'%i] == 0:
for suffix in ['func', 'init', 'norm', 'size', 'stride', 'width']:
name = 'conv%d_%s'%(i,suffix)
p[name] = modeldefaults[name]
if p['pool%d_size'%i] == 0:
name = 'pool%d_type'%i
p[name] = modeldefaults[name]
if p['fc_pose_hidden'] == 0:
p['fc_pose_func'] = modeldefaults['fc_pose_func']
p['fc_pose_hidden2'] = modeldefaults['fc_pose_hidden2']
p['fc_pose_func2'] = modeldefaults['fc_pose_func2']
p['fc_pose_init'] = modeldefaults['fc_pose_init']
elif p['fc_pose_hidden2'] == 0:
p['fc_pose_hidden2'] = modeldefaults['fc_pose_hidden2']
p['fc_pose_func2'] = modeldefaults['fc_pose_func2']
if p['fc_affinity_hidden'] == 0:
p['fc_affinity_func'] = modeldefaults['fc_affinity_func']
p['fc_affinity_hidden2'] = modeldefaults['fc_affinity_hidden2']
p['fc_affinity_func2'] = modeldefaults['fc_affinity_func2']
p['fc_affinity_init'] = modeldefaults['fc_affinity_init']
elif p['fc_affinity_hidden2'] == 0:
p['fc_affinity_hidden2'] = modeldefaults['fc_affinity_hidden2']
p['fc_affinity_func2'] = modeldefaults['fc_affinity_func2']
return p
def randParam(param, choices):
'''randomly select a choice for param'''
if isinstance(choices, makemodel.Range): #discretize
choices = np.linspace(choices.min,choices.max, 9)
return np.asscalar(np.random.choice(choices))
def randomIndividual():
ret = dict()
options = makemodel.getoptions()
for (param,choices) in options.items():
ret[param] = randParam(param, choices)
return cleanparams(ret)
def evaluateIndividual(ind):
x = dictvec.transform(ind)
return [rf.predict(x)[0]]
def mutateIndividual(ind, indpb=0.05):
'''for each param, with prob indpb randomly sample another choice'''
options = makemodel.getoptions()
for (param,choices) in options.items():
if np.random.rand() < indpb:
ind[param] = randParam(param, choices)
return (ind,)
def crossover(ind1, ind2, indpb=0.5):
    '''swap choices with probability indpb'''
    options = makemodel.getoptions()
    for (param,choices) in options.items():
        if np.random.rand() < indpb:
tmp = ind1[param]
ind1[param] = ind2[param]
ind2[param] = tmp
return (ind1,ind2)
def runGA(pop):
'''run GA with early stopping if not improving'''
hof = tools.HallOfFame(10)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
best = 0
pop = toolbox.clone(pop)
for i in range(40):
pop, log = algorithms.eaMuPlusLambda(pop, toolbox, mu=300, lambda_=300, cxpb=0.5, mutpb=0.2, ngen=25,
stats=stats, halloffame=hof, verbose=True)
newmax = log[-1]['max']
if best == newmax:
break
best = newmax
return pop
def addrows(config,host,db,password):
    '''insert the given config into the params table as five REQUESTED jobs (one per split)'''
conn = MySQLdb.connect (host = host,user = "opter",passwd=password,db=db)
cursor = conn.cursor()
items = list(config.items())
names = ','.join([str(n) for (n,v) in items])
values = ','.join(['%s' for (n,v) in items])
names += ',id'
values += ',"REQUESTED"'
#do five variations
for split in range(5):
seed = np.random.randint(0,100000)
n = names + ',split,seed'
v = values + ',%d,%d' % (split,seed)
insert = 'INSERT INTO params (%s) VALUES (%s)' % (n,v)
cursor.execute(insert,[v for (n,v) in items])
conn.commit()
parser = argparse.ArgumentParser(description='Generate more configurations with random forest and genetic algorithms')
parser.add_argument('--host',type=str,help='Database host',required=True)
parser.add_argument('-p','--password',type=str,help='Database password',required=True)
parser.add_argument('--db',type=str,help='Database name',default='database')
parser.add_argument('--pending_threshold',type=int,default=0,help='Number of pending jobs that triggers an update')
parser.add_argument('-n','--num_configs',type=int,default=1,help='Number of configs to generate - each config adds five jobs (one per split)')
args = parser.parse_args()
# first see how many id=REQUESTED jobs there are
cursor = getcursor(args.host,args.password,args.db)
cursor.execute('SELECT COUNT(*) FROM params WHERE id = "REQUESTED"')
rows = cursor.fetchone()
pending = list(rows.values())[0]
#print "Pending jobs:",pending
sys.stdout.write('%d '%pending)
sys.stdout.flush()
#if more than pending_threshold, quit
if pending > args.pending_threshold:
sys.exit(0)
cursor = getcursor(args.host,args.password,args.db)
cursor.execute('SELECT * FROM params WHERE id != "REQUESTED"')
rows = cursor.fetchall()
data = pd.DataFrame(list(rows))
#make errors zero - appropriate if error is due to parameters
data.loc[data.id == 'ERROR','R'] = 0
data.loc[data.id == 'ERROR','rmse'] = 0
data.loc[data.id == 'ERROR','top'] = 0
data.loc[data.id == 'ERROR','auc'] = 0
data['Rtop'] = data.R*data.top
data = data.dropna('index').apply(pd.to_numeric, errors='ignore')
#convert data to be useful for sklearn
notparams = ['R','auc','Rtop','id','msg','rmse','seed','serial','time','top','split']
X = data.drop(notparams,axis=1)
y = data.Rtop
dictvec = DictVectorizer()
#standardize meaningless params
Xv = dictvec.fit_transform(list(map(cleanparams,X.to_dict(orient='records'))))
print("\nTraining %d\n"%Xv.shape[0])
#train model
rf = RandomForestRegressor(n_estimators=20)
rf.fit(Xv,y)
#set up GA
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", dict, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("individual", tools.initIterate, creator.Individual, randomIndividual)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mutate",mutateIndividual)
toolbox.register("mate",crossover)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluateIndividual)
pool = multiprocessing.Pool()
toolbox.register("map", pool.map)
#setup initial population
initpop = [ creator.Individual(cleanparams(x)) for x in X.to_dict('records')]
evals = pool.map(toolbox.evaluate, initpop)
top = sorted([l[0] for l in evals],reverse=True)[0]
print("Best in training set: %f"%top)
seen = set(map(frozendict,initpop))
#include some random individuals
randpop = toolbox.population(n=len(initpop))
pop = runGA(initpop+randpop)
#make sure sorted
pop = sorted(pop,key=lambda x: -x.fitness.values[0])
#remove already evaluated configs
pop = [p for p in pop if frozendict(p) not in seen]
print("Best recommended: %f"%pop[0].fitness.values[0])
uniquified = []
for config in pop:
config = cleanparams(config)
fr = frozendict(config)
if fr not in seen:
seen.add(fr)
uniquified.append(config)
print(len(uniquified),len(pop))
for config in uniquified[:args.num_configs]:
addrows(config, args.host,args.db,args.password)
| bsd-3-clause |
victorbergelin/scikit-learn | examples/svm/plot_svm_margin.py | 318 | 2328 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM Margins Example
=========================================================
The plots below illustrate the effect the parameter `C` has
on the separation line. A large value of `C` basically tells
our model that we do not have that much faith in our data's
distribution, and will only consider points close to line
of separation.
A small value of `C` includes more/all the observations, allowing
the margins to be calculated using all the data in the area.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# figure number
fignum = 1
# fit the model
for name, penalty in (('unreg', 1), ('reg', 0.05)):
clf = svm.SVC(kernel='linear', C=penalty)
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
margin = 1 / np.sqrt(np.sum(clf.coef_ ** 2))
yy_down = yy + a * margin
yy_up = yy - a * margin
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -4.8
x_max = 4.2
y_min = -6
y_max = 6
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.predict(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z, cmap=plt.cm.Paired)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/tests/test_converter_utils.py | 1 | 18049 | import random
import string
import unittest
from nose.tools import assert_equal, assert_list_equal, raises
import numpy as np
import pandas as pd
from py_stringsimjoin.utils.converter import dataframe_column_to_str, \
series_to_str
class DataframeColumnToStrTestCases(unittest.TestCase):
def setUp(self):
float_col = pd.Series(np.random.randn(10)).append(
pd.Series([np.NaN for _ in range(10)], index=range(10, 20)))
float_col_with_int_val = pd.Series(
np.random.randint(1, 100, 10)).append(
pd.Series([np.NaN for _ in range(10)], index=range(10, 20)))
str_col = pd.Series([random.choice(string.ascii_lowercase)
for _ in range(10)]).append(
pd.Series([np.NaN for _ in range(10)], index=range(10, 20)))
int_col = pd.Series(np.random.randint(1, 100, 20))
nan_col = pd.Series([np.NaN for _ in range(20)])
self.dataframe = pd.DataFrame({'float_col': float_col,
'float_col_with_int_val': float_col_with_int_val,
'int_col': int_col,
'str_col': str_col,
'nan_col': nan_col})
def test_str_col(self):
assert_equal(self.dataframe['str_col'].dtype, object)
out_df = dataframe_column_to_str(self.dataframe, 'str_col',
inplace=False, return_col=False)
assert_equal(type(out_df), pd.DataFrame)
assert_equal(out_df['str_col'].dtype, object)
assert_equal(self.dataframe['str_col'].dtype, object)
assert_equal(sum(pd.isnull(self.dataframe['str_col'])),
sum(pd.isnull(out_df['str_col'])))
def test_int_col(self):
assert_equal(self.dataframe['int_col'].dtype, int)
out_df = dataframe_column_to_str(self.dataframe, 'int_col',
inplace=False, return_col=False)
assert_equal(type(out_df), pd.DataFrame)
assert_equal(out_df['int_col'].dtype, object)
assert_equal(self.dataframe['int_col'].dtype, int)
assert_equal(sum(pd.isnull(out_df['int_col'])), 0)
def test_float_col(self):
assert_equal(self.dataframe['float_col'].dtype, float)
out_df = dataframe_column_to_str(self.dataframe, 'float_col',
inplace=False, return_col=False)
assert_equal(type(out_df), pd.DataFrame)
assert_equal(out_df['float_col'].dtype, object)
assert_equal(self.dataframe['float_col'].dtype, float)
assert_equal(sum(pd.isnull(self.dataframe['float_col'])),
sum(pd.isnull(out_df['float_col'])))
def test_float_col_with_int_val(self):
assert_equal(self.dataframe['float_col_with_int_val'].dtype, float)
out_df = dataframe_column_to_str(
self.dataframe, 'float_col_with_int_val',
inplace=False, return_col=False)
assert_equal(type(out_df), pd.DataFrame)
assert_equal(out_df['float_col_with_int_val'].dtype, object)
assert_equal(self.dataframe['float_col_with_int_val'].dtype, float)
assert_equal(sum(pd.isnull(self.dataframe['float_col_with_int_val'])),
sum(pd.isnull(out_df['float_col_with_int_val'])))
for idx, row in self.dataframe.iterrows():
if pd.isnull(row['float_col_with_int_val']):
continue
assert_equal(str(int(row['float_col_with_int_val'])),
out_df.loc[idx]['float_col_with_int_val'])
def test_str_col_with_inplace(self):
assert_equal(self.dataframe['str_col'].dtype, object)
nan_cnt_before = sum(pd.isnull(self.dataframe['str_col']))
flag = dataframe_column_to_str(self.dataframe, 'str_col',
inplace=True, return_col=False)
assert_equal(flag, True)
assert_equal(self.dataframe['str_col'].dtype, object)
nan_cnt_after = sum(pd.isnull(self.dataframe['str_col']))
assert_equal(nan_cnt_before, nan_cnt_after)
def test_str_col_with_return_col(self):
assert_equal(self.dataframe['str_col'].dtype, object)
nan_cnt_before = sum(pd.isnull(self.dataframe['str_col']))
out_series = dataframe_column_to_str(self.dataframe, 'str_col',
inplace=False, return_col=True)
assert_equal(type(out_series), pd.Series)
assert_equal(out_series.dtype, object)
assert_equal(self.dataframe['str_col'].dtype, object)
nan_cnt_after = sum(pd.isnull(out_series))
assert_equal(nan_cnt_before, nan_cnt_after)
def test_int_col_with_inplace(self):
assert_equal(self.dataframe['int_col'].dtype, int)
flag = dataframe_column_to_str(self.dataframe, 'int_col',
inplace=True, return_col=False)
assert_equal(flag, True)
assert_equal(self.dataframe['int_col'].dtype, object)
assert_equal(sum(pd.isnull(self.dataframe['int_col'])), 0)
def test_int_col_with_return_col(self):
assert_equal(self.dataframe['int_col'].dtype, int)
out_series = dataframe_column_to_str(self.dataframe, 'int_col',
inplace=False, return_col=True)
assert_equal(type(out_series), pd.Series)
assert_equal(out_series.dtype, object)
assert_equal(self.dataframe['int_col'].dtype, int)
assert_equal(sum(pd.isnull(out_series)), 0)
def test_float_col_with_inplace(self):
assert_equal(self.dataframe['float_col'].dtype, float)
nan_cnt_before = sum(pd.isnull(self.dataframe['float_col']))
flag = dataframe_column_to_str(self.dataframe, 'float_col',
inplace=True, return_col=False)
assert_equal(flag, True)
assert_equal(self.dataframe['float_col'].dtype, object)
nan_cnt_after = sum(pd.isnull(self.dataframe['float_col']))
assert_equal(nan_cnt_before, nan_cnt_after)
def test_float_col_with_return_col(self):
assert_equal(self.dataframe['float_col'].dtype, float)
nan_cnt_before = sum(pd.isnull(self.dataframe['float_col']))
out_series = dataframe_column_to_str(self.dataframe, 'float_col',
inplace=False, return_col=True)
assert_equal(type(out_series), pd.Series)
assert_equal(out_series.dtype, object)
assert_equal(self.dataframe['float_col'].dtype, float)
nan_cnt_after = sum(pd.isnull(out_series))
assert_equal(nan_cnt_before, nan_cnt_after)
def test_nan_col_with_inplace(self):
assert_equal(self.dataframe['nan_col'].dtype, float)
nan_cnt_before = sum(pd.isnull(self.dataframe['nan_col']))
flag = dataframe_column_to_str(self.dataframe, 'nan_col',
inplace=True, return_col=False)
assert_equal(flag, True)
assert_equal(self.dataframe['nan_col'].dtype, object)
nan_cnt_after = sum(pd.isnull(self.dataframe['nan_col']))
assert_equal(nan_cnt_before, nan_cnt_after)
@raises(AssertionError)
def test_invalid_dataframe(self):
dataframe_column_to_str([], 'test_col')
@raises(AssertionError)
def test_invalid_col_name(self):
dataframe_column_to_str(self.dataframe, 'invalid_col')
@raises(AssertionError)
def test_invalid_inplace_flag(self):
dataframe_column_to_str(self.dataframe, 'str_col', inplace=None)
@raises(AssertionError)
def test_invalid_return_col_flag(self):
dataframe_column_to_str(self.dataframe, 'str_col',
inplace=True, return_col=None)
@raises(AssertionError)
def test_invalid_flag_combination(self):
dataframe_column_to_str(self.dataframe, 'str_col',
inplace=True, return_col=True)
class SeriesToStrTestCases(unittest.TestCase):
def setUp(self):
self.float_col = pd.Series(np.random.randn(10)).append(
pd.Series([np.NaN for _ in range(10)], index=range(10, 20)))
self.float_col_with_int_val = pd.Series(
np.random.randint(1, 100, 10)).append(
pd.Series([np.NaN for _ in range(10)], index=range(10, 20)))
self.str_col = pd.Series([random.choice(string.ascii_lowercase)
for _ in range(10)]).append(
pd.Series([np.NaN for _ in range(10)], index=range(10, 20)))
self.int_col = pd.Series(np.random.randint(1, 100, 20))
self.nan_col = pd.Series([np.NaN for _ in range(20)])
def test_str_col(self):
assert_equal(self.str_col.dtype, object)
out_series = series_to_str(self.str_col, inplace=False)
assert_equal(type(out_series), pd.Series)
assert_equal(out_series.dtype, object)
assert_equal(self.str_col.dtype, object)
assert_equal(sum(pd.isnull(self.str_col)),
sum(pd.isnull(out_series)))
def test_int_col(self):
assert_equal(self.int_col.dtype, int)
out_series = series_to_str(self.int_col, inplace=False)
assert_equal(type(out_series), pd.Series)
assert_equal(out_series.dtype, object)
assert_equal(self.int_col.dtype, int)
assert_equal(sum(pd.isnull(out_series)), 0)
def test_float_col(self):
assert_equal(self.float_col.dtype, float)
out_series = series_to_str(self.float_col, inplace=False)
assert_equal(type(out_series), pd.Series)
assert_equal(out_series.dtype, object)
assert_equal(self.float_col.dtype, float)
assert_equal(sum(pd.isnull(self.float_col)),
sum(pd.isnull(out_series)))
def test_float_col_with_int_val(self):
assert_equal(self.float_col_with_int_val.dtype, float)
out_series = series_to_str(self.float_col_with_int_val, inplace=False)
assert_equal(type(out_series), pd.Series)
assert_equal(out_series.dtype, object)
assert_equal(self.float_col_with_int_val.dtype, float)
assert_equal(sum(pd.isnull(self.float_col_with_int_val)),
sum(pd.isnull(out_series)))
for idx, val in self.float_col_with_int_val.iteritems():
if pd.isnull(val):
continue
assert_equal(str(int(val)), out_series.loc[idx])
def test_str_col_with_inplace(self):
assert_equal(self.str_col.dtype, object)
nan_cnt_before = sum(pd.isnull(self.str_col))
flag = series_to_str(self.str_col, inplace=True)
assert_equal(flag, True)
assert_equal(self.str_col.dtype, object)
nan_cnt_after = sum(pd.isnull(self.str_col))
assert_equal(nan_cnt_before, nan_cnt_after)
def test_int_col_with_inplace(self):
assert_equal(self.int_col.dtype, int)
flag = series_to_str(self.int_col, inplace=True)
assert_equal(flag, True)
assert_equal(self.int_col.dtype, object)
assert_equal(sum(pd.isnull(self.int_col)), 0)
def test_float_col_with_inplace(self):
assert_equal(self.float_col.dtype, float)
nan_cnt_before = sum(pd.isnull(self.float_col))
flag = series_to_str(self.float_col, inplace=True)
assert_equal(flag, True)
assert_equal(self.float_col.dtype, object)
nan_cnt_after = sum(pd.isnull(self.float_col))
assert_equal(nan_cnt_before, nan_cnt_after)
# test the case with a series containing only NaN values. In this case,
# inplace flag will be ignored.
def test_nan_col_with_inplace(self):
assert_equal(self.nan_col.dtype, float)
nan_cnt_before = sum(pd.isnull(self.nan_col))
out_series = series_to_str(self.nan_col, inplace=True)
assert_equal(out_series.dtype, object)
assert_equal(self.nan_col.dtype, float)
nan_cnt_after = sum(pd.isnull(out_series))
assert_equal(nan_cnt_before, nan_cnt_after)
def test_empty_series_with_inplace(self):
empty_series = pd.Series(dtype=int)
assert_equal(empty_series.dtype, int)
out_series = series_to_str(empty_series, inplace=True)
assert_equal(out_series.dtype, object)
assert_equal(empty_series.dtype, int)
assert_equal(len(out_series), 0)
@raises(AssertionError)
def test_invalid_series(self):
series_to_str([])
@raises(AssertionError)
def test_invalid_inplace_flag(self):
series_to_str(self.int_col, inplace=None)
| bsd-3-clause |
rmcgibbo/msmbuilder | msmbuilder/cluster/agglomerative.py | 3 | 8584 | # Author: Robert McGibbon <[email protected]>
# Contributors:
# Copyright (c) 2014, Stanford University
# All rights reserved.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division
import numpy as np
import six
import scipy.spatial.distance
from scipy.cluster.hierarchy import fcluster
from sklearn.externals.joblib import Memory
from sklearn.utils import check_random_state
from sklearn.base import ClusterMixin, TransformerMixin
from . import MultiSequenceClusterMixin
from ..base import BaseEstimator
try:
from fastcluster import linkage
except ImportError:
from scipy.cluster.hierarchy import linkage
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
__all__ = ['_LandmarkAgglomerative']
POOLING_FUNCTIONS = {
'average': lambda x: np.mean(x, axis=1),
'complete': lambda x: np.max(x, axis=1),
'single': lambda x: np.min(x, axis=1),
}
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def pdist(X, metric='euclidean'):
if isinstance(metric, six.string_types):
return scipy.spatial.distance.pdist(X, metric)
n = len(X)
d = np.empty((n, n))
for i in range(n):
d[i, :] = metric(X, X, i)
return scipy.spatial.distance.squareform(d, checks=False)
def cdist(XA, XB, metric='euclidean'):
if isinstance(metric, six.string_types):
return scipy.spatial.distance.cdist(XA, XB, metric)
nA, nB = len(XA), len(XB)
d = np.empty((nA, nB))
for i in range(nA):
d[i, :] = metric(XB, XA, i)
return d
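# Editorial note: besides metric names understood by scipy, ``pdist``/``cdist``
# above also accept a callable, which they invoke as ``metric(target, source, i)``
# and expect to return the 1-D array of distances from ``source[i]`` to every
# row of ``target``. The helper below is an illustrative sketch of that
# contract and is not part of the original module API.
def _example_row_euclidean(target, source, i):
    """Distances from source[i] to each row of target (illustrative only)."""
    diff = target - source[i]
    return np.sqrt((diff * diff).sum(axis=1))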
#-----------------------------------------------------------------------------
# Main Code
#-----------------------------------------------------------------------------
class _LandmarkAgglomerative(ClusterMixin, TransformerMixin):
"""Landmark-based agglomerative hierarchical clustering
Landmark-based agglomerative clustering is a simple scalable version of
"standard" hierarchical clustering which doesn't require computing the full
matrix of pairwise distances between all data points. The idea is
basically to subsample only ``n_landmarks`` "landmark"
data points, cluster them, and then assign labels to the remaining data
points based on their distances to (and the labels of) the landmarks.
Parameters
----------
n_clusters : int
The number of clusters to find.
n_landmarks : int, optional
Memory-saving approximation. Instead of actually clustering every
point, we instead select n_landmark points either randomly or by
striding the data matrix (see ``landmark_strategy``). Then we cluster
        only the landmarks, and then assign the remaining dataset based
on distances to the landmarks. Note that n_landmarks=None is equivalent
to using every point in the dataset as a landmark.
linkage : {'single', 'complete', 'average'}, default='average'
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observation. The algorithm will merge
the pairs of cluster that minimize this criterion.
- average uses the average of the distances of each observation of
the two sets.
- complete or maximum linkage uses the maximum distances between
all observations of the two sets.
- single uses the minimum distance between all observations of the
two sets.
        The linkage also affects the predict() method and the use of landmarks.
After computing the distance from each new data point to the landmarks,
the new data point will be assigned to the cluster that minimizes the
linkage function between the new data point and each of the landmarks.
(i.e with ``single``, new data points will be assigned the label of
the closest landmark, with ``average``, it will be assigned the label
of the landmark s.t. the mean distance from the test point to all the
landmarks with that label is minimized, etc.)
memory : Instance of joblib.Memory or string (optional)
Used to cache the output of the computation of the distance matrix.
metric : string or callable, default= "euclidean"
Metric used to compute the distance between samples.
landmark_strategy : {'stride', 'random'}, default='stride'
Method for determining landmark points. Only matters when n_landmarks
is not None. "stride" takes landmarks every n-th data point in X, and
        "random" selects them uniformly at random.
random_state : integer or numpy.RandomState, optional
The generator used to select random landmarks. Only used if
landmark_strategy=='random'. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
References
----------
.. [1] Mullner, D. "Modern hierarchical, agglomerative clustering
algorithms." arXiv:1109.2378 (2011).
Attributes
----------
landmark_labels_
landmarks_
"""
def __init__(self, n_clusters, n_landmarks=None, linkage='average',
memory=Memory(cachedir=None, verbose=0), metric='euclidean',
landmark_strategy='stride', random_state=None):
self.n_clusters = n_clusters
self.n_landmarks = n_landmarks
self.memory = memory
self.metric = metric
self.landmark_strategy = landmark_strategy
self.random_state = random_state
self.linkage = linkage
self.landmark_labels_ = None
self.landmarks_ = None
def fit(self, X, y=None):
"""
Compute agglomerative clustering.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Returns
-------
self
"""
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
if self.n_landmarks is None:
distances = memory.cache(pdist)(X, self.metric)
else:
if self.landmark_strategy == 'random':
land_indices = check_random_state(self.random_state).randint(len(X), size=self.n_landmarks)
else:
land_indices = np.arange(len(X))[::(len(X) // self.n_landmarks)][:self.n_landmarks]
distances = memory.cache(pdist)(X[land_indices], self.metric)
tree = memory.cache(linkage)(distances, method=self.linkage)
self.landmark_labels_ = fcluster(tree, criterion='maxclust', t=self.n_clusters) - 1
if self.n_landmarks is None:
self.landmarks_ = X
else:
self.landmarks_ = X[land_indices]
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
dists = cdist(X, self.landmarks_, self.metric)
try:
pooling_func = POOLING_FUNCTIONS[self.linkage]
except KeyError:
raise ValueError('linkage=%s is not supported' % self.linkage)
pooled_distances = np.empty(len(X))
pooled_distances.fill(np.infty)
labels = np.zeros(len(X), dtype=int)
for i in range(self.n_clusters):
if np.any(self.landmark_labels_ == i):
d = pooling_func(dists[:, self.landmark_labels_ == i])
mask = (d < pooled_distances)
pooled_distances[mask] = d[mask]
labels[mask] = i
return labels
def fit_predict(self, X):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
self.fit(X)
return self.predict(X)
class LandmarkAgglomerative(MultiSequenceClusterMixin, _LandmarkAgglomerative, BaseEstimator):
__doc__ = _LandmarkAgglomerative.__doc__
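# A minimal usage sketch (hedged): the random "trajectories" below are purely
# illustrative assumptions, not data or behaviour guaranteed by this module.
#
#   >>> import numpy as np
#   >>> trajs = [np.random.randn(500, 3), np.random.randn(400, 3)]
#   >>> model = LandmarkAgglomerative(n_clusters=4, n_landmarks=50,
#   ...                               linkage='average', metric='euclidean')
#   >>> model.fit(trajs)
#   >>> labels = model.predict(trajs)  # one label array per input trajectory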
| lgpl-2.1 |
r-mart/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
jacksarick/My-Code | Python/pi/piguess.py | 1 | 1059 | #!/usr/bin/python
from __future__ import division
import matplotlib.pyplot as plt
from pylab import savefig
from random import randint
from time import time
import sys
filelocation = "/Users/jack.sarick/Desktop/Program/Python/pi/"
filename = filelocation+"pianswer.txt"
temppoint = []
loopcounter = 0
k50, k10, k5 = 50000, 10000, 5000
looptime = sys.argv[1]
def makepi(loop):
global filelocation
global filename
counter = 0
#Starts timer for loop
looptime = time()
#Generates points
for i in range(k50):
temppoint = [randint(0, k10), randint(0, k10)]
if ((((temppoint[0]-k5)**2) + ((temppoint[1]-k5)**2)) <= k5**2):
plt.plot(temppoint[0], temppoint[1], 'bo')
counter += 1
else:
plt.plot(temppoint[0], temppoint[1], 'ro')
#Draws and saves file
plt.axis([0, k10, 0, k10])
savefig(filelocation + 'pi' + str(loop) + '.png', bbox_inches='tight')
#writes estimation and loop time to file
with open(filename,'ab') as f:
f.write(str((counter/k50)*4) + "," + str(time()-looptime) + "\n")
f.close()
#Runs makepi()
makepi(looptime) | mit |
agartland/pysieve | io.py | 1 | 4231 | """
Save and load data, analyses and meta-analyses to and from files (db?)
Not sure how I should organize these functions. Should support both file and db backends
Since I'm just going to pickle, both should be easy to implement later.
Better to have these external functions that can load data and then instantiate the necessary classes.
Confusing though, because meta.py has its own load/save functions for loading and saving sets of data/analyses,
but those are only for the simulations, I think.
"""
__all__ = ['loadSieve',
'saveSieve']
import pickle
import os
import pandas as pd
import os.path as op
def saveSieve(dataPath, obj, dataFn = None, analysisFn = None):
"""Save sieve analysis results and/or data to a file that can be loaded later
Results and data will be kept in separate files for efficiency if needed.
Returns the data and results filenames if successful"""
if dataFn is None:
dataFn = _getFilename(dataPath, obj.data, 'pkl')
if analysisFn is None:
analysisFn = _getFilename(dataPath, obj, 'pkl')
"""If its an analysis object"""
if hasattr(obj, 'methodName'):
isAnalysisObj = True
else:
isAnalysisObj = False
if isAnalysisObj:
analysisClassName = str(obj.__class__).split('.')[-1].replace("'","").replace('>','')
out = {'methodName':obj.methodName,'analysisClassName':analysisClassName,'results':obj.results}
with open(analysisFn, 'wb') as fh:
pickle.dump(out, fh)
"""Now save the data"""
out = {'data':obj.data}
with open(dataFn, 'wb') as fh:
pickle.dump(out, fh)
return dataFn, analysisFn
def loadSieve(dataPath, fn, data = None):
"""Load sieve data OR analysis results from a file
To load data, specify only fn of the data file,
    To load results, specify the pre-loaded data object as data:
        analysisClassObj = loadSieve(DATA_PATH, analysisFn, data=loadSieve(DATA_PATH, dataFn))
Parameters
----------
fn : str
Full path to file
data : sub-class of pysieve.sieveData
Specify the data object when loading an analysis object,
Returns
-------
out : sub-class of pysieve.sieveData or pysieve.sieveAnalysis"""
"""Method is compatible across pandas versions and with binary files."""
out = pd.read_pickle(fn)
"""If its an analysis object and we have the data object passed"""
if 'methodName' in out.keys() and not data is None:
out['data'] = data
obj = eval('%s(sievedata = data, sieveresults = results)' % (out['analysisClassName']),globals(),out)
else:
obj = out['data']
return obj
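# Hypothetical round-trip sketch (DATA_PATH and analysisObj are illustrative
# assumptions, not names defined in this module):
#
#   dataFn, analysisFn = saveSieve(DATA_PATH, analysisObj)
#   dataObj = loadSieve(DATA_PATH, dataFn)
#   analysisObj2 = loadSieve(DATA_PATH, analysisFn, data=dataObj)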
def _getFilename(dataPath, obj, ext):
    """Try to make a filename from as much info as is available in the object (data or results)
Returns the filename"""
"""Assume that its an analysis object first"""
if hasattr(obj,'data') and hasattr(obj.data,'N'):
isDataObj = False
else:
isDataObj = True
if not isDataObj:
if obj.data.regionInds is None:
regStr = 'whole'
else:
regStr = '%d_%d' % (obj.data.regionInds[0], obj.data.regionInds[-1])
if obj.results.hlaMethod is None:
filePart = '%s/pyresults/%s.%s.%s.%s.%s' % (obj.data.studyName,obj.methodName,obj.data.proteinName,obj.data.insertName,regStr,ext)
else:
filePart = '%s/pyresults/%s.%s.%s.%s.%s.%s' % (obj.data.studyName,obj.methodName,obj.data.proteinName,obj.data.insertName,regStr,obj.results.hlaMethod,ext)
fn = op.join(dataPath,filePart)
else:
"""Then its a data object"""
if obj.regionInds is None:
regStr = 'whole'
else:
regStr = '%d_%d' % (obj.regionInds[0], obj.regionInds[-1])
if obj.HLAsubset:
filePart = '%s/pyresults/data.HLAsubset.%s.%s.%s.%s' % (obj.studyName,obj.proteinName,obj.insertName,regStr,ext)
else:
filePart = '%s/pyresults/data.%s.%s.%s.%s' % (obj.studyName,obj.proteinName,obj.insertName,regStr,ext)
fn = op.join(dataPath,filePart)
folder,f = os.path.split(fn)
if not os.path.exists(folder):
os.makedirs(folder)
return fn | mit |
capaulson/pyKriging | pyKriging/CrossValidation.py | 1 | 10122 | """
@author: Giorgos
"""
import numpy as np
from matplotlib import pyplot as plt
import pyKriging
from pyKriging.krige import kriging
from pyKriging.utilities import *
import random
import scipy.stats as stats
class Cross_Validation():
def __init__(self, model, name=None):
"""
X- sampling plane
y- Objective function evaluations
name- the name of the model
"""
self.model = model
self.X = self.model.X
self.y = self.model.y
self.n, self.k = np.shape(self.X)
self.predict_list, self.predict_varr, self.scvr = [], [], []
self.name = name
def calculate_RMSE_Rsquared(self, optimiser, nt):
"""
        this function calculates the root mean squared error of the
        interpolated model for a sample of nt test data
Input:
optimiser- optimiser to be used
nt- the size of the sample test data
Output:
RMSE- the root mean squared error of nt sampling points
Rsquared- the correlation coefficient
"""
yi_p, yi, yi_dif, yiyi_p, yiyi, yi_pyi_p = [], [], [], [], [], []
Sample = random.sample([i for i in range(len(self.X))], nt)
Model = kriging(self.X, self.y, name='%s' % self.name)
Model.train(optimiser)
for i, j in enumerate(Sample):
yi_p.append(Model.predict(self.X[j]))
yi.append(self.y[j])
yi_dif.append(yi[i] - yi_p[i])
yiyi_p.append(yi[i]*yi_p[i])
yiyi.append(yi[i]*yi[i])
yi_pyi_p.append(yi_p[i]*yi_p[i])
        RMSE = np.sqrt(sum(d**2. for d in yi_dif) / float(nt))  # mean of squared residuals, then square root
Rsquared = ((float(nt)*sum(yiyi_p) - sum(yi)*sum(yi_p)) /
(np.sqrt((float(nt)*sum(yiyi) - sum(yi)**2.) *
(float(nt)*sum(yi_pyi_p) - sum(yi_p)**2.))))**2.
return ['RMSE = %f' % RMSE, 'Rsquared = %f' % Rsquared]
def calculate_SCVR(self, optimiser='pso', plot=0):
"""
this function calculates the standardised cross-validated residual
(SCVR)
value for each sampling point.
Return an nx1 array with the SCVR value of each sampling point. If plot
is 1, then plot scvr vs doe and y_pred vs y.
Input:
optimiser- optimiser to be used
plot- if 1 plots scvr vs doe and y_pred vs y
Output:
predict_list- list with different interpolated kriging models
excluding
each time one point of the sampling plan
predict_varr- list with the square root of the posterior variance
scvr- the scvr as proposed by Jones et al. (Journal of global
optimisation, 13: 455-492, 1998)
"""
y_normalised = (self.y - np.min(self.y)) / (np.max(self.y) -
np.min(self.y))
y_ = np.copy(self.y)
Kriging_models_i, list_arrays, list_ys, train_list = [], [], [], []
for i in range(self.n):
exclude_value = [i]
idx = list(set(range(self.n)) - set(exclude_value))
list_arrays.append(self.X[idx])
list_ys.append(y_[idx])
Kriging_models_i.append(kriging(list_arrays[i], list_ys[i],
name='%s' % self.name))
train_list.append(Kriging_models_i[i].train(optimizer=optimiser))
self.predict_list.append(Kriging_models_i[i].predict(self.X[i]))
self.predict_varr.append(Kriging_models_i[i].predict_var(
self.X[i]))
self.scvr.append((y_normalised[i] - Kriging_models_i[i].normy(
self.predict_list[i])) /
self.predict_varr[i][0, 0])
if plot == 0:
return self.predict_list, self.predict_varr, self.scvr
elif plot == 1:
fig = plt.figure(figsize=(12, 8), facecolor='w', edgecolor='k',
linewidth= 2.0, frameon=True)
ax1 = fig.add_subplot(1, 2, 1)
ax1.scatter([i for i in range(1, self.n+1)], self.scvr, alpha=0.5,
edgecolor='black', facecolor='b', linewidth=2.)
ax1.plot([i for i in range(0, self.n+3)], [3]*(self.n+3), 'r')
ax1.plot([i for i in range(0, self.n+3)], [-3]*(self.n+3), 'r')
ax1.set_xlim(0, self.n+2)
ax1.set_ylim(-4, 4)
ax1.set_xlabel('DoE individual')
ax1.set_ylabel('SCVR')
ax2 = fig.add_subplot(1, 2, 2)
ax2.scatter(self.predict_list, self.y, alpha=0.5,
edgecolor='black', facecolor='b', linewidth=2.)
if np.max(self.y) > 0:
ax2.set_ylim(0, np.max(self.y) + 0.00001)
ax2.set_xlim(0, max(self.predict_list) + 0.00001)
else:
ax2.set_ylim(0, np.min(self.y) - 0.00001)
ax2.set_xlim(0, min(self.predict_list) - 0.00001)
ax2.plot(ax2.get_xlim(), ax2.get_ylim(), ls="-", c=".3")
ax2.set_xlabel('predicted y')
ax2.set_ylabel('y')
plt.show()
return self.predict_list, self.predict_varr, self.scvr
else:
raise ValueError('value for plot should be either 0 or 1')
def calculate_transformed_SCVR(self, transformation, optimiser='pso',
plot=0):
"""
this function calculates the transformed standardised cross-validated
residual (SCVR) value for each sampling point. This helps to improve
the model.
Return an nx1 array with the SCVR value of each sampling point. If plot
is 1, then plot scvr vs doe and y_pred vs y.
Input:
optimiser- optimiser to be used
plot- if 1 plots scvr vs doe and y_pred vs y
transformation- the tranformation of the objective function
(logarithmic or inverse)
Output:
predict_list- list with different interpolated kriging models
excluding
each time one point of the sampling plan
predict_varr- list with the square root of the posterior variance
scvr- the scvr as proposed by Jones et al. (Journal of global
optimisation, 13: 455-492, 1998)
"""
y_ = np.copy(self.y)
if transformation == 'logarithmic':
y_ = np.log(y_)
elif transformation == 'inverse':
y_ = -(1.0/y_)
y_normalised = (y_ - np.min(y_)) / (np.max(y_) -
np.min(y_))
Kriging_models_i, list_arrays, list_ys, train_list = [], [], [], []
for i in range(self.n):
exclude_value = [i]
idx = list(set(range(self.n)) - set(exclude_value))
list_arrays.append(self.X[idx])
list_ys.append(y_[idx])
Kriging_models_i.append(kriging(list_arrays[i], list_ys[i],
name='%s' % self.name))
train_list.append(Kriging_models_i[i].train(optimizer=optimiser))
self.predict_list.append(Kriging_models_i[i].predict(self.X[i]))
self.predict_varr.append(Kriging_models_i[i].predict_var(
self.X[i]))
self.scvr.append((y_normalised[i] - Kriging_models_i[i].normy(
self.predict_list[i])) /
self.predict_varr[i][0, 0])
if plot == 0:
return self.predict_list, self.predict_varr, self.scvr
elif plot == 1:
fig = plt.figure(figsize=(12, 8), facecolor='w', edgecolor='k',
linewidth= 2.0, frameon=True)
ax1 = fig.add_subplot(1, 2, 1)
ax1.scatter([i for i in range(1, self.n+1)], self.scvr, alpha=0.5,
edgecolor='black', facecolor='b', linewidth=2.)
ax1.plot([i for i in range(0, self.n+3)], [3]*(self.n+3), 'r')
ax1.plot([i for i in range(0, self.n+3)], [-3]*(self.n+3), 'r')
ax1.set_xlim(0, self.n+2)
ax1.set_ylim(-4, 4)
ax1.set_xlabel('DoE individual')
ax1.set_ylabel('SCVR')
ax2 = fig.add_subplot(1, 2, 2)
ax2.scatter(self.predict_list, y_, alpha=0.5,
edgecolor='black', facecolor='b', linewidth=2.)
if np.max(y_) > 0:
ax2.set_ylim(0, np.max(y_) + 0.00001)
ax2.set_xlim(0, max(self.predict_list) + 0.00001)
else:
ax2.set_ylim(0, np.min(y_) - 0.00001)
ax2.set_xlim(0, min(self.predict_list) - 0.00001)
ax2.plot(ax2.get_xlim(), ax2.get_ylim(), ls="-", c=".3")
            ax2.set_xlabel('predicted %s' % ('ln(y)' if transformation ==
                                             'logarithmic' else '-1/y'))
            ax2.set_ylabel('ln(y)' if transformation == 'logarithmic'
                           else '-1/y')
plt.show()
return self.predict_list, self.predict_varr, self.scvr
else:
raise ValueError('value for plot should be either 0 or 1')
def QQ_plot(self):
"""
returns the QQ-plot with normal distribution
"""
plt.figure(figsize=(12, 8), facecolor='w', edgecolor='k',
linewidth= 2.0, frameon=True)
stats.probplot(self.scvr, dist="norm", plot=plt)
plt.xlabel('SCVR')
plt.ylabel('Standard quantile')
plt.show()
def leave_n_out(self, q=5):
'''
:param q: the numer of groups to split the model data inot
:return:
'''
mseArray = []
        for i in splitArrays(self.model, q):
testk = kriging( i[0], i[1] )
testk.train()
for j in range(len(i[2])):
mseArray.append(mse(i[3][j], testk.predict( i[2][j] )))
del(testk)
return np.average(mseArray), np.std(mseArray)
## Example Use Case:
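# The block below is a hedged illustration only: the toy 2-D function, the
# sample size and the optimiser choice are assumptions, not part of pyKriging.
if __name__ == '__main__':
    np.random.seed(1)
    X = np.random.rand(20, 2)                      # 20 samples in [0, 1]^2
    y = np.array([np.sin(6 * p[0]) + np.cos(4 * p[1]) for p in X])

    model = kriging(X, y, name='toy')              # ordinary kriging surrogate
    model.train('pso')

    cv = Cross_Validation(model, name='toy')
    print(cv.calculate_RMSE_Rsquared('pso', 5))    # hold-out RMSE and R^2
    print(cv.leave_n_out(q=5))                     # mean/std of MSE over 5 folds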
| mit |
glouppe/scikit-learn | sklearn/ensemble/tests/test_weight_boosting.py | 58 | 17158 | """Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
clf = AdaBoostRegressor(random_state=0)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert score > 0.85
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
| bsd-3-clause |
Kirubaharan/hydrology | ch_623/ch_623_stage_area.py | 2 | 4587 | __author__ = 'kiruba'
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib import rc
from scipy.interpolate import griddata
from matplotlib import cm
from matplotlib.path import *
from mpl_toolkits.mplot3d import axes3d, Axes3D
import matplotlib as mpl
import matplotlib.colors as mc
import checkdam.checkdam as cd
# latex parameters
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=18)
base_file = '/media/kiruba/New Volume/milli_watershed/stream_profile/623/base_profile_623.csv'
df_base = pd.read_csv(base_file, header=-1, skiprows=1)
# print df_base.head()
# slope_file = '/media/kiruba/New Volume/milli_watershed/stream_profile/616/slope_616.csv'
# df_slope = pd.read_csv(slope_file, header=0)
# print df_slope
df_base_trans = df_base.T
df_base_trans.columns = df_base_trans.ix[0, 0:]
# print df_base_trans
df_base_trans = df_base_trans.ix[1:, 1500:]
print df_base_trans
# raise SystemExit(0)
created_profile = df_base_trans
# print created_profile.head()
sorted_df = created_profile.iloc[0:, 1:]
sorted_df = sorted_df[sorted(sorted_df.columns)]
sorted_df = sorted_df.join(created_profile.iloc[0:, 0], how='right')
created_profile = cd.set_column_sequence(sorted_df, [1500])
# print created_profile.head()
# raise SystemExit(0)
"""
Create (x,y,z) point cloud
"""
z_array = created_profile.iloc[0:, 1:]
columns = z_array.columns
z_array = z_array.values
index = created_profile.iloc[0:,0]
df = pd.DataFrame(z_array, columns=columns).set_index(index)
data_1 = []
for y, row in df.iteritems():
for x, z in row.iteritems():
data_1.append((x, y, z))
data_1_df = pd.DataFrame(data_1, columns=['x', 'y', 'z'])
# print data_1_df.dtypes
# raise SystemExit(0)
X = data_1_df.x
Y = data_1_df.y
Z = data_1_df.z
## contour and 3d surface plotting
fig = plt.figure(figsize=plt.figaspect(0.5))
ax = fig.gca(projection='3d')
# ax = fig.add_subplot(1, 2, 1, projection='3d')
xi = np.linspace(X.min(), X.max(), 100)
yi = np.linspace(Y.min(), Y.max(), 100)
# print len(xi)
# print len(yi)
# print len(Z)
zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method='linear') # create a uniform spaced grid
xig, yig = np.meshgrid(xi, yi)
surf = ax.plot_wireframe(X=xig, Y=yig, Z=zi, rstride=5, cstride=3, linewidth=1)#, cmap=cm.coolwarm, antialiased=False) # 3d plot
# inter_1 = []
# inter_1.append((xi, yi, zi))
# inter = pd.DataFrame(inter_1, columns=['x', 'y', 'z'])
# inter.to_csv('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/inter.csv') # interpolation data output
# fig.colorbar(surf, shrink=0.5, aspect=5)
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# plt.xlabel(r'\textbf{X} (m)')
# plt.ylabel(r'\textbf{Y} (m)')
# plt.title(r"Profile for 591", fontsize=16)
plt.gca().invert_xaxis() # reverses x axis
# # ax = fig
# plt.savefig('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/linear_interpolation')
plt.show()
# raise SystemExit(0)
# ## trace contours
# Refer: Nikolai Shokhirev http://www.numericalexpert.com/blog/area_calculation/
check_dam_height = 0.66 #metre
levels = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.1,0.2, 0.3,0.4, 0.5, 0.6, 0.7, 0.8,0.9,1.1,1.2,1.3,1.4, 1.41,1.5 ] #, 3.93]
cmap = cm.hot
norm = mc.BoundaryNorm(levels, cmap.N )
plt.figure(figsize=(11.69, 8.27))
CS = plt.contourf(xi, yi, zi, len(levels), alpha=.75, norm=norm, levels=levels)
C = plt.contour(xi, yi, zi, len(levels), colors='black', linewidth=.5, levels=levels)
plt.clabel(C, inline=1, fontsize=10)
plt.colorbar(CS, shrink=0.5, aspect=5)
plt.yticks(np.arange(0,30, 5))
plt.xticks(np.arange(-6,6, 2))
plt.grid()
plt.gca().invert_xaxis()
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_623/cont_2d')
plt.show()
# contour_area(C)
contour_a = cd.contour_area(CS)
cont_area_df = pd.DataFrame(contour_a, columns=['Z', 'Area'])
plt.plot(cont_area_df['Z'], cont_area_df['Area'])
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.ylabel(r'\textbf{Area} ($m^2$)')
plt.xlabel(r'\textbf{Stage} (m)')
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_623/cont_area_623')
# plt.show()
cont_area_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_623/cont_area.csv')
created_profile.iloc[0] = created_profile.columns
# print created_profile
created_profile.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_623/created_profile_623.csv')
| gpl-3.0 |
nikitasingh981/scikit-learn | examples/plot_missing_values.py | 35 | 3059 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better
results than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via
cross-validation. Sometimes dropping rows or using marker values is
more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
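# A hedged variant, not part of the original example: the same pipeline with
# median imputation, which (as noted above) is more robust when a few
# high-magnitude variables dominate.
estimator_median = Pipeline([("imputer", Imputer(missing_values=0,
                                                 strategy="median",
                                                 axis=0)),
                             ("forest", RandomForestRegressor(random_state=0,
                                                              n_estimators=100))])
score_median = cross_val_score(estimator_median, X_missing, y_missing).mean()
print("Score after median imputation of the missing values = %.2f" % score_median)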
| bsd-3-clause |
rknLA/sms-tools | lectures/06-Harmonic-model/plots-code/f0Yin.py | 18 | 1718 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming
import sys, os
import essentia.standard as ess
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
import stft as STFT
def f0Yin(x, N, H, minf0, maxf0):
# fundamental frequency detection using the Yin algorithm
# x: input sound, N: window size,
# minf0: minimum f0 frequency in Hz, maxf0: maximim f0 frequency in Hz,
# returns f0
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=N, type='hann')
pitchYin= ess.PitchYinFFT(minFrequency = minf0, maxFrequency = maxf0)
pin = 0
pend = x.size-N
f0 = []
while pin<pend:
mX = spectrum(window(x[pin:pin+N]))
f0t = pitchYin(mX)
f0 = np.append(f0, f0t[0])
pin += H
return f0
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/bendir.wav')
plt.figure(1, figsize=(9, 7))
N = 2048
H = 256
w = hamming(2048)
mX, pX = STFT.stftAnal(x, fs, w, N, H)
maxplotfreq = 2000.0
frmTime = H*np.arange(mX[:,0].size)/float(fs)
binFreq = fs*np.arange(N*maxplotfreq/fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:N*maxplotfreq/fs+1]))
N = 2048
minf0 = 130
maxf0 = 300
H = 256
f0 = f0Yin(x, N, H, minf0, maxf0)
yf0 = UF.sinewaveSynth(f0, .8, H, fs)
frmTime = H*np.arange(f0.size)/float(fs)
plt.plot(frmTime, f0, linewidth=2, color='k')
plt.autoscale(tight=True)
plt.title('mX + f0 (vignesh.wav), YIN: N=2048, H = 256 ')
plt.tight_layout()
plt.savefig('f0Yin.png')
UF.wavwrite(yf0, fs, 'f0Yin.wav')
plt.show()
| agpl-3.0 |
jrbadiabo/Coursera-Stanford-ML-Class | Python_Version/Ex3.Multi-class_Classification_-_NN/displayData.py | 3 | 1597 | import numpy as np
from matplotlib import use
use('TkAgg')
import matplotlib.pyplot as plt
from show import show
def displayData(X):
"""displays 2D data
stored in X in a nice grid. It returns the figure handle h and the
displayed array if requested."""
# Compute rows, cols
m, n = X.shape
example_width = round(np.sqrt(n))
example_height = (n / example_width)
# Compute number of items to display
display_rows = np.floor(np.sqrt(m))
display_cols = np.ceil(m / display_rows)
# Between images padding
pad = 1
# Setup blank display
display_array = - np.ones((pad + display_rows * (example_height + pad),
pad + display_cols * (example_width + pad)))
# Copy each example into a patch on the display array
curr_ex = 0
for j in np.arange(display_rows):
for i in np.arange(display_cols):
            if curr_ex >= m:
break
# Get the max value of the patch
max_val = np.max(np.abs(X[curr_ex, : ]))
rows = [pad + j * (example_height + pad) + x for x in np.arange(example_height+1)]
cols = [pad + i * (example_width + pad) + x for x in np.arange(example_width+1)]
display_array[min(rows):max(rows), min(cols):max(cols)] = X[curr_ex, :].reshape(example_height, example_width) / max_val
curr_ex = curr_ex + 1
        if curr_ex >= m:
break
# Display Image
display_array = display_array.astype('float32')
plt.imshow(display_array.T)
plt.set_cmap('gray')
# Do not show axis
plt.axis('off')
show()
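# Hypothetical usage sketch (random pixels stand in for real digit data):
#   X = np.random.rand(25, 400)   # 25 examples treated as 20x20 "images"
#   displayData(X)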
| mit |
poryfly/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
cbuntain/redditResponseExtractor | userCapture.py | 1 | 2377 | #!/usr/bin/python
import praw
from pprint import pprint
import networkx as nx
# try:
# import matplotlib.pyplot as plt
# except:
# raise
def recCommentGrab(graph, comment, parent, level, sub):
if ( isinstance(comment, praw.objects.MoreComments) ):
return
if ( comment.author == None ):
return
# print "\t"*level, parent, "<- replies to -", comment.author
graph.add_node(comment.author, seen=sub)
if ( parent in graph.successors(comment.author) ):
graph[comment.author][parent]['weight'] = graph[comment.author][parent]['weight'] + 1
else:
graph.add_edge(comment.author, parent, weight=1)
if ( len(comment.replies) > 0 ):
for rep in comment.replies:
recCommentGrab(graph, rep, comment.author, level+1, sub)
def extractPosts(graph, redditObj, sub, l=10):
submissions = redditObj.get_subreddit(sub).get_top_from_month(limit=l)
for post in submissions:
if ( post.author == None ):
continue
author = post.author
authorComments = author.get_comments(sort="top",limit=500)
print post.author
commentCount = 0
for comment in authorComments:
if ( comment.is_root ):
submission = comment.submission
print "\t", submission.author
else:
parent = redditObj.get_info(thing_id=comment.parent_id)
print "\t", parent.author
commentCount+=1
print "\tComment count:", commentCount
replyGraph.add_node(post.author, seen=sub)
# post.replace_more_comments(limit=10, threshold=0)
# print "\tComment count: ", len(post.comments)
# commentList = post.comments[:]
# for comment in commentList:
# if ( not isinstance(comment, praw.objects.MoreComments) ):
# recCommentGrab(replyGraph, comment, post.author, 1, sub)
# # else:
# # moreComs = comment.comments()
# # if ( moreComs != None ):
# # commentList.extend(moreComs)
replyGraph = nx.DiGraph()
password = raw_input('Password:')
r = praw.Reddit(user_agent='edu.umd.cs.inst633o.cbuntain')
r.login('proteius',password)
subList = [
'machinelearning',
'compsci'
# 'iama',
# 'askscience',
# # 'askreddit',
# 'AskHistorians',
# 'asksocialscience',
# 'Ask_Politics',
# 'askmen',
# 'askwomen',
]
try:
for sub in subList:
print "Checking on subreddit: /r/", sub
extractPosts(replyGraph, r, sub, 25)
except Exception, e:
print "Failed during execution: ", e
finally:
nx.write_gexf(replyGraph, 'cs_users.gexf')
| mit |
dkesada/SocialBot | socialBot.py | 1 | 17797 | #! /usr/bin/python
#-*. coding: utf-8 -*-
#authors: David Quesada López and Mateo García Fuentes
import sys
import time
import telepot
import telepot.helper
from telepot.delegate import (
per_chat_id, per_callback_query_origin, create_open, pave_event_space)
import googlemaps
from datetime import datetime
import math
import json
import datetime
import db
import steps
import keyboards
import translate
import matplotlib
matplotlib.use('Cairo')#Use this backend to plot and create a png file
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
"""
API used to fetch the places near the location the user sends:
https://github.com/googlemaps/google-maps-services-python
Here is the documentation of the functions it provides:
https://googlemaps.github.io/google-maps-services-python/docs/2.4.5/
Telepot documentation:
http://telepot.readthedocs.io/en/latest/reference.html
https://core.telegram.org/bots
http://qingkaikong.blogspot.com.es/2016/02/plot-earthquake-heatmap-on-basemap-and.html
"""
# Readying the google maps client
mapclient = googlemaps.Client(key='AIzaSyBGP8h2WjF8NOC4Covro2kDV2Iv5jT_-7Q') #Input the api places key as the first argument when launching
geoClient = googlemaps.Client(key='AIzaSyC9kpWU3vzPLVIRFQtHCkp6uoIquXdHnYE')
# One UserHandler created per chat_id. May be useful for sorting out users
# Handles chat messages depending on its tipe
class UserHandler(telepot.helper.ChatHandler):
def __init__(self, *args, **kwargs):
super(UserHandler, self).__init__(*args, **kwargs)
def calculateBounds(self, kmeters, loc):
R=6367.45 #media geometrica
bearing = math.radians(45) #45º
lat = math.radians(loc[0]) #lat of the user
lon = math.radians(loc[1]) #lng of the user
latup = math.asin(math.sin(lat)*math.cos(kmeters/R) + math.cos(lat)*math.sin(kmeters/R)*math.cos(bearing))
lonup = lon + math.atan2(math.sin(bearing)*math.sin(kmeters/R)*math.cos(lat), math.cos(kmeters/R)-math.sin(lat)*math.sin(latup))
dlat=latup-lat
dlng=lonup-lon
latdw = lat-dlat
londw = lon-dlng
latup = math.degrees(latup)
lonup = math.degrees(lonup)
latdw = math.degrees(latdw)
londw = math.degrees(londw)
return londw, latdw, lonup, latup
def heatmap(self, allLoc, chat_id):
ln = []
lt = []
for geo in allLoc:
if geo != {}:
ln.append(geo['location']['longitude'])
lt.append(geo['location']['latitude'])
loc = db.getLocation(chat_id)
llcrnrlon, llcrnrlat, urcrnrlon, urcrnrlat= self.calculateBounds(2., loc)
map = Basemap(llcrnrlon=llcrnrlon,llcrnrlat=llcrnrlat,urcrnrlon=urcrnrlon,urcrnrlat=urcrnrlat, epsg=4326)
map.arcgisimage(service='World_Imagery', xpixels = 1500, verbose= True)
x,y = map(ln, lt)
map.plot(x, y, 'ro', markersize=5,markeredgecolor="none", alpha=0.5)
x0, y0 = map(loc[1], loc[0])
x1, y1 = map(loc[1]-0.001, loc[0]+0.0017)
plt.imshow(plt.imread('loc.png'), extent = (x0, x1, y0, y1))
plt.savefig("out.png")
def on_chat_message(self, msg):
content_type, chat_type, chat_id = telepot.glance(msg,flavor='chat')
if content_type == 'text':
if msg['text'] == "/start":
steps.saveStep(chat_id, 1)
lang = db.getLanguage(chat_id)
bot.sendMessage(chat_id, translate.location(lang), reply_markup=keyboards.markupLocation(lang))
elif msg['text'] == "/settings":
steps.saveStep(chat_id, 0)
lang = db.getLanguage(chat_id)
bot.sendMessage(chat_id, translate.settings(lang), reply_markup=keyboards.settings(lang))
elif msg['text'] == "/heatmap":
locs = db.getAllLocations()
lang = db.getLanguage(chat_id)
bot.sendMessage(chat_id, translate.takesFew(lang), reply_markup=None)
self.heatmap(locs, chat_id)
bot.sendPhoto(chat_id, open('out.png', 'rb'))
bot.sendMessage(chat_id, translate.afterMap(lang), reply_markup=keyboards.afterMap(lang))
elif msg['text'] == "Default" or msg['text'] == "Por defecto":
db.storeLocation(chat_id, {u'latitude': 40.411085, u'longitude': -3.685014}, msg['date'])
state = 1
steps.saveStep(chat_id, steps.nextStep(state))
lang = db.getLanguage(chat_id)
bot.sendMessage(chat_id, translate.lookingFor(lang), reply_markup=keyboards.inlineEstablishment(lang))
elif msg['text'] == "/help":
steps.saveStep(chat_id, 8)
lang = db.getLanguage(chat_id)
bot.sendMessage(chat_id, translate.help(lang), reply_markup=keyboards.inlineBack(lang))
elif msg['text'] == "/stats":
steps.saveStep(chat_id, 9)
lang = db.getLanguage(chat_id)
user = db.getRole(chat_id)
if user == "superuser":
stats = db.getStats()
bot.sendMessage(chat_id, translate.stats(lang, stats), reply_markup=keyboards.inlineBack(lang))
else:
bot.sendMessage(chat_id, translate.noSuperuser(lang), reply_markup=keyboards.inlineBack(lang))
elif steps.getStep(chat_id) == 1:
js = geoClient.geocode(address=msg['text'], components=None, bounds=None, region=None, language='es-ES')
lang = db.getLanguage(chat_id)
location = {u'latitude':js[0]['geometry']['location']['lat'], u'longitude':js[0]['geometry']['location']['lng']}
bot.sendMessage(chat_id, translate.yourPosition(lang, js[0]['formatted_address']), reply_markup=None)
db.storeLocation(chat_id, location, msg['date'])
state = 1
steps.saveStep(chat_id, steps.nextStep(state))
bot.sendMessage(chat_id, translate.lookingFor(lang), reply_markup=keyboards.inlineEstablishment(lang))
else:
lang = db.getLanguage(chat_id)
bot.sendMessage(chat_id, translate.textNoProcces(lang), reply_markup=keyboards.markupLocation(lang))
elif content_type == 'location':
db.storeLocation(chat_id, msg['location'], msg['date'])
state = 1
steps.saveStep(chat_id, steps.nextStep(state))
lang = db.getLanguage(chat_id)
bot.sendMessage(chat_id, translate.lookingFor(lang), reply_markup=keyboards.inlineEstablishment(lang))
elif content_type == 'photo':
sending = db.getSending(chat_id)['sending']
if sending != None and sending['type'] == 'photo':
index = len(msg['photo'])-1
db.storePlacePhoto(sending['location'], msg['photo'][index]['file_id'])
lang = db.getLanguage(chat_id)
bot.editMessageReplyMarkup(msg_identifier=(chat_id,sending['msg_id']), reply_markup=None)
bot.sendMessage(chat_id, translate.photoRec(db.getLanguage(chat_id)), reply_markup=keyboards.optionsKeyboard(sending['location'], lang))
def on__idle(self, event):
self.close()
def on_close(self, event):
self.close()
# One ButtonHandler created per message that has a button pressed.
# There should only be one message from the bot at a time in a chat, so that
# you modify the same message over and over again.
class ButtonHandler(telepot.helper.CallbackQueryOriginHandler):
def __init__(self, *args, **kwargs):
super(ButtonHandler, self).__init__(*args, **kwargs)
self.state = None
self.chat_id = None
self.loc = None
self.language = None
self.msg = None
self.lim = None
self.pos = None
self.list = None
def placesNearBy(self, establishmentType, chat_id):
data = db.getLocation(chat_id)
latitude = data[0]
longitude = data[1]
settings = db.getSettings(chat_id)
js = mapclient.places(None, location=(latitude, longitude), radius=settings['radius'], language='es-ES', min_price=None, max_price=None, open_now=settings['openE'], type=establishmentType)
uLoc = db.getLocation(chat_id)
message = translate.chooseOne(self.language)
distanceL = {}
rateL = {}
resultList = {}
if js["status"] != 'ZERO_RESULTS':
resultList = js["results"]
msg = translate.loading(self.language)
while "next_page_token" in js:
msg += "."
self.editor.editMessageText(msg, reply_markup=None)
time.sleep(2)
page_token = js["next_page_token"]
js = mapclient.places(None, location=(latitude, longitude), radius=settings['radius'], language='es-ES', min_price=None, max_price=None, open_now=settings['openE'], type=establishmentType, page_token=page_token)
resultList += js["results"]
lim = settings['numberE']
i = 0
while (i < lim) and (i < len(resultList)):
lat = resultList[i]["geometry"]["location"]["lat"]
lng = resultList[i]["geometry"]["location"]["lng"]
location = str(lat) + " " + str(lng)
distance = int((self.haversine(location, uLoc)))
distanceL[distance] = resultList[i]['name']
rate = db.avgRatePlace([str(lng), str(lat)])
if rate != None:
rateL[rate] = resultList[i]['name']
i += 1
self.lim = lim
self.pos = 0
self.list = resultList
rates = sorted(rateL, reverse=True)
pos = sorted(distanceL, key=int)
message += translate.prox(self.language, distanceL, pos)
if rateL != {}:
message += "\n"
message += translate.rated(self.language, rateL, rates)
self.msg = message
db.storePos(chat_id, self.pos)
self.editor.editMessageText(message, reply_markup=keyboards.resultsKeyboard(resultList, self.language, self.pos, lim))
else:
self.editor.editMessageText(translate.noEstablish(self.language), reply_markup=keyboards.inlineBack(self.language))
def on_callback_query(self, msg):
query_id, from_id, query_data = telepot.glance(msg, flavor='callback_query')
answeredQuery = False
if self.state == None:
self.state = steps.getStep(from_id)
self.chat_id = from_id
sending = db.getSending(self.chat_id)
self.language = db.getLanguage(self.chat_id)
if sending != None and 'sending' in sending:
self.loc = sending['sending']['location']
if query_data == "start":
self.state = 1
self.editor.editMessageText(translate.location(self.language), reply_markup=None)
elif query_data == "settings":
self.state = 0
self.editor.editMessageText(translate.settings(self.language), reply_markup=keyboards.settings(self.language))
if query_data == "back":
stp = steps.stepBack(self.state)
if stp != False:
self.state -= 1;
if stp == "Init":
self.state = 1
steps.saveStep(from_id, self.state)
self.editor.editMessageText(translate.location(self.language), reply_markup=None)
elif stp == "Choose Type":
self.editor.editMessageText(translate.lookingFor(self.language), reply_markup=keyboards.inlineEstablishment(self.language))
elif stp == "Choose Establish":
eType = db.getEType(from_id)
self.placesNearBy(eType, from_id)
elif stp == "Info Establish":
self.state -= 1
steps.saveStep(from_id, self.state)
self.editor.editMessageReplyMarkup(reply_markup=None)
bot.sendMessage(self.chat_id, translate.whatWant(self.language), reply_markup=keyboards.optionsKeyboard(self.loc, self.language))
elif query_data == "more":
self.lim = db.getSettings(self.chat_id)['numberE']
self.pos = db.getPos(self.chat_id)
self.pos += self.lim
db.storePos(self.chat_id, self.pos)
self.editor.editMessageText(self.msg, reply_markup=keyboards.resultsKeyboard(self.list, self.language, self.pos, self.lim))
elif query_data == "previous":
self.lim = db.getSettings(self.chat_id)['numberE']
self.pos = db.getPos(self.chat_id)
self.pos -= self.lim
db.storePos(self.chat_id, self.pos)
self.editor.editMessageText(self.msg, reply_markup=keyboards.resultsKeyboard(self.list, self.language, self.pos, self.lim))
elif steps.step(self.state) == "Settings":
if query_data == "language":
self.editor.editMessageText(translate.chooseLang(self.language), reply_markup=keyboards.languages(self.language))
elif query_data == "parameters":
self.editor.editMessageText(translate.choooseParam(self.language), reply_markup=keyboards.parameters(self.language))
elif query_data == "sback":
self.editor.editMessageText(translate.settings(self.language), reply_markup=keyboards.settings(self.language))
elif query_data == "radius":
self.editor.editMessageText(translate.choooseDistance(self.language), reply_markup=keyboards.radius(self.language))
elif query_data == "open":
self.editor.editMessageText(translate.onlyOpen(self.language), reply_markup=keyboards.openE(self.language))
elif query_data == "numResults":
self.editor.editMessageText(translate.howLocals(self.language), reply_markup=keyboards.numE(self.language))
elif query_data == "restart":
self.state = 1;
steps.saveStep(self.chat_id, self.state)
self.editor.editMessageText(translate.location(self.language), reply_markup=None)
else:
option = query_data.split(" ")
if option[0] == "meters":
meters = option[1]
db.storeRadius(from_id, meters)
bot.answerCallbackQuery(query_id, translate.radiusChanged(self.language))
answeredQuery = True
self.editor.editMessageText(translate.whatWant(self.language), reply_markup=keyboards.optionChanged(self.language))
elif option[0] == "bool":
openE = option[1]
db.storeOpen(from_id, openE)
bot.answerCallbackQuery(query_id, translate.openChanged(self.language))
answeredQuery = True
self.editor.editMessageText(translate.whatWant(self.language), reply_markup=keyboards.optionChanged(self.language))
elif option[0] == "language":
self.language = option[1]
db.storeLanguage(from_id, self.language)
bot.answerCallbackQuery(query_id, translate.langChanged(self.language))
answeredQuery = True
self.editor.editMessageText(translate.whatWant(self.language), reply_markup=keyboards.optionChanged(self.language))
elif option[0] == "num":
num = option[1]
db.storeNumberE(from_id, num)
bot.answerCallbackQuery(query_id, translate.numberChanged(self.language))
answeredQuery = True
self.editor.editMessageText(translate.whatWant(self.language), reply_markup=keyboards.optionChanged(self.language))
elif steps.step(self.state) == "Choose Type":
self.state = steps.nextStep(self.state)
db.storeEType(from_id, query_data)
self.placesNearBy(query_data, from_id)
elif steps.step(self.state) == "Choose Establish":
self.state = steps.nextStep(self.state)
steps.saveStep(self.chat_id, self.state) # After this point, the flow of options of the user can branch
data = query_data.split(" ")
lat = data[0]
lng = data[1]
self.loc = [lng, lat]
self.editor.editMessageReplyMarkup(reply_markup=None)
bot.sendLocation(self.chat_id,lat,lng)
rate = db.avgRatePlace(self.loc)
locat = str(lat)+ " " +str(lng)
distance = self.haversine(locat, db.getLocation(self.chat_id))
bot.sendMessage(self.chat_id, translate.hereIts(self.language, rate, distance), reply_markup=keyboards.optionsKeyboard(self.loc, self.language))
elif steps.step(self.state) == "Info Establish":
option = query_data.split(" ")
            if len(option) < 3:  # malformed callback data; dump it for debugging
                print option
self.loc = [option[1], option[2]]
if option[0] == "rating":
self.state = steps.nextStep(self.state)
self.editor.editMessageText(translate.yourRate(self.language), reply_markup=keyboards.rating(self.language))
elif option[0] == "photo":
db.preparePhotoSending(from_id, msg['message']['message_id'], self.loc)
self.editor.editMessageText(translate.sendPhoto(self.language), reply_markup=keyboards.inlineBack(self.language))
elif option[0] == "show_photos":
self.state = steps.nextStep(self.state) + 1
steps.saveStep(self.chat_id, self.state)
info = db.getPlaceData(self.loc)
db.preparePhotoSending(from_id, msg['message']['message_id'], self.loc)
self.editor.editMessageReplyMarkup(reply_markup=None)
bot.sendPhoto(from_id, info['photos'][0], reply_markup=keyboards.photos(info, 0, self.language))
elif steps.step(self.state) == "Rating":
db.storeRating(self.loc, from_id, int(query_data))
self.state = steps.nextStep(self.state)
star = u'\u2b50\ufe0f'
text = ''
for i in range(int(query_data)):
text += star
bot.answerCallbackQuery(query_id, text)
answeredQuery = True
self.editor.editMessageText(translate.whatWant(self.language), reply_markup=keyboards.optionsKeyboard(self.loc, self.language))
elif steps.step(self.state) == "Viewing Photos":
self.editor.editMessageReplyMarkup(reply_markup=None)
info = db.getPlaceData(self.loc)
bot.sendPhoto(from_id, info['photos'][int(query_data)], reply_markup=keyboards.photos(info, int(query_data), self.language))
#self.state = steps.nextStep(self.state)
elif steps.step(self.state) == "Come Back":
if query_data == "init":
self.state = 1
self.editor.editMessageText(translate.location(self.language), reply_markup=None)
elif query_data == "type":
self.state = 2
self.editor.editMessageText(translate.lookingFor(self.language), reply_markup=keyboards.inlineEstablishment(self.language))
elif query_data == "establishment":
self.state = 3
eType = db.getEType(from_id)
self.placesNearBy(eType, from_id)
if answeredQuery == False:
bot.answerCallbackQuery(query_id)
    def haversine(self, locat, uLoc):
        # Great-circle distance (haversine formula) between a place given as a
        # "lat lng" string and the user's location [lat, lng].
        locat = locat.split(" ")
        lat2 = float(locat[0])
        lng2 = float(locat[1])
        lat1 = float(uLoc[0])
        lng1 = float(uLoc[1])
        rad = math.pi / 180  # degrees to radians
        dlat = lat2 - lat1
        dlng = lng2 - lng1
        R = 6367.45  # geometric mean Earth radius in km
        a = (math.sin(rad * dlat / 2)) ** 2 + math.cos(rad * lat1) * math.cos(rad * lat2) * (math.sin(rad * dlng / 2)) ** 2
        distance = 2 * R * math.asin(math.sqrt(a))  # kilometers
        return distance * 1000  # meters
def on__idle(self, event):
steps.saveStep(self.chat_id, self.state)
self.close()
def on_close(self, event):
self.close()
TOKEN = '366092875:AAFQUuXo7qz-oK1xdmGWQQEoporpGPunNSA'
bot = telepot.DelegatorBot(TOKEN, [
pave_event_space()(
per_chat_id(), create_open, UserHandler, timeout=180),
pave_event_space()(
per_callback_query_origin(), create_open, ButtonHandler, timeout=180),
])
bot.message_loop(run_forever='Listening')
| mit |
CforED/Machine-Learning | examples/plot_digits_pipe.py | 70 | 1813 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
openpathsampling/openpathsampling | examples/alanine_dipeptide_mstis/alatools.py | 4 | 7508 | import matplotlib.pyplot as plt
import openpathsampling as paths
import numpy as np
import math
class CVSphere(paths.Volume):
"""
    Defines a sphere in multi-CV space with a center and a per-CV radius
"""
def __init__(self, cvs, center, radius):
self.cvs = cvs
self.center = center
self.radius = radius
assert(len(cvs) == len(center) == len(radius))
    def __call__(self, snapshot):
        # Euclidean norm of the CV values for this snapshot
        return math.sqrt(sum(cv(snapshot) ** 2 for cv in self.cvs))
def __and__(self, other):
if isinstance(other, paths.EmptyVolume):
return self
elif isinstance(other, paths.FullVolume):
return other
elif isinstance(other, CVSphere):
dc = np.linalg.norm(np.array(self.center) - np.array(other.center))
# use triangle inequality
if self.radius >= dc + other.radius:
# other is completely in self
return self
elif other.radius >= dc + self.radius:
# self is completely in other
return other
return paths.UnionVolume(
self, other
)
class TwoCVSpherePlot(object):
def __init__(
self, cvs, states, state_centers,
interface_levels, ranges=None):
self.cvs = cvs
self.states = states
self.state_centers = state_centers
self.interface_levels = interface_levels
self._ax1 = 0
self._ax2 = 1
self.figsize = (6, 6)
self.periodic = [math.pi] * len(cvs)
self.zoom = 180 / math.pi
if ranges is None:
self.ranges = ((-180, 180), (-180, 180))
else:
self.ranges = ranges
self.color_fnc = lambda x: (x, x, 0.6)
self.color_fnc = lambda x: (x * 0.5 + 0.4, 0.5 * x + 0.4, 1 * x, 1.0)
def select_axis(self, ax1, ax2):
self._ax1 = ax1
self._ax2 = ax2
def new(self, figsize=None):
if figsize is None:
figsize = self.figsize
plt.figure(figsize=figsize)
def main(self):
n_states = len(self.states)
centers = self.state_centers
levels = self.interface_levels
labels = [state.name[0] for state in self.states]
periodic = (self.periodic[self._ax1], self.periodic[self._ax2])
mirror = [
[-1, 0, 1] if p is not None else [0]
for p in periodic
]
# replace None with zero
periodic = [p or 0 for p in periodic]
plt.plot(
[x[self._ax1] for x in centers],
[x[self._ax2] for x in centers],
'ko')
fig = plt.gcf()
all_levels = sorted(
list(set(
sum(levels, [])
)),
reverse=True
) + [0]
plt.xlabel(self.cvs[self._ax1].name)
plt.ylabel(self.cvs[self._ax2].name)
max_level = max(all_levels)
zoom = self.zoom
for level in all_levels:
for colored in [True, False]:
for state in range(n_states):
center = centers[state]
center = (center[self._ax1], center[self._ax2])
name = labels[state]
if level == 0:
plt.annotate(
name,
xy=center,
xytext=(center[0]+10 + 1, center[1] - 1),
fontsize=20,
color='k'
)
plt.annotate(
name,
xy=center,
xytext=(center[0]+10, center[1]),
fontsize=20,
color='w'
)
if level in levels[state]:
for xp in mirror[0]:
for yp in mirror[1]:
if colored:
circle = plt.Circle(
(center[0] + xp * periodic[0] * zoom * 2,
center[1] + yp * periodic[1] * zoom * 2),
level,
color='w'
)
fig.gca().add_artist(circle)
else:
l = 1.0 * level / max_level
circle = plt.Circle(
(center[0] + xp * periodic[0] * zoom * 2,
center[1] + yp * periodic[1] * zoom * 2),
level - 1,
color=self.color_fnc(l)
)
fig.gca().add_artist(circle)
# plt.axis((-180,180,-180,180))
plt.axis('equal')
plt.xlim(*self.ranges[0])
plt.ylim(*self.ranges[1])
def _cvlines(self, snapshots):
cvs = self.cvs
all_points = [cv(snapshots) for cv in cvs]
ret = []
first = 0
if len(snapshots) > 1:
for d in range(1, len(snapshots)):
flip = False
for c in range(len(cvs)):
if self.periodic[c] is not None and self._periodicflip(
all_points[c][d],
all_points[c][d-1],
self.periodic[c]
):
flip = True
if flip:
ret.append([all_points[c][first:d] for c in range(len(cvs))])
first = d
ret.append([all_points[c][first:d+1] for c in range(len(cvs))])
return ret
@staticmethod
def _periodicflip(val1, val2, period):
return (period**2 - (val1 - val2)**2) < (val1 - val2)**2
def add_trajectory(self, trajectory, line=True, points=True):
angles = self._cvlines(trajectory)
zoom = self.zoom
for angle in angles:
if points:
plt.plot(
zoom * np.array(angle[self._ax1])[:],
zoom * np.array(angle[self._ax2])[:],
'ko',
linewidth=0.5)
if line:
plt.plot(
zoom * np.array(angle[self._ax1])[:],
zoom * np.array(angle[self._ax2])[:],
'k-',
linewidth=0.5)
def add_snapshot(self, snapshot, label=None):
zoom = self.zoom
angle = [cv(snapshot) for cv in self.cvs]
x = zoom * np.array(angle[self._ax1])
y = zoom * np.array(angle[self._ax2])
plt.plot(
x, y,
'w+',
mew=5, ms=14)
plt.plot(
x, y,
'k+',
mew=3, ms=12)
if label is not None:
plt.annotate(
label,
xy=(x, y),
xytext=(x + 6, y + 4),
fontsize=12,
color='w'
)
plt.annotate(
label,
xy=(x, y),
xytext=(x + 5, y + 5),
fontsize=12,
color='k'
) | mit |
jreback/pandas | pandas/tests/generic/test_to_xarray.py | 2 | 4360 | import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import Categorical, DataFrame, MultiIndex, Series, date_range
import pandas._testing as tm
class TestDataFrameToXArray:
@pytest.fixture
def df(self):
return DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": Categorical(list("abc")),
"g": date_range("20130101", periods=3),
"h": date_range("20130101", periods=3, tz="US/Eastern"),
}
)
@td.skip_if_no("xarray", "0.10.0")
def test_to_xarray_index_types(self, index, df):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if len(index) == 0:
pytest.skip("Test doesn't make sense for empty index")
from xarray import Dataset
df.index = index[:3]
df.index.name = "foo"
df.columns.name = "bar"
result = df.to_xarray()
assert result.dims["foo"] == 3
assert len(result.coords) == 1
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, Dataset)
# idempotency
# datetimes w/tz are preserved
# column names are lost
expected = df.copy()
expected["f"] = expected["f"].astype(object)
expected.columns.name = None
tm.assert_frame_equal(result.to_dataframe(), expected)
@td.skip_if_no("xarray", min_version="0.7.0")
def test_to_xarray_empty(self, df):
from xarray import Dataset
df.index.name = "foo"
result = df[0:0].to_xarray()
assert result.dims["foo"] == 0
assert isinstance(result, Dataset)
@td.skip_if_no("xarray", min_version="0.7.0")
def test_to_xarray_with_multiindex(self, df):
from xarray import Dataset
# available in 0.7.1
# MultiIndex
df.index = MultiIndex.from_product([["a"], range(3)], names=["one", "two"])
result = df.to_xarray()
assert result.dims["one"] == 1
assert result.dims["two"] == 3
assert len(result.coords) == 2
assert len(result.data_vars) == 8
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
assert isinstance(result, Dataset)
result = result.to_dataframe()
expected = df.copy()
expected["f"] = expected["f"].astype(object)
expected.columns.name = None
tm.assert_frame_equal(result, expected)
class TestSeriesToXArray:
@td.skip_if_no("xarray", "0.10.0")
def test_to_xarray_index_types(self, index):
if isinstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
from xarray import DataArray
ser = Series(range(len(index)), index=index, dtype="int64")
ser.index.name = "foo"
result = ser.to_xarray()
repr(result)
assert len(result) == len(index)
assert len(result.coords) == 1
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, DataArray)
# idempotency
tm.assert_series_equal(result.to_series(), ser)
@td.skip_if_no("xarray", min_version="0.7.0")
def test_to_xarray_empty(self):
from xarray import DataArray
ser = Series([], dtype=object)
ser.index.name = "foo"
result = ser.to_xarray()
assert len(result) == 0
assert len(result.coords) == 1
tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
assert isinstance(result, DataArray)
@td.skip_if_no("xarray", min_version="0.7.0")
def test_to_xarray_with_multiindex(self):
from xarray import DataArray
mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"])
ser = Series(range(6), dtype="int64", index=mi)
result = ser.to_xarray()
assert len(result) == 2
tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
assert isinstance(result, DataArray)
res = result.to_series()
tm.assert_series_equal(res, ser)
| bsd-3-clause |
0todd0000/spm1d | spm1d/examples/stats0d/ex_ci_twosample.py | 1 | 1434 |
import numpy as np
from matplotlib import pyplot
import spm1d
#(0) Load dataset:
dataset = spm1d.data.uv0d.ci2.AnimalsInResearch()
yA,yB = dataset.get_data()
print( dataset )
#(1) Compute confidence intervals:
alpha = 0.05
mu = 0
ci0 = spm1d.stats.ci_twosample(yA, yB, alpha, datum='difference', mu=None) # datum: inter-group mean difference (explicit hypothesis test suppressed using "mu=None")
ci1 = spm1d.stats.ci_twosample(yA, yB, alpha, datum='difference', mu=mu) # datum: inter-group mean difference (hypothesis test regarding a specific inter-group difference "mu=0")
ci2 = spm1d.stats.ci_twosample(yA, yB, alpha, datum='meanA', mu='meanB') # datum: meanA, criterion: whether CI reaches meanB
ci3 = spm1d.stats.ci_twosample(yA, yB, alpha, datum='meanA', mu='tailB') # datum: meanA,criterion:whether CI tails overlap
print( ci0 )
print( ci1 )
print( ci2 )
print( ci3 )
#(2) Plot the CIs:
pyplot.close('all')
pyplot.figure(figsize=(8,8))
ax0 = pyplot.subplot(221); ci0.plot(ax0); ax0.set_title('datum="difference", mu=None', size=10)
ax1 = pyplot.subplot(222); ci1.plot(ax1); ax1.set_title('datum="difference", mu=%.5f'%mu, size=10)
ax2 = pyplot.subplot(223); ci2.plot(ax2); ax2.set_title('datum="meanA", mu="meanB"', size=10)
ax3 = pyplot.subplot(224); ci3.plot(ax3); ax3.set_title('datum="meanA", mu="tailsAB"', size=10)
pyplot.suptitle('Paired sample CIs')
pyplot.show() | gpl-3.0 |
strint/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 51 | 2709 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def input_fn(data_set):
feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}
labels = tf.constant(data_set[LABEL].values)
return feature_cols, labels
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.contrib.layers.real_valued_column(k)
for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Fit
regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)
# Score accuracy
ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions
y = regressor.predict(input_fn=lambda: input_fn(prediction_set))
# .predict() returns an iterator; convert to a list and print predictions
predictions = list(itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
napjon/moocs_solution | ml-udacity/pca/eigenfaces.py | 1 | 5184 |
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
original source: http://scikit-learn.org/stable/auto_examples/applications/face_recognition.html
"""
print __doc__
from time import time
import logging
import pylab as pl
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
np.random.seed(42)
# for machine learning we use the 2 data directly (as relative pixel
# positions info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print "Total dataset size:"
print "n_samples: %d" % n_samples
print "n_features: %d" % n_features
print "n_classes: %d" % n_classes
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
def pcaTrainAndPredict(n_components):
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
# n_components = 150
print "Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0])
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print "done in %0.3fs" % (time() - t0)
eigenfaces = pca.components_.reshape((n_components, h, w))
print "Projecting the input data on the eigenfaces orthonormal basis"
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print "done in %0.3fs" % (time() - t0)
###############################################################################
# Train a SVM classification model
print "Fitting the classifier to the training set"
t0 = time()
param_grid = {
'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print "done in %0.3fs" % (time() - t0)
print "Best estimator found by grid search:"
print clf.best_estimator_
###############################################################################
# Quantitative evaluation of the model quality on the test set
print "Predicting the people names on the testing set"
t0 = time()
y_pred = clf.predict(X_test_pca)
print "done in %0.3fs" % (time() - t0)
print classification_report(y_test, y_pred, target_names=target_names)
    print confusion_matrix(y_test, y_pred, labels=range(n_classes))
    return eigenfaces, y_pred
eigenfaces, y_pred = pcaTrainAndPredict(150)
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
pl.figure(figsize=(1.8 * n_col, 2.4 * n_row))
pl.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
pl.subplot(n_row, n_col, i + 1)
pl.imshow(images[i].reshape((h, w)), cmap=pl.cm.gray)
pl.title(titles[i], size=12)
pl.xticks(())
pl.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
pl.show() | mit |
herilalaina/scikit-learn | sklearn/gaussian_process/tests/test_gpr.py | 22 | 13791 | """Testing for Gaussian process regression """
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, ConstantKernel as C, WhiteKernel
from sklearn.gaussian_process.kernels import DotProduct
from sklearn.utils.testing \
import (assert_true, assert_greater, assert_array_less,
assert_almost_equal, assert_equal, assert_raise_message,
assert_array_almost_equal, assert_array_equal)
def f(x):
return x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=1.0), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2)),
C(0.1, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)) +
C(1e-5, (1e-5, 1e2))]
def test_gpr_interpolation():
# Test the interpolating property for different kernels.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_pred, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_pred, y)
assert_almost_equal(np.diag(y_cov), 0.)
def test_lml_improving():
# Test that hyperparameter-tuning improves log-marginal likelihood.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
# Test that lml of optimized kernel is stored correctly.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_equal(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood())
def test_converged_to_local_maximum():
# Test that we are in local maximum after hyperparameter-optimization.
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpr.log_marginal_likelihood(gpr.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 0]) |
(gpr.kernel_.theta == gpr.kernel_.bounds[:, 1])))
def test_solution_inside_bounds():
    # Test that hyperparameter-optimization remains in bounds
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
bounds = gpr.kernel_.bounds
max_ = np.finfo(gpr.kernel_.theta.dtype).max
tiny = 1e-10
bounds[~np.isfinite(bounds[:, 1]), 1] = max_
assert_array_less(bounds[:, 0], gpr.kernel_.theta + tiny)
assert_array_less(gpr.kernel_.theta, bounds[:, 1] + tiny)
def test_lml_gradient():
# Compare analytic and numeric gradient of log marginal likelihood.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
lml, lml_gradient = gpr.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpr.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_prior():
# Test that GP prior has mean 0 and identical variances.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel)
y_mean, y_cov = gpr.predict(X, return_cov=True)
assert_almost_equal(y_mean, 0, 5)
if len(gpr.kernel.theta) > 1:
# XXX: quite hacky, works only for current kernels
assert_almost_equal(np.diag(y_cov), np.exp(kernel.theta[0]), 5)
else:
assert_almost_equal(np.diag(y_cov), 1, 5)
def test_sample_statistics():
# Test that statistics of samples drawn from GP are correct.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
samples = gpr.sample_y(X2, 300000)
# More digits accuracy would require many more samples
assert_almost_equal(y_mean, np.mean(samples, 1), 1)
assert_almost_equal(np.diag(y_cov) / np.diag(y_cov).max(),
np.var(samples, 1) / np.diag(y_cov).max(), 1)
def test_no_optimizer():
# Test that kernel parameters are unmodified when optimizer is None.
kernel = RBF(1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None).fit(X, y)
assert_equal(np.exp(gpr.kernel_.theta), 1.0)
def test_predict_cov_vs_std():
# Test that predicted std.-dev. is consistent with cov's diagonal.
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
y_mean, y_cov = gpr.predict(X2, return_cov=True)
y_mean, y_std = gpr.predict(X2, return_std=True)
assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
# Test that GPR can identify meaningful anisotropic length-scales.
# We learn a function which varies in one dimension ten-times slower
# than in the other. The corresponding length-scales should differ by at
# least a factor 5
rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, (50, 2))
y = X[:, 0] + 0.1 * X[:, 1]
kernel = RBF([1.0, 1.0])
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_greater(np.exp(gpr.kernel_.theta[1]),
np.exp(gpr.kernel_.theta[0]) * 5)
def test_random_starts():
# Test that an increasing number of random-starts of GP fitting only
# increases the log marginal likelihood of the chosen theta.
n_samples, n_features = 25, 2
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1) \
+ rng.normal(scale=0.1, size=n_samples)
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1.0] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessRegressor(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0,).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_y_normalization():
# Test normalization of the target values in GP
# Fitting non-normalizing GP on normalized y and fitting normalizing GP
# on unnormalized y should yield identical results
y_mean = y.mean(0)
y_norm = y - y_mean
for kernel in kernels:
# Fit non-normalizing GP on normalized y
gpr = GaussianProcessRegressor(kernel=kernel)
gpr.fit(X, y_norm)
# Fit normalizing GP on unnormalized y
gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_norm.fit(X, y)
# Compare predicted mean, std-devs and covariances
y_pred, y_pred_std = gpr.predict(X2, return_std=True)
y_pred = y_mean + y_pred
y_pred_norm, y_pred_std_norm = gpr_norm.predict(X2, return_std=True)
assert_almost_equal(y_pred, y_pred_norm)
assert_almost_equal(y_pred_std, y_pred_std_norm)
_, y_cov = gpr.predict(X2, return_cov=True)
_, y_cov_norm = gpr_norm.predict(X2, return_cov=True)
assert_almost_equal(y_cov, y_cov_norm)
def test_y_multioutput():
# Test that GPR can deal with multi-dimensional target values
y_2d = np.vstack((y, y * 2)).T
# Test for fixed kernel that first dimension of 2d GP equals the output
# of 1d GP and that second dimension is twice as large
kernel = RBF(length_scale=1.0)
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None,
normalize_y=False)
gpr_2d.fit(X, y_2d)
y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
_, y_cov_1d = gpr.predict(X2, return_cov=True)
_, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
# Standard deviation and covariance do not depend on output
assert_almost_equal(y_std_1d, y_std_2d)
assert_almost_equal(y_cov_1d, y_cov_2d)
y_sample_1d = gpr.sample_y(X2, n_samples=10)
y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
assert_almost_equal(y_sample_1d, y_sample_2d[:, 0])
# Test hyperparameter optimization
for kernel in kernels:
gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr.fit(X, y)
gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
gpr_2d.fit(X, np.vstack((y, y)).T)
assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
def test_custom_optimizer():
# Test that GPR can use externally defined optimizers.
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpr = GaussianProcessRegressor(kernel=kernel, optimizer=optimizer)
gpr.fit(X, y)
# Checks that optimizer improved marginal likelihood
assert_greater(gpr.log_marginal_likelihood(gpr.kernel_.theta),
gpr.log_marginal_likelihood(gpr.kernel.theta))
def test_gpr_correct_error_message():
X = np.arange(12).reshape(6, -1)
y = np.ones(6)
kernel = DotProduct()
gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
assert_raise_message(np.linalg.LinAlgError,
"The kernel, %s, is not returning a "
"positive definite matrix. Try gradually increasing "
"the 'alpha' parameter of your "
"GaussianProcessRegressor estimator."
% kernel, gpr.fit, X, y)
def test_duplicate_input():
# Test GPR can handle two different output-values for the same input.
for kernel in kernels:
gpr_equal_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
gpr_similar_inputs = \
GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
X_ = np.vstack((X, X[0]))
y_ = np.hstack((y, y[0] + 1))
gpr_equal_inputs.fit(X_, y_)
X_ = np.vstack((X, X[0] + 1e-15))
y_ = np.hstack((y, y[0] + 1))
gpr_similar_inputs.fit(X_, y_)
X_test = np.linspace(0, 10, 100)[:, None]
y_pred_equal, y_std_equal = \
gpr_equal_inputs.predict(X_test, return_std=True)
y_pred_similar, y_std_similar = \
gpr_similar_inputs.predict(X_test, return_std=True)
assert_almost_equal(y_pred_equal, y_pred_similar)
assert_almost_equal(y_std_equal, y_std_similar)
def test_no_fit_default_predict():
# Test that GPR predictions without fit does not break by default.
default_kernel = (C(1.0, constant_value_bounds="fixed") *
RBF(1.0, length_scale_bounds="fixed"))
gpr1 = GaussianProcessRegressor()
_, y_std1 = gpr1.predict(X, return_std=True)
_, y_cov1 = gpr1.predict(X, return_cov=True)
gpr2 = GaussianProcessRegressor(kernel=default_kernel)
_, y_std2 = gpr2.predict(X, return_std=True)
_, y_cov2 = gpr2.predict(X, return_cov=True)
assert_array_almost_equal(y_std1, y_std2)
assert_array_almost_equal(y_cov1, y_cov2)
def test_K_inv_reset():
y2 = f(X2).ravel()
for kernel in kernels:
# Test that self._K_inv is reset after a new fit
gpr = GaussianProcessRegressor(kernel=kernel).fit(X, y)
assert_true(hasattr(gpr, '_K_inv'))
assert_true(gpr._K_inv is None)
gpr.predict(X, return_std=True)
assert_true(gpr._K_inv is not None)
gpr.fit(X2, y2)
assert_true(gpr._K_inv is None)
gpr.predict(X2, return_std=True)
gpr2 = GaussianProcessRegressor(kernel=kernel).fit(X2, y2)
gpr2.predict(X2, return_std=True)
# the value of K_inv should be independent of the first fit
assert_array_equal(gpr._K_inv, gpr2._K_inv)
| bsd-3-clause |