repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---
Adai0808/BuildingMachineLearningSystemsWithPython | ch12/chapter.py | 20 | 2634 | from jug import TaskGenerator
from glob import glob
import mahotas as mh


@TaskGenerator
def compute_texture(im):
    from features import texture
    imc = mh.imread(im)
    return texture(mh.colors.rgb2gray(imc))


@TaskGenerator
def chist_file(fname):
    from features import chist
    im = mh.imread(fname)
    return chist(im)


import numpy as np
to_array = TaskGenerator(np.array)
hstack = TaskGenerator(np.hstack)

haralicks = []
chists = []
labels = []

# Change this variable to point to
# the location of the dataset on disk
basedir = '../SimpleImageDataset/'

# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))

for fname in sorted(images):
    haralicks.append(compute_texture(fname))
    chists.append(chist_file(fname))
    # The class is encoded in the filename as xxxx00.jpg
    labels.append(fname[:-len('00.jpg')])

haralicks = to_array(haralicks)
chists = to_array(chists)
labels = to_array(labels)


@TaskGenerator
def accuracy(features, labels):
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import Pipeline
    from sklearn.preprocessing import StandardScaler
    from sklearn import cross_validation

    clf = Pipeline([('preproc', StandardScaler()),
                    ('classifier', LogisticRegression())])
    cv = cross_validation.LeaveOneOut(len(features))
    scores = cross_validation.cross_val_score(
        clf, features, labels, cv=cv)
    return scores.mean()


scores_base = accuracy(haralicks, labels)
scores_chist = accuracy(chists, labels)

combined = hstack([chists, haralicks])
scores_combined = accuracy(combined, labels)


@TaskGenerator
def print_results(scores):
    with open('results.image.txt', 'w') as output:
        for k, v in scores:
            output.write('Accuracy [{}]: {:.1%}\n'.format(
                k, v.mean()))


print_results([
    ('base', scores_base),
    ('chists', scores_chist),
    ('combined', scores_combined),
])


@TaskGenerator
def compute_lbp(fname):
    from mahotas.features import lbp
    imc = mh.imread(fname)
    im = mh.colors.rgb2grey(imc)
    return lbp(im, radius=8, points=6)


lbps = []
for fname in sorted(images):
    # the rest of the loop as before
    lbps.append(compute_lbp(fname))
lbps = to_array(lbps)

scores_lbps = accuracy(lbps, labels)
combined_all = hstack([chists, haralicks, lbps])
scores_combined_all = accuracy(combined_all, labels)

print_results([
    ('base', scores_base),
    ('chists', scores_chist),
    ('lbps', scores_lbps),
    ('combined', scores_combined),
    ('combined_all', scores_combined_all),
])
| mit |
MengGuo/mix_initiative | utilities/map/vis_sim.py | 1 | 12697 | from math import exp
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
import pickle
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
def draw_map(img_dir):
im = Image.open(img_dir)
pix = im.load()
Nx = im.size[0]
Ny = im.size[1]
scale = 0.02
# print im.size
# x = 100
# y= 100
# print pix[x,y] #Get the RGBA Value of the a pixel of an image
# print pix[0,0]
# (1000, 450)
# (255, 255, 255)
# (0, 0, 0)
fig = plt.figure()
ax = fig.add_subplot(111)
for nx in range(0, Nx, 5)+[Nx-1]:
for ny in range(0, Ny, 5)+[Ny-1]:
if pix[nx,ny][0] == 0:
ax.plot(nx*scale, (Ny-ny)*scale, color='k',
marker='s', markersize=1)
ax.set_xlim([0,(Nx-1)*scale])
plt.axis('off')
plt.axis('equal')
fig.tight_layout(pad=0)
fig.savefig('map.pdf',bbox_inches='tight', pad_inches=0)
return fig
def plot_traj(img_dir, A_robot_pose, A_control):
robot_x = []
robot_y = []
hi_c = []
sample = 5
L = range(len(A_robot_pose)-1)
for k in L[0::sample]:
robot_pose = A_robot_pose[k]
robot_x.append(robot_pose[1][0])
robot_y.append(robot_pose[1][1])
h_control = A_control[k][0]
if (abs(h_control[0])+abs(h_control[1]))>0.1:
hi_c.append(1)
else:
hi_c.append(0)
im = Image.open(img_dir)
pix = im.load()
Nx = im.size[0]
Ny = im.size[1]
scale = 0.02
Ns = 2
# plot map
fig = plt.figure()
ax1 = fig.add_subplot(121)
for nx in range(0, Nx, Ns)+[Nx-1]:
for ny in range(0, Ny, Ns)+[Ny-1]:
if pix[nx,ny][0] == 0:
ax1.plot(nx*scale, (Ny-ny)*scale, color='k',
marker='s', markersize=1)
# plot pre hi
print 'traj length', len(robot_x)
k1 = [0, 180/sample] # initial
k2 = [180/sample, 240/sample] #hi
k3 = [240/sample,1110/sample] # normal
k4 = [1100/sample, 1200/sample] #hi
k5 = [1200/sample, 2100/sample] #update
k6 = [2100/sample, 2450/sample] #temp
ax1.plot(robot_x[k1[0]:k1[1]], robot_y[k1[0]:k1[1]], color='b',
linestyle='-',linewidth=2, marker='o', mfc='grey',
fillstyle='full', markersize=2.3, zorder=2)
ax1.plot(robot_x[k2[0]:k2[1]], robot_y[k2[0]:k2[1]], color='r',
linestyle='-',linewidth=2, marker='o', mfc='grey',
fillstyle='full', markersize=2.3, zorder=7, label=r'HIL')
ax1.plot(robot_x[k3[0]:k3[1]], robot_y[k3[0]:k3[1]], color='b',
linestyle='-',linewidth=2, marker='o', mfc='grey',
fillstyle='full', markersize=2.3, zorder=5, label=r'$\tau_r^0$')
#---------- print regions of interest
ap = ['r_0', 'r_1', 'r_2',
'r_3', 'r_4', 'r_5',
'r_6', 'r_7', 'r_8',
'c_1', 'c_2', 'c_3',
'c_4']
roi = [(2.5, 1.5, 0.5), (8.5, 0.5, 0.5), (12.5, 1.5, 0.5),
(17.5, 1.5, 0.5), (8.5, 4.5, 0.5), (14.5, 4.5, 0.5),
(18.5, 4.5, 0.5), (3.0, 8.0, 0.7), (11.5, 8.5, 0.5),
(2.0, 6.0, 1.0), (11.0, 4.0, 1.0), (17.0, 4.0, 0.7),
(8.0, 7.0, 1.0),
]
for k in range(len(roi)):
reg = roi[k]
rec = matplotlib.patches.Rectangle((reg[0]-reg[2], reg[1]-reg[2]), reg[2]*2, reg[2]*2, fill = True, facecolor = 'cyan', edgecolor = 'black', linewidth = 1, alpha =0.8)
ax1.add_patch(rec)
ax1.text(reg[0], reg[1]-0.1, r'$%s$' %ap[k], fontsize = 20, fontweight = 'bold', zorder = 3)
ax1.legend(ncol=1,bbox_to_anchor=(0.78,0.56),loc='lower left', borderpad=0.1, labelspacing=0.2, columnspacing= 0.3, numpoints=3, prop={'size': 10})
ax1.grid()
ax1.set_xlim([0,(Nx-1)*scale])
ax1.axis('off')
ax1.axis('equal')
#==============================
ax2 = fig.add_subplot(122)
for nx in range(0, Nx, Ns)+[Nx-1]:
for ny in range(0, Ny, Ns)+[Ny-1]:
if pix[nx,ny][0] == 0:
ax2.plot(nx*scale, (Ny-ny)*scale, color='k',
marker='s', markersize=1)
# plot pre hi
print 'traj length', len(robot_x)
k1 = [0, 100/sample] # initial
k2 = [100/sample, 250/sample] #hi
k3 = [250/sample,1110/sample] # normal
k4 = [1100/sample, 1200/sample] #hi
k5 = [1200/sample, 2110/sample] #update
k6 = [2100/sample, 2390/sample] #temp
ax2.plot(robot_x[k4[0]:k4[1]], robot_y[k4[0]:k4[1]], color='r',
linestyle='-',linewidth=2, marker='o', mfc='grey',
fillstyle='full', markersize=2.3, zorder=6, label=r'HIL')
ax2.plot(robot_x[k5[0]:k5[1]], robot_y[k5[0]:k5[1]], color='g',
linestyle='-',linewidth=2, marker='o', mfc='grey',
fillstyle='full', markersize=2.3, zorder=5, label=r'$\tau_r^t$')
ax2.plot(robot_x[k6[0]:k6[1]], robot_y[k6[0]:k6[1]], color='m',
linestyle='-',linewidth=2, marker='o', mfc='grey',
fillstyle='full', markersize=2.3, zorder=7, label=r'$\varphi_{\textup{temp}}$')
ax2.plot(robot_x[(k6[1]-10):], robot_y[(k6[1]-10):], color='g',
linestyle='-',linewidth=2, marker='o', mfc='grey',
fillstyle='full', markersize=2.3, zorder=5)
#---------- print regions of interest
ap = ['r_0', 'r_1', 'r_2',
'r_3', 'r_4', 'r_5',
'r_6', 'r_7', 'r_8',
'c_1', 'c_2', 'c_3',
'c_4']
roi = [(2.5, 1.5, 0.5), (8.5, 0.5, 0.5), (12.5, 1.5, 0.5),
(17.5, 1.5, 0.5), (8.5, 4.5, 0.5), (14.5, 4.5, 0.5),
(18.5, 4.5, 0.5), (3.0, 8.0, 0.7), (11.5, 8.5, 0.5),
(2.0, 6.0, 1.0), (11.0, 4.0, 1.0), (17.0, 4.0, 0.7),
(8.0, 7.0, 1.0),
]
for k in range(len(roi)):
reg = roi[k]
rec = matplotlib.patches.Rectangle((reg[0]-reg[2], reg[1]-reg[2]), reg[2]*2, reg[2]*2, fill = True, facecolor = 'cyan', edgecolor = 'black', linewidth = 1, alpha =0.8)
ax2.add_patch(rec)
ax2.text(reg[0], reg[1]-0.1, r'$%s$' %ap[k], fontsize = 20, fontweight = 'bold', zorder = 3)
ax2.legend(ncol=1,bbox_to_anchor=(0.78,0.56),loc='lower left', borderpad=0.1, labelspacing=0.1, columnspacing= 0.1, numpoints=3, prop={'size': 7.8})
ax2.grid()
ax2.set_xlim([0,(Nx-1)*scale])
ax2.axis('off')
ax2.axis('equal')
fig.tight_layout(pad=0)
fig.savefig('traj_sim_2_zoom.pdf',bbox_inches='tight', pad_inches=0)
def plot_control(A_control):
c_linear = []
c_angular = []
h_linear = []
h_angular = []
m_linear = []
m_angular = []
for control in A_control:
[tele_control, navi_control, mix_control] = control
c_linear.append(navi_control[0])
c_angular.append(navi_control[1])
h_linear.append(tele_control[0])
h_angular.append(tele_control[1])
m_linear.append(mix_control[0])
m_angular.append(mix_control[1])
#------------------------------ plot v
step = 1.0
T = [t*step for t in range(len(A_control))]
fig = plt.figure(figsize=(10,3))
ax = fig.add_subplot(111)
ax.plot(T, c_linear, linestyle='--',
linewidth=2.0,
color='blue',label=r'$u_r[v]$',zorder = 3)
ax.plot(T, h_linear, linestyle='--',
linewidth=2.0,
color='red',label=r'$u_h[v]$',zorder = 4)
ax.plot(T, m_linear, linestyle='-',
linewidth=2.0,
color='black',label=r'$u[v]$',zorder = 2)
ax.legend(ncol=3,loc='upper left',borderpad=0.1, labelspacing=0.2, columnspacing= 0.5)
ax.grid()
ax.set_xlabel(r'$t(s)$')
ax.set_ylabel(r'$v(m/s)$')
ax.set_xlim(0, step*(len(A_control)))
ax.set_ylim(-0.5, 1.1)
#-------------------- plot w
# step = 1.0
# T = [t*step for t in range(len(A_control))]
# fig = plt.figure(figsize=(10,3))
# ax = fig.add_subplot(111)
# ax.plot(T, c_angular, linestyle='--',
# linewidth=2.0,
# color='blue',label=r'$u_r[\omega]$',zorder = 3)
# ax.plot(T, h_angular, linestyle='--',
# linewidth=2.0,
# color='red',label=r'$u_h[\omega]$',zorder = 4)
# ax.plot(T, m_angular, linestyle='-',
# linewidth=2.0,
# color='black',label=r'$u[\omega]$',zorder = 2)
# ax.legend(ncol=3,loc='upper left',borderpad=0.1, labelspacing=0.2, columnspacing= 0.5)
# ax.grid()
# ax.set_xlabel(r'$t(s)$')
# ax.set_ylabel(r'$\omega(rad/s)$')
# ax.set_xlim(0, step*(len(A_control)))
# ax.set_ylim(-1.1, 1.5)
#------------------------------ zoom in v
# step = 1.0
# T = [t*step for t in range(len(A_control))]
# k1 = 710
# k2 = 910
# k3 = 1200
# k4 = 1400
# fig = plt.figure(figsize=(10,3))
# ax1 = fig.add_subplot(121)
# ax1.plot(T[k1:k2], c_linear[k1:k2], linestyle='--',
# linewidth=2.0,
# color='blue',label=r'$u_r[v]$',zorder = 3)
# ax1.plot(T[k1:k2], h_linear[k1:k2], linestyle='--',
# linewidth=2.0,
# color='red',label=r'$u_h[v]$',zorder = 4)
# ax1.plot(T[k1:k2], m_linear[k1:k2], linestyle='-',
# linewidth=2.0,
# color='black',label=r'$u[v]$',zorder = 2)
# ax1.legend(ncol=3,loc='upper left',borderpad=0.1, labelspacing=0.2, columnspacing= 0.5)
# ax1.grid()
# ax1.set_xlabel(r'$t(s)$')
# ax1.set_ylabel(r'$v(m/s)$')
# ax1.set_xlim(k1*step, k2*step)
# ax1.set_ylim(-0.5, 1.1)
# ax2 = fig.add_subplot(122)
# ax2.plot(T[k3:k4], c_linear[k3:k4], linestyle='--',
# linewidth=2.0,
# color='blue',label=r'$u_r[v]$',zorder = 3)
# ax2.plot(T[k3:k4], h_linear[k3:k4], linestyle='--',
# linewidth=2.0,
# color='red',label=r'$u_h[v]$',zorder = 4)
# ax2.plot(T[k3:k4], m_linear[k3:k4], linestyle='-',
# linewidth=2.0,
# color='black',label=r'$u[v]$',zorder = 2)
# ax2.legend(ncol=3,loc='upper left',borderpad=0.1, labelspacing=0.2, columnspacing= 0.5)
# ax2.grid()
# ax2.set_xlabel(r'$t(s)$')
# ax2.set_ylabel(r'$v(m/s)$')
# ax2.set_xlim(k3*step, k4*step)
# ax2.set_ylim(-0.5, 1.1)
# ------------------------------ zoom in w
# step = 1.0
# T = [t*step for t in range(len(A_control))]
# k1 = 710
# k2 = 910
# k3 = 1200
# k4 = 1400
# fig = plt.figure(figsize=(10,3))
# ax1 = fig.add_subplot(121)
# ax1.plot(T[k1:k2], c_angular[k1:k2], linestyle='--',
# linewidth=2.0,
# color='blue',label=r'$u_r[\omega]$',zorder = 3)
# ax1.plot(T[k1:k2], h_angular[k1:k2], linestyle='--',
# linewidth=2.0,
# color='red',label=r'$u_h[\omega]$',zorder = 4)
# ax1.plot(T[k1:k2], m_angular[k1:k2], linestyle='-',
# linewidth=2.0,
# color='black',label=r'$u[\omega]$',zorder = 2)
# ax1.legend(ncol=3,loc='upper left',borderpad=0.1, labelspacing=0.2, columnspacing= 0.5)
# ax1.grid()
# ax1.set_xlabel(r'$t(s)$')
# ax1.set_ylabel(r'$\omega(rad/s)$')
# ax1.set_xlim(k1*step, k2*step)
# ax1.set_ylim(-1.1, 1.5)
# ax2 = fig.add_subplot(122)
# ax2.plot(T[k3:k4], c_angular[k3:k4], linestyle='--',
# linewidth=2.0,
# color='blue',label=r'$u_r[\omega]$',zorder = 3)
# ax2.plot(T[k3:k4], h_angular[k3:k4], linestyle='--',
# linewidth=2.0,
# color='red',label=r'$u_h[\omega]$',zorder = 4)
# ax2.plot(T[k3:k4], m_angular[k3:k4], linestyle='-',
# linewidth=2.0,
# color='black',label=r'$u[\omega]$',zorder = 2)
# ax2.legend(ncol=3,loc='upper left',borderpad=0.1, labelspacing=0.2, columnspacing= 0.5)
# ax2.grid()
# ax2.set_xlabel(r'$t(s)$')
# ax2.set_ylabel(r'$\omega(rad/s)$')
# ax2.set_xlim(k3*step, k4*step)
# ax2.set_ylim(-1.1, 1.5)
plt.savefig(r'sim_control_v_2.pdf', bbox_inches = 'tight')
def plot_beta(A_beta):
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
ax.plot(range(len(A_beta)), A_beta, color='b',
linestyle='-', linewidth=5, marker='o', mfc='r',
fillstyle='full', markersize=6, zorder=2)
ax.set_xlabel(r'$Iteration$')
ax.set_ylabel(r'$\beta_k$')
ax.set_xlim(0, len(A_beta))
ax.set_ylim(0, 12)
ax.grid()
plt.savefig(r'sim_beta_2.pdf', bbox_inches = 'tight')
if __name__ == "__main__":
A_robot_pose, A_control, A_beta = pickle.load(open('tiago_sim_case_two.p', 'rb'))
# draw_map('map.png')
plot_traj('map.png', A_robot_pose, A_control)
# plot_control(A_control)
# plot_beta(A_beta[0])
# print A_beta[0]
| gpl-2.0 |
brandon-rhodes/numpy | numpy/doc/creation.py | 118 | 5507 | """
==============
Array Creation
==============
Introduction
============
There are 5 general mechanisms for creating arrays:
1) Conversion from other Python structures (e.g., lists, tuples)
2) Intrinsic numpy array creation objects (e.g., arange, ones, zeros,
etc.)
3) Reading arrays from disk, either from standard or custom formats
4) Creating arrays from raw bytes through the use of strings or buffers
5) Use of special library functions (e.g., random)
This section will not cover means of replicating, joining, or otherwise
expanding or mutating existing arrays. Nor will it cover creating object
arrays or structured arrays. Both of those are covered in their own sections.
Converting Python array_like Objects to Numpy Arrays
====================================================
In general, numerical data arranged in an array-like structure in Python can
be converted to arrays through the use of the array() function. The most
obvious examples are lists and tuples. See the documentation for array() for
details for its use. Some objects may support the array-protocol and allow
conversion to arrays this way. A simple way to find out if the object can be
converted to a numpy array using array() is simply to try it interactively and
see if it works! (The Python Way).
Examples: ::
>>> x = np.array([2,3,1,0])
>>> x = np.array([2, 3, 1, 0])
>>> x = np.array([[1,2.0],[0,0],(1+1j,3.)]) # note mix of tuple and lists,
and types
>>> x = np.array([[ 1.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j], [ 1.+1.j, 3.+0.j]])
Intrinsic Numpy Array Creation
==============================
Numpy has built-in functions for creating arrays from scratch:
zeros(shape) will create an array filled with 0 values with the specified
shape. The default dtype is float64.
For example: ::
>>> np.zeros((2, 3))
array([[ 0., 0., 0.], [ 0., 0., 0.]])
ones(shape) will create an array filled with 1 values. It is identical to
zeros in all other respects.
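For example, as a parallel to the zeros example above: ::
>>> np.ones((2, 3))
array([[ 1., 1., 1.], [ 1., 1., 1.]])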
arange() will create arrays with regularly incrementing values. Check the
docstring for complete information on the various ways it can be used. A few
examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=np.float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note that there are some subtleties regarding the last usage that the user
should be aware of that are described in the arange docstring.
linspace() will create arrays with a specified number of elements, and
spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that one can guarantee the
number of elements and the starting and end point, which arange()
generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
This is particularly useful for evaluating functions of multiple dimensions on
a regular grid.
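As a small added illustration, the stacked index arrays can be unpacked and
combined directly to evaluate an expression over the grid: ::
>>> i, j = np.indices((3, 3))
>>> i + j
array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])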
Reading Arrays From Disk
========================
This is presumably the most common case of large array creation. The details,
of course, depend greatly on the format of data on disk and so this section
can only give general pointers on how to handle various formats.
Standard Binary Formats
-----------------------
Various fields have standard formats for array data. The following lists the
ones with known python libraries to read them and return numpy arrays (there
may be others for which it is possible to read and convert to numpy arrays so
check the last section as well)
::
HDF5: PyTables
FITS: PyFITS
Examples of formats that cannot be read directly but for which it is not hard to
convert are those formats supported by libraries like PIL (able to read and
write many image formats such as jpg, png, etc).
Common ASCII Formats
------------------------
Comma Separated Value files (CSV) are widely used (and an export and import
option for programs like Excel). There are a number of ways of reading these
files in Python. There are CSV functions in Python and functions in pylab
(part of matplotlib).
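As one short sketch, numpy itself can also read a plain numeric CSV file
directly (``data.csv`` below is only a placeholder file name): ::
>>> np.loadtxt('data.csv', delimiter=',') # returns a 2-D array of floats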
More generic ascii files can be read using the io package in scipy.
Custom Binary Formats
---------------------
There are a variety of approaches one can use. If the file has a relatively
simple format then one can write a simple I/O library and use the numpy
fromfile() function and .tofile() method to read and write numpy arrays
directly (mind your byteorder though!). If a good C or C++ library exists that
reads the data, one can wrap that library with a variety of techniques, though
that certainly is much more work and requires significantly more advanced
knowledge to interface with C or C++.
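A minimal round-trip sketch of the fromfile()/.tofile() approach (``data.bin``
is only an illustrative file name; the dtype must be supplied again on reading
because the raw file carries no metadata): ::
>>> a = np.arange(4, dtype=np.float64)
>>> a.tofile('data.bin') # raw bytes, no header
>>> np.fromfile('data.bin', dtype=np.float64)
array([ 0., 1., 2., 3.])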
Use of Special Libraries
------------------------
There are libraries that can be used to generate arrays for special purposes
and it isn't possible to enumerate all of them. The most common case is the use
of the many array generation functions in random that can generate arrays of
random values, and some utility functions to generate special matrices (e.g.
diagonal).
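For instance, two common helpers of this kind: ::
>>> np.random.rand(2, 3) # uniform random values; output differs on every run
>>> np.diag([1, 2, 3]) # special matrix with the given diagonal
array([[1, 0, 0], [0, 2, 0], [0, 0, 3]])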
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
mugizico/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 84 | 14181 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorers work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(return_indicator=True,
allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
return_indicator=True,
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
CalebBell/thermo | thermo/eos.py | 1 | 469261 | # -*- coding: utf-8 -*-
r'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019, 2020 Caleb Bell <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This module contains implementations of most cubic equations of state for
pure components. This includes Peng-Robinson, SRK, Van der Waals, PRSV, TWU and
many other variants.
For reporting bugs, adding feature requests, or submitting pull requests,
please use the `GitHub issue tracker <https://github.com/CalebBell/thermo/>`_.
.. contents:: :local:
Base Class
==========
.. autoclass:: GCEOS
:members:
:undoc-members:
:special-members: __repr__
:show-inheritance:
:exclude-members: _P_zero_g_cheb_coeffs, _P_zero_l_cheb_coeffs,
main_derivatives_and_departures, derivatives_and_departures
Standard Peng-Robinson Family EOSs
==================================
Standard Peng Robinson
----------------------
.. autoclass:: PR
:show-inheritance:
:members: a_alpha_pure, a_alpha_and_derivatives_pure, d3a_alpha_dT3_pure,
solve_T, P_max_at_V, c1, c2, Zc
Peng Robinson (1978)
--------------------
.. autoclass:: PR78
:show-inheritance:
:members: low_omega_constants, high_omega_constants
Peng Robinson Stryjek-Vera
--------------------------
.. autoclass:: PRSV
:show-inheritance:
:members: solve_T, a_alpha_and_derivatives_pure, a_alpha_pure
Peng Robinson Stryjek-Vera 2
----------------------------
.. autoclass:: PRSV2
:show-inheritance:
:members: solve_T, a_alpha_and_derivatives_pure, a_alpha_pure
Peng Robinson Twu (1995)
------------------------
.. autoclass:: TWUPR
:show-inheritance:
:members: a_alpha_and_derivatives_pure, a_alpha_pure
Peng Robinson Polynomial alpha Function
---------------------------------------
.. autoclass:: PRTranslatedPoly
:show-inheritance:
:members: a_alpha_and_derivatives_pure, a_alpha_pure
Volume Translated Peng-Robinson Family EOSs
===========================================
Peng Robinson Translated
------------------------
.. autoclass:: PRTranslated
:show-inheritance:
:members: __init__
:exclude-members: __init__
Peng Robinson Translated Twu (1991)
-----------------------------------
.. autoclass:: PRTranslatedTwu
:show-inheritance:
:members: __init__
:exclude-members: __init__
Peng Robinson Translated-Consistent
-----------------------------------
.. autoclass:: PRTranslatedConsistent
:show-inheritance:
:members: __init__
:exclude-members: __init__
Peng Robinson Translated (Pina-Martinez, Privat, and Jaubert Variant)
---------------------------------------------------------------------
.. autoclass:: PRTranslatedPPJP
:show-inheritance:
:members: __init__
:exclude-members: __init__
Soave-Redlich-Kwong Family EOSs
===============================
Standard SRK
------------
.. autoclass:: SRK
:show-inheritance:
:members: c1, c2, epsilon, Zc, a_alpha_and_derivatives_pure, a_alpha_pure, P_max_at_V, solve_T
Twu SRK (1995)
--------------
.. autoclass:: TWUSRK
:show-inheritance:
:members: a_alpha_and_derivatives_pure, a_alpha_pure
API SRK
-------
.. autoclass:: APISRK
:show-inheritance:
:members: a_alpha_and_derivatives_pure, a_alpha_pure, solve_T
SRK Translated
--------------
.. autoclass:: SRKTranslated
:show-inheritance:
:members: __init__
:exclude-members: __init__
SRK Translated-Consistent
-------------------------
.. autoclass:: SRKTranslatedConsistent
:show-inheritance:
:members: __init__
:exclude-members: __init__
SRK Translated (Pina-Martinez, Privat, and Jaubert Variant)
-----------------------------------------------------------
.. autoclass:: SRKTranslatedPPJP
:show-inheritance:
:members: __init__
:exclude-members: __init__
MSRK Translated
---------------
.. autoclass:: MSRKTranslated
:show-inheritance:
:members: estimate_MN
Van der Waals Equations of State
================================
.. autoclass:: VDW
:show-inheritance:
:members: a_alpha_and_derivatives_pure, a_alpha_pure, solve_T, T_discriminant_zeros_analytical, P_discriminant_zeros_analytical, delta, epsilon, omega, Zc
Redlich-Kwong Equations of State
================================
.. autoclass:: RK
:show-inheritance:
:members: a_alpha_and_derivatives_pure, a_alpha_pure, solve_T, T_discriminant_zeros_analytical, epsilon, omega, Zc, c1, c2
Ideal Gas Equation of State
===========================
.. autoclass:: IG
:show-inheritance:
:members: volume_solutions, Zc, a, b, delta, epsilon, a_alpha_pure, a_alpha_and_derivatives_pure, solve_T
Lists of Equations of State
===========================
.. autodata:: eos_list
.. autodata:: eos_2P_list
Demonstrations of Concepts
==========================
Maximum Pressure at Constant Volume
-----------------------------------
Some equations of state show this behavior. At a liquid volume, if the
temperature is increased, the pressure should increase as well to create that
same volume. However, in some cases this does not hold, as can be demonstrated
for this hypothetical dodecane-like fluid:
.. plot:: plots/PR_maximum_pressure.py
Through experience, it is observed that this behavior is only shown for some
sets of critical constants. It was found that if the expression for
:math:`\frac{\partial P}{\partial T}_{V}` is set to zero, an analytical
expression can be determined for exactly what that maximum pressure is.
Some EOSs implement this function as `P_max_at_V`; those that don't, and fluids
where there is no maximum pressure, will have that method but it will return None.
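A rough usage sketch follows; the constants are the n-hexane values used in
other examples in this module rather than the dodecane-like fluid of the plot,
and `P_max_at_V` is assumed here to take the molar volume as its single
argument, returning either the maximum pressure or None: ::
>>> eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0, P=1e6)
>>> eos.P_max_at_V(eos.V_l) # float if a maximum pressure exists at V_l, else None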
Debug Plots to Understand EOSs
------------------------------
The :obj:`GCEOS.volume_errors` method shows the relative error in the volume
solution. `mpmath` is required for this functionality. It is not likely there
is an error here but many problems have been found in the past.
.. plot:: plots/PRTC_volume_error.py
The :obj:`GCEOS.PT_surface_special` method shows some of the special curves of
the EOS.
.. plot:: plots/PRTC_PT_surface_special.py
The :obj:`GCEOS.a_alpha_plot` method shows the alpha function curve. The
following sample shows the SRK's default alpha function for methane.
.. plot:: plots/SRK_a_alpha.py
If this doesn't look healthy, that is because it is not. There are strict
thermodynamic consistency requirements that we know of today:
* The alpha function must be positive and continuous
* The first derivative must be negative and continuous
* The second derivative must be positive and continuous
* The third derivative must be negative
The first and second criteria fail here.
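To inspect an alpha function like this interactively, something along these
lines should work (methane critical constants; it is assumed
:obj:`GCEOS.a_alpha_plot` can be called with its default arguments): ::
>>> SRK(Tc=190.564, Pc=4599000.0, omega=0.008, T=300.0, P=1e5).a_alpha_plot() # displays the curve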
There are two methods to review the saturation properties solution.
The more general way is to review saturation properties as a plot:
.. plot:: plots/SRK_H_dep.py
.. plot:: plots/SRK_fugacity.py
The second plot is more detailed, and is focused on the direct calculation of
vapor pressure without using an iterative solution. It shows the relative
error of the fit, which is normally way below where it would present any issue -
only 10-100x more error than it is possible to get with floating point numbers
at all.
.. plot:: plots/SRK_Psat_error.py
'''
from __future__ import division, print_function
__all__ = ['GCEOS', 'PR', 'SRK', 'PR78', 'PRSV', 'PRSV2', 'VDW', 'RK',
'APISRK', 'TWUPR', 'TWUSRK', 'eos_list', 'eos_2P_list',
'IG', 'PRTranslatedPPJP', 'SRKTranslatedPPJP',
'PRTranslatedConsistent', 'SRKTranslatedConsistent', 'MSRKTranslated',
'SRKTranslated', 'PRTranslated', 'PRTranslatedCoqueletChapoyRichon',
'PRTranslatedTwu', 'PRTranslatedPoly',
]
__all__.extend(['main_derivatives_and_departures',
'main_derivatives_and_departures_VDW',
'eos_lnphi'])
from cmath import log as clog
from math import isnan, isinf
from fluids.numerics import (chebval, brenth, third, sixth, roots_cubic,
roots_cubic_a1, numpy as np, newton,
bisect, inf, polyder, chebder, is_micropython,
trunc_exp, secant, linspace, logspace,
horner, horner_and_der, horner_and_der2, derivative,
roots_cubic_a2, isclose, NoSolutionError,
roots_quartic, deflate_cubic_real_roots,
catanh)
from fluids.constants import mmHg, R
from chemicals.utils import (Cp_minus_Cv, isobaric_expansion,
isothermal_compressibility,
phase_identification_parameter, hash_any_primitive)
from chemicals.utils import log, log10, exp, sqrt, copysign
from chemicals.flash_basic import Wilson_K_value
from thermo import serialize
from thermo.eos_volume import (volume_solutions_mpmath, volume_solutions_mpmath_float,
volume_solutions_NR, volume_solutions_NR_low_P,
volume_solutions_halley, volume_solutions_fast,
volume_solutions_Cardano, volume_solutions_numpy,
volume_solutions_ideal, volume_solutions_a1, volume_solutions_a2,
volume_solutions_doubledouble_float)
from thermo.eos_alpha_functions import (Poly_a_alpha, Twu91_a_alpha, Mathias_Copeman_poly_a_alpha,
TwuSRK95_a_alpha, TwuPR95_a_alpha, Soave_1979_a_alpha,
TWU_a_alpha_common)
R2 = R*R
R_2 = 0.5*R
R_inv = 1.0/R
R_inv2 = R_inv*R_inv
def main_derivatives_and_departures(T, P, V, b, delta, epsilon, a_alpha,
da_alpha_dT, d2a_alpha_dT2):
epsilon2 = epsilon + epsilon
x0 = 1.0/(V - b)
x1 = 1.0/(V*(V + delta) + epsilon)
x3 = R*T
x4 = x0*x0
x5 = V + V + delta
x6 = x1*x1
x7 = a_alpha*x6
x8 = P*V
x9 = delta*delta
x10 = x9 - epsilon2 - epsilon2
try:
x11 = 1.0/sqrt(x10)
except:
# Needed for ideal gas model
x11 = 0.0
x11_half = 0.5*x11
# arg = x11*x5
# arg2 = (arg + 1.0)/(arg - 1.0)
# fancy = 0.25*log(arg2*arg2)
# x12 = 2.*x11*fancy # Possible to use a catan, but then a complex division and sq root is needed too
x12 = 2.*x11*catanh(x11*x5).real # Possible to use a catan, but then a complex division and sq root is needed too
x14 = 0.5*x5
x15 = epsilon2*x11
x16 = x11_half*x9
x17 = x5*x6
dP_dT = R*x0 - da_alpha_dT*x1
dP_dV = x5*x7 - x3*x4
d2P_dT2 = -d2a_alpha_dT2*x1
d2P_dV2 = (x7 + x3*x4*x0 - a_alpha*x5*x17*x1)
d2P_dV2 += d2P_dV2
d2P_dTdV = da_alpha_dT*x17 - R*x4
H_dep = x12*(T*da_alpha_dT - a_alpha) - x3 + x8
t1 = (x3*x0/P)
S_dep = -R*log(t1) + da_alpha_dT*x12 # Consider Real part of the log only via log(x**2)/2 = Re(log(x))
# S_dep = -R_2*log(t1*t1) + da_alpha_dT*x12 # Consider Real part of the log only via log(x**2)/2 = Re(log(x))
x18 = x16 - x15
x19 = (x14 + x18)/(x14 - x18)
Cv_dep = T*d2a_alpha_dT2*x11*(log(x19)) # Consider Real part of the log only via log(x**2)/2 = Re(log(x))
return dP_dT, dP_dV, d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, Cv_dep
def main_derivatives_and_departures_VDW(T, P, V, b, delta, epsilon, a_alpha,
da_alpha_dT, d2a_alpha_dT2):
'''Re-implementation of derivatives and excess property calculations,
as ZeroDivisionError errors occur with the general solution. The
following derivation is the source of these formulas.
>>> from sympy import *
>>> P, T, V, R, b, a = symbols('P, T, V, R, b, a')
>>> P_vdw = R*T/(V-b) - a/(V*V)
>>> vdw = P_vdw - P
>>>
>>> dP_dT = diff(vdw, T)
>>> dP_dV = diff(vdw, V)
>>> d2P_dT2 = diff(vdw, T, 2)
>>> d2P_dV2 = diff(vdw, V, 2)
>>> d2P_dTdV = diff(vdw, T, V)
>>> H_dep = integrate(T*dP_dT - P_vdw, (V, oo, V))
>>> H_dep += P*V - R*T
>>> S_dep = integrate(dP_dT - R/V, (V,oo,V))
>>> S_dep += R*log(P*V/(R*T))
>>> Cv_dep = T*integrate(d2P_dT2, (V,oo,V))
>>>
>>> dP_dT, dP_dV, d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, Cv_dep
(R/(V - b), -R*T/(V - b)**2 + 2*a/V**3, 0, 2*(R*T/(V - b)**3 - 3*a/V**4), -R/(V - b)**2, P*V - R*T - a/V, R*(-log(V) + log(V - b)) + R*log(P*V/(R*T)), 0)
'''
V_inv = 1.0/V
V_inv2 = V_inv*V_inv
Vmb = V - b
Vmb_inv = 1.0/Vmb
dP_dT = R*Vmb_inv
dP_dV = -R*T*Vmb_inv*Vmb_inv + 2.0*a_alpha*V_inv*V_inv2
d2P_dT2 = 0.0
d2P_dV2 = 2.0*(R*T*Vmb_inv*Vmb_inv*Vmb_inv - 3.0*a_alpha*V_inv2*V_inv2) # Causes issues at low T when V fourth power fails
d2P_dTdV = -R*Vmb_inv*Vmb_inv
H_dep = P*V - R*T - a_alpha*V_inv
S_dep = R*(-log(V) + log(Vmb)) + R*log(P*V/(R*T))
Cv_dep = 0.0
return (dP_dT, dP_dV, d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, Cv_dep)
def eos_lnphi(T, P, V, b, delta, epsilon, a_alpha):
r'''Calculate the log fugacity coefficient of the general cubic equation
of state form.
.. math::
\ln \phi = \frac{P V}{R T} + \log{\left(V \right)} - \log{\left(\frac{P
V}{R T} \right)} - \log{\left(V - b \right)} - 1 - \frac{2 a {\alpha}
\operatorname{atanh}{\left(\frac{2 V}{\sqrt{\delta^{2} - 4 \epsilon}}
+ \frac{\delta}{\sqrt{\delta^{2} - 4 \epsilon}} \right)}}
{R T \sqrt{\delta^{2} - 4 \epsilon}}
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
b : float
Coefficient calculated by EOS-specific method, [m^3/mol]
delta : float
Coefficient calculated by EOS-specific method, [m^3/mol]
epsilon : float
Coefficient calculated by EOS-specific method, [m^6/mol^2]
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
Returns
-------
lnphi : float
Log fugacity coefficient, [-]
Examples
--------
>>> eos_lnphi(299.0, 100000.0, 0.00013128, 0.000109389, 0.00021537, -1.1964711e-08, 3.8056296)
-1.560560970726
'''
RT = R*T
RT_inv = 1.0/RT
x0 = 1.0/sqrt(delta*delta - 4.0*epsilon)
arg = 2.0*V*x0 + delta*x0
fancy = catanh(arg).real
# Possible optimization, numerical analysis required.
# arg2 = (arg + 1.0)/(arg - 1.0)
# fancy = 0.25*log(arg2*arg2)
return (P*V*RT_inv + log(RT/(P*(V-b))) - 1.0
- 2.0*a_alpha*fancy*RT_inv*x0)
class GCEOS(object):
r'''Class for solving a generic Pressure-explicit three-parameter cubic
equation of state. Does not implement any parameters itself; must be
subclassed by an equation of state class which uses it. Works for mixtures
or pure species for all properties except fugacity. All properties are
derived with the CAS SymPy, not relying on any derivations previously
published.
.. math::
P=\frac{RT}{V-b}-\frac{a\alpha(T)}{V^2 + \delta V + \epsilon}
The main methods (in order they are called) are :obj:`GCEOS.solve`, :obj:`GCEOS.set_from_PT`,
:obj:`GCEOS.volume_solutions`, and :obj:`GCEOS.set_properties_from_solution`.
:obj:`GCEOS.solve` calls :obj:`GCEOS.check_sufficient_inputs`, which checks if two of `T`, `P`,
and `V` were set. It then solves for the
remaining variable. If `T` is missing, method :obj:`GCEOS.solve_T` is used; it is
parameter specific, and so must be implemented in each specific EOS.
If `P` is missing, it is directly calculated. If `V` is missing, it
is calculated with the method :obj:`GCEOS.volume_solutions`. At this point, either
three possible volumes or one user specified volume are known. The
value of `a_alpha`, and its first and second temperature derivative are
calculated with the EOS-specific method :obj:`GCEOS.a_alpha_and_derivatives`.
If `V` is not provided, :obj:`GCEOS.volume_solutions` calculates the three
possible molar volumes which are solutions to the EOS; in the single-phase
region, only one solution is real and correct. In the two-phase region, all
volumes are real, but only the largest and smallest solution are physically
meaningful, with the largest being that of the gas and the smallest that of
the liquid.
:obj:`GCEOS.set_from_PT` is called to sort out the possible molar volumes. For the
case of a user-specified `V`, the possibility of there existing another
solution is ignored for speed. If there is only one real volume, the
method :obj:`GCEOS.set_properties_from_solution` is called with it. If there are
two real volumes, :obj:`GCEOS.set_properties_from_solution` is called once with each
volume. The phase is returned by :obj:`GCEOS.set_properties_from_solution`, and the
volumes is set to either :obj:`GCEOS.V_l` or :obj:`GCEOS.V_g` as appropriate.
:obj:`GCEOS.set_properties_from_solution` is a large function which calculates all relevant
partial derivatives and properties of the EOS. 17 derivatives and excess
enthalpy and entropy are calculated first.
Finally, it sets all these properties as attributes for either
the liquid or gas phase with the convention of adding on `_l` or `_g` to
the variable names, respectively.
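A compact sketch of that flow, using the :obj:`PR` subclass with the n-hexane
constants from the examples below (numeric outputs omitted here): ::
>>> eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0, P=1e6)
>>> eos.phase # 'l', 'g', or 'l/g' depending on how many real volume roots exist
>>> eos.V_l, eos.V_g # set only when the corresponding root exists
>>> eos.H_dep_l, eos.S_dep_l # departure properties from set_properties_from_solution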
Attributes
----------
T : float
Temperature of cubic EOS state, [K]
P : float
Pressure of cubic EOS state, [Pa]
a : float
`a` parameter of cubic EOS; formulas vary with the EOS, [Pa*m^6/mol^2]
b : float
`b` parameter of cubic EOS; formulas vary with the EOS, [m^3/mol]
delta : float
Coefficient calculated by EOS-specific method, [m^3/mol]
epsilon : float
Coefficient calculated by EOS-specific method, [m^6/mol^2]
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of :math:`a \alpha` calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of :math:`a \alpha` calculated by
EOS-specific method, [J^2/mol^2/Pa/K**2]
Zc : float
Critical compressibility of cubic EOS state, [-]
phase : str
One of 'l', 'g', or 'l/g' to represent whether or not there is a
liquid-like solution, vapor-like solution, or both available, [-]
raw_volumes : list[(float, complex), 3]
Calculated molar volumes from the volume solver; depending on the state
and selected volume solver, imaginary volumes may be represented by 0
or -1j to save the time of actually calculating them, [m^3/mol]
V_l : float
Liquid phase molar volume, [m^3/mol]
V_g : float
Vapor phase molar volume, [m^3/mol]
V : float or None
Molar volume specified as input; otherwise None, [m^3/mol]
Z_l : float
Liquid phase compressibility, [-]
Z_g : float
Vapor phase compressibility, [-]
PIP_l : float
Liquid phase phase identification parameter, [-]
PIP_g : float
Vapor phase phase identification parameter, [-]
dP_dT_l : float
Liquid phase temperature derivative of pressure at constant volume,
[Pa/K].
.. math::
\left(\frac{\partial P}{\partial T}\right)_V = \frac{R}{V - b}
- \frac{a \frac{d \alpha{\left (T \right )}}{d T}}{V^{2} + V \delta
+ \epsilon}
dP_dT_g : float
Vapor phase temperature derivative of pressure at constant volume,
[Pa/K].
.. math::
\left(\frac{\partial P}{\partial T}\right)_V = \frac{R}{V - b}
- \frac{a \frac{d \alpha{\left (T \right )}}{d T}}{V^{2} + V \delta
+ \epsilon}
dP_dV_l : float
Liquid phase volume derivative of pressure at constant temperature,
[Pa*mol/m^3].
.. math::
\left(\frac{\partial P}{\partial V}\right)_T = - \frac{R T}{\left(
V - b\right)^{2}} - \frac{a \left(- 2 V - \delta\right) \alpha{
\left (T \right )}}{\left(V^{2} + V \delta + \epsilon\right)^{2}}
dP_dV_g : float
Gas phase volume derivative of pressure at constant temperature,
[Pa*mol/m^3].
.. math::
\left(\frac{\partial P}{\partial V}\right)_T = - \frac{R T}{\left(
V - b\right)^{2}} - \frac{a \left(- 2 V - \delta\right) \alpha{
\left (T \right )}}{\left(V^{2} + V \delta + \epsilon\right)^{2}}
dV_dT_l : float
Liquid phase temperature derivative of volume at constant pressure,
[m^3/(mol*K)].
.. math::
\left(\frac{\partial V}{\partial T}\right)_P =-\frac{
\left(\frac{\partial P}{\partial T}\right)_V}{
\left(\frac{\partial P}{\partial V}\right)_T}
dV_dT_g : float
Gas phase temperature derivative of volume at constant pressure,
[m^3/(mol*K)].
.. math::
\left(\frac{\partial V}{\partial T}\right)_P =-\frac{
\left(\frac{\partial P}{\partial T}\right)_V}{
\left(\frac{\partial P}{\partial V}\right)_T}
dV_dP_l : float
Liquid phase pressure derivative of volume at constant temperature,
[m^3/(mol*Pa)].
.. math::
\left(\frac{\partial V}{\partial P}\right)_T =-\frac{
\left(\frac{\partial V}{\partial T}\right)_P}{
\left(\frac{\partial P}{\partial T}\right)_V}
dV_dP_g : float
Gas phase pressure derivative of volume at constant temperature,
[m^3/(mol*Pa)].
.. math::
\left(\frac{\partial V}{\partial P}\right)_T =-\frac{
\left(\frac{\partial V}{\partial T}\right)_P}{
\left(\frac{\partial P}{\partial T}\right)_V}
dT_dV_l : float
Liquid phase volume derivative of temperature at constant pressure,
[K*mol/m^3].
.. math::
\left(\frac{\partial T}{\partial V}\right)_P = \frac{1}
{\left(\frac{\partial V}{\partial T}\right)_P}
dT_dV_g : float
Gas phase volume derivative of temperature at constant pressure,
[K*mol/m^3]. See :obj:`GCEOS.set_properties_from_solution` for
the formula.
dT_dP_l : float
Liquid phase pressure derivative of temperature at constant volume,
[K/Pa].
.. math::
\left(\frac{\partial T}{\partial P}\right)_V = \frac{1}
{\left(\frac{\partial P}{\partial T}\right)_V}
dT_dP_g : float
Gas phase pressure derivative of temperature at constant volume,
[K/Pa].
.. math::
\left(\frac{\partial T}{\partial P}\right)_V = \frac{1}
{\left(\frac{\partial P}{\partial T}\right)_V}
d2P_dT2_l : float
Liquid phase second derivative of pressure with respect to temperature
at constant volume, [Pa/K^2].
.. math::
\left(\frac{\partial^2 P}{\partial T^2}\right)_V = - \frac{a
\frac{d^{2} \alpha{\left (T \right )}}{d T^{2}}}{V^{2} + V \delta
+ \epsilon}
d2P_dT2_g : float
Gas phase second derivative of pressure with respect to temperature
at constant volume, [Pa/K^2].
.. math::
\left(\frac{\partial^2 P}{\partial T^2}\right)_V = - \frac{a
\frac{d^{2} \alpha{\left (T \right )}}{d T^{2}}}{V^{2} + V \delta
+ \epsilon}
d2P_dV2_l : float
Liquid phase second derivative of pressure with respect to volume
at constant temperature, [Pa*mol^2/m^6].
.. math::
\left(\frac{\partial^2 P}{\partial V^2}\right)_T = 2 \left(\frac{
R T}{\left(V - b\right)^{3}} - \frac{a \left(2 V + \delta\right)^{
2} \alpha{\left (T \right )}}{\left(V^{2} + V \delta + \epsilon
\right)^{3}} + \frac{a \alpha{\left (T \right )}}{\left(V^{2} + V
\delta + \epsilon\right)^{2}}\right)
d2P_dTdV_l : float
Liquid phase second derivative of pressure with respect to volume
and then temperature, [Pa*mol/(K*m^3)].
.. math::
\left(\frac{\partial^2 P}{\partial T \partial V}\right) = - \frac{
R}{\left(V - b\right)^{2}} + \frac{a \left(2 V + \delta\right)
\frac{d \alpha{\left (T \right )}}{d T}}{\left(V^{2} + V \delta
+ \epsilon\right)^{2}}
d2P_dTdV_g : float
Gas phase second derivative of pressure with respect to volume
and then temperature, [Pa*mol/(K*m^3)].
.. math::
\left(\frac{\partial^2 P}{\partial T \partial V}\right) = - \frac{
R}{\left(V - b\right)^{2}} + \frac{a \left(2 V + \delta\right)
\frac{d \alpha{\left (T \right )}}{d T}}{\left(V^{2} + V \delta
+ \epsilon\right)^{2}}
H_dep_l : float
Liquid phase departure enthalpy, [J/mol]. See
:obj:`GCEOS.set_properties_from_solution` for the formula.
H_dep_g : float
Gas phase departure enthalpy, [J/mol]. See
:obj:`GCEOS.set_properties_from_solution` for the formula.
S_dep_l : float
Liquid phase departure entropy, [J/(mol*K)]. See
:obj:`GCEOS.set_properties_from_solution` for the formula.
S_dep_g : float
Gas phase departure entropy, [J/(mol*K)]. See
:obj:`GCEOS.set_properties_from_solution` for the formula.
G_dep_l : float
Liquid phase departure Gibbs energy, [J/mol].
.. math::
G_{dep} = H_{dep} - T S_{dep}
G_dep_g : float
Gas phase departure Gibbs energy, [J/mol].
.. math::
G_{dep} = H_{dep} - T S_{dep}
Cp_dep_l : float
Liquid phase departure heat capacity, [J/(mol*K)]
.. math::
C_{p, dep} = (C_p-C_v)_{\text{from EOS}} + C_{v, dep} - R
Cp_dep_g : float
Gas phase departure heat capacity, [J/(mol*K)]
.. math::
C_{p, dep} = (C_p-C_v)_{\text{from EOS}} + C_{v, dep} - R
Cv_dep_l : float
Liquid phase departure constant volume heat capacity, [J/(mol*K)].
See :obj:`GCEOS.set_properties_from_solution` for
the formula.
Cv_dep_g : float
Gas phase departure constant volume heat capacity, [J/(mol*K)].
See :obj:`GCEOS.set_properties_from_solution` for
the formula.
c1 : float
Full value of the constant in the `a` parameter, set in some EOSs, [-]
c2 : float
Full value of the constant in the `b` parameter, set in some EOSs, [-]
A_dep_g
A_dep_l
beta_g
beta_l
Cp_minus_Cv_g
Cp_minus_Cv_l
d2a_alpha_dTdP_g_V
d2a_alpha_dTdP_l_V
d2H_dep_dT2_g
d2H_dep_dT2_g_P
d2H_dep_dT2_g_V
d2H_dep_dT2_l
d2H_dep_dT2_l_P
d2H_dep_dT2_l_V
d2H_dep_dTdP_g
d2H_dep_dTdP_l
d2P_drho2_g
d2P_drho2_l
d2P_dT2_PV_g
d2P_dT2_PV_l
d2P_dTdP_g
d2P_dTdP_l
d2P_dTdrho_g
d2P_dTdrho_l
d2P_dVdP_g
d2P_dVdP_l
d2P_dVdT_g
d2P_dVdT_l
d2P_dVdT_TP_g
d2P_dVdT_TP_l
d2rho_dP2_g
d2rho_dP2_l
d2rho_dPdT_g
d2rho_dPdT_l
d2rho_dT2_g
d2rho_dT2_l
d2S_dep_dT2_g
d2S_dep_dT2_g_V
d2S_dep_dT2_l
d2S_dep_dT2_l_V
d2S_dep_dTdP_g
d2S_dep_dTdP_l
d2T_dP2_g
d2T_dP2_l
d2T_dPdrho_g
d2T_dPdrho_l
d2T_dPdV_g
d2T_dPdV_l
d2T_drho2_g
d2T_drho2_l
d2T_dV2_g
d2T_dV2_l
d2T_dVdP_g
d2T_dVdP_l
d2V_dP2_g
d2V_dP2_l
d2V_dPdT_g
d2V_dPdT_l
d2V_dT2_g
d2V_dT2_l
d2V_dTdP_g
d2V_dTdP_l
d3a_alpha_dT3
da_alpha_dP_g_V
da_alpha_dP_l_V
dbeta_dP_g
dbeta_dP_l
dbeta_dT_g
dbeta_dT_l
dfugacity_dP_g
dfugacity_dP_l
dfugacity_dT_g
dfugacity_dT_l
dH_dep_dP_g
dH_dep_dP_g_V
dH_dep_dP_l
dH_dep_dP_l_V
dH_dep_dT_g
dH_dep_dT_g_V
dH_dep_dT_l
dH_dep_dT_l_V
dH_dep_dV_g_P
dH_dep_dV_g_T
dH_dep_dV_l_P
dH_dep_dV_l_T
dP_drho_g
dP_drho_l
dphi_dP_g
dphi_dP_l
dphi_dT_g
dphi_dT_l
drho_dP_g
drho_dP_l
drho_dT_g
drho_dT_l
dS_dep_dP_g
dS_dep_dP_g_V
dS_dep_dP_l
dS_dep_dP_l_V
dS_dep_dT_g
dS_dep_dT_g_V
dS_dep_dT_l
dS_dep_dT_l_V
dS_dep_dV_g_P
dS_dep_dV_g_T
dS_dep_dV_l_P
dS_dep_dV_l_T
dT_drho_g
dT_drho_l
dZ_dP_g
dZ_dP_l
dZ_dT_g
dZ_dT_l
fugacity_g
fugacity_l
kappa_g
kappa_l
lnphi_g
lnphi_l
more_stable_phase
mpmath_volume_ratios
mpmath_volumes
mpmath_volumes_float
phi_g
phi_l
rho_g
rho_l
sorted_volumes
state_specs
U_dep_g
U_dep_l
Vc
V_dep_g
V_dep_l
V_g_mpmath
V_l_mpmath
'''
# Slots does not help performance in either implementation
kwargs = {}
'''Dictionary which holds input parameters to an EOS which are non-standard;
this excludes `T`, `P`, `V`, `omega`, `Tc`, `Pc`, `Vc` but includes EOS
specific parameters like `S1` and `alpha_coeffs`.
'''
N = 1
'''The number of components in the EOS'''
scalar = True
multicomponent = False
'''Whether or not the EOS is multicomponent'''
_P_zero_l_cheb_coeffs = None
P_zero_l_cheb_limits = (0.0, 0.0)
_P_zero_g_cheb_coeffs = None
P_zero_g_cheb_limits = (0.0, 0.0)
Psat_cheb_range = (0.0, 0.0)
main_derivatives_and_departures = staticmethod(main_derivatives_and_departures)
c1 = None
'''Parameter used by some equations of state in the `a` calculation'''
c2 = None
'''Parameter used by some equations of state in the `b` calculation'''
nonstate_constants = ('Tc', 'Pc', 'omega', 'kwargs', 'a', 'b', 'delta', 'epsilon')
kwargs_keys = tuple()
if not is_micropython:
def __init_subclass__(cls):
cls.__full_path__ = "%s.%s" %(cls.__module__, cls.__qualname__)
else:
__full_path__ = None
def state_hash(self):
r'''Basic method to calculate a hash of the state of the model and its
model parameters.
Note that the hashes should only be compared on the same system running
in the same process!
Returns
-------
state_hash : int
Hash of the object's model parameters and state, [-]
'''
if self.multicomponent:
comp = self.zs
else:
comp = 0
return hash_any_primitive((self.model_hash(), self.T, self.P, self.V, comp))
def model_hash(self):
r'''Basic method to calculate a hash of the non-state parts of the model.
This is useful for comparing two models to
determine if they are the same, i.e. in a VLL flash it is important to
know if both liquids have the same model.
Note that the hashes should only be compared on the same system running
in the same process!
Returns
-------
model_hash : int
Hash of the object's model parameters, [-]
'''
try:
return self._model_hash
except AttributeError:
pass
h = hash(self.__class__.__name__)
for s in self.nonstate_constants:
try:
h = hash((h, s, hash_any_primitive(getattr(self, s))))
except AttributeError:
pass
self._model_hash = h
return h
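# Illustrative sketch (not part of the original source): `model_hash` ignores
# the state while `state_hash` includes it, so two instances of the same model
# at different conditions share a model hash but not a state hash, e.g.:
#   eos1 = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0, P=1e6)
#   eos2 = eos1.to(T=500.0, P=1e6)
#   eos1.model_hash() == eos2.model_hash()   # True expected
#   eos1.state_hash() == eos2.state_hash()   # False expected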
def __hash__(self):
r'''Method to calculate and return a hash representing the exact state
of the object.
Returns
-------
hash : int
Hash of the object, [-]
'''
d = self.__dict__
ans = hash_any_primitive((self.__class__.__name__, d))
return ans
def __eq__(self, other):
return self.__hash__() == hash(other)
@property
def state_specs(self):
'''Convenience method to return the two specified state specs (`T`,
`P`, or `V`) as a dictionary.
Examples
--------
>>> PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=500.0, V=1.0).state_specs
{'T': 500.0, 'V': 1.0}
'''
d = {}
if hasattr(self, 'no_T_spec') and self.no_T_spec:
d['P'] = self.P
d['V'] = self.V
elif self.V is not None:
d['T'] = self.T
d['V'] = self.V
else:
d['T'] = self.T
d['P'] = self.P
return d
def __repr__(self):
'''Create a string representation of the EOS - by default, include
all parameters so as to make it easy to construct new instances from
states. Includes the two specified state variables, `Tc`, `Pc`, `omega`
and any `kwargs`.
Returns
-------
recreation : str
String which is valid Python and recreates the current state of
the object if run, [-]
Examples
--------
>>> eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0, P=1e6)
>>> eos
PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0, P=1000000.0)
'''
s = '%s(Tc=%s, Pc=%s, omega=%s, ' %(self.__class__.__name__, repr(self.Tc), repr(self.Pc), repr(self.omega))
for k, v in self.kwargs.items():
s += '%s=%s, ' %(k, v)
if hasattr(self, 'no_T_spec') and self.no_T_spec:
s += 'P=%s, V=%s' %(repr(self.P), repr(self.V))
elif self.V is not None:
s += 'T=%s, V=%s' %(repr(self.T), repr(self.V))
else:
s += 'T=%s, P=%s' %(repr(self.T), repr(self.P))
s += ')'
return s
def as_json(self):
r'''Method to create a JSON-friendly serialization of the eos
which can be stored, and reloaded later.
Returns
-------
json_repr : dict
JSON-friendly representation, [-]
Notes
-----
Examples
--------
>>> import json
>>> eos = MSRKTranslated(Tc=507.6, Pc=3025000, omega=0.2975, c=22.0561E-6, M=0.7446, N=0.2476, T=250., P=1E6)
>>> assert eos == MSRKTranslated.from_json(json.loads(json.dumps(eos.as_json())))
'''
# vaguely jsonpickle compatible
d = self.__dict__.copy()
if not self.scalar:
d = serialize.arrays_to_lists(d)
# TODO: delete kwargs and reconstruct it
# Need to add all kwargs attributes
try:
del d['kwargs']
except:
pass
d["py/object"] = self.__full_path__
d['json_version'] = 1
return d
@classmethod
def from_json(cls, json_repr):
r'''Method to create a eos from a JSON
serialization of another eos.
Parameters
----------
json_repr : dict
JSON-friendly representation, [-]
Returns
-------
eos : :obj:`GCEOS`
Newly created object from the json serialization, [-]
Notes
-----
It is important that the input string be in the same format as that
created by :obj:`GCEOS.as_json`.
Examples
--------
>>> eos = MSRKTranslated(Tc=507.6, Pc=3025000, omega=0.2975, c=22.0561E-6, M=0.7446, N=0.2476, T=250., P=1E6)
>>> string = eos.as_json()
>>> new_eos = GCEOS.from_json(string)
>>> assert eos.__dict__ == new_eos.__dict__
'''
d = json_repr
eos_name = d['py/object']
del d['py/object']
del d['json_version']
try:
d['raw_volumes'] = tuple(d['raw_volumes'])
except:
pass
try:
d['alpha_coeffs'] = tuple(d['alpha_coeffs'])
except:
pass
eos = eos_full_path_dict[eos_name]
if eos.kwargs_keys:
d['kwargs'] = {k: d[k] for k in eos.kwargs_keys}
try:
d['kwargs']['alpha_coeffs'] = tuple(d['kwargs']['alpha_coeffs'])
except:
pass
new = eos.__new__(eos)
new.__dict__ = d
return new
def check_sufficient_inputs(self):
'''Method to raise an exception if none of the pairs (T, P), (T, V), or
(P, V) are given. '''
if not ((self.T is not None and self.P is not None) or
(self.T is not None and self.V is not None) or
(self.P is not None and self.V is not None)):
raise ValueError('Either T and P, or T and V, or P and V are required')
def solve(self, pure_a_alphas=True, only_l=False, only_g=False, full_alphas=True):
'''First EOS-generic method; should be called by all specific EOSs.
For solving for `T`, the EOS must provide the method `solve_T`.
For all cases, the EOS must provide `a_alpha_and_derivatives`.
Calls `set_from_PT` once done.
'''
# self.check_sufficient_inputs()
if self.V is not None:
V = self.V
if self.P is not None:
solution = 'g' if (only_g and not only_l) else ('l' if only_l else None)
self.T = self.solve_T(self.P, V, solution=solution)
self.a_alpha, self.da_alpha_dT, self.d2a_alpha_dT2 = self.a_alpha_and_derivatives(self.T, pure_a_alphas=pure_a_alphas)
elif self.T is not None:
self.a_alpha, self.da_alpha_dT, self.d2a_alpha_dT2 = self.a_alpha_and_derivatives(self.T, pure_a_alphas=pure_a_alphas)
# Tested to change the result at the 7th decimal once
# V_r3 = V**(1.0/3.0)
# T, b, a_alpha, delta, epsilon = self.T, self.b, self.a_alpha, self.delta, self.epsilon
# P = R*T/(V-b) - a_alpha/((V_r3*V_r3)*(V_r3*(V+delta)) + epsilon)
#
# for _ in range(10):
# err = -T + (P*V**3 - P*V**2*b + P*V**2*delta - P*V*b*delta + P*V*epsilon - P*b*epsilon + V*a_alpha - a_alpha*b)/(R*(V**2 + V*delta + epsilon))
# derr = (V**3 - V**2*b + V**2*delta - V*b*delta + V*epsilon - b*epsilon)/(R*(V**2 + V*delta + epsilon))
# P = P - err/derr
# self.P = P
# Equation re-arranged to hopefully solve better
# Allow mpf multiple precision volume for flash initialization
# DO NOT TAKE OUT FLOAT CONVERSION!
T = self.T
if not isinstance(V, (float, int)):
import mpmath as mp
# mp.mp.dps = 50 # Do not need more decimal places than needed
# Need to complete the calculation with the RT term having higher precision as well
T = mp.mpf(T)
self.P = float(R*T/(V-self.b) - self.a_alpha/(V*V + self.delta*V + self.epsilon))
if self.P <= 0.0:
raise ValueError("TV inputs result in negative pressure of %f Pa" %(self.P))
# self.P = R*self.T/(V-self.b) - self.a_alpha/(V*(V + self.delta) + self.epsilon)
else:
raise ValueError("Two specs are required")
Vs = [V, 1.0j, 1.0j]
elif self.T is None or self.P is None:
raise ValueError("Two specs are required")
else:
if full_alphas:
self.a_alpha, self.da_alpha_dT, self.d2a_alpha_dT2 = self.a_alpha_and_derivatives(self.T, pure_a_alphas=pure_a_alphas)
else:
self.a_alpha = self.a_alpha_and_derivatives(self.T, full=False, pure_a_alphas=pure_a_alphas)
self.da_alpha_dT, self.d2a_alpha_dT2 = -5e-3, 1.5e-5
self.raw_volumes = Vs = self.volume_solutions(self.T, self.P, self.b, self.delta, self.epsilon, self.a_alpha)
self.set_from_PT(Vs, only_l=only_l, only_g=only_g)
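# Illustrative call patterns handled by `solve` (sketch, not part of the
# original file) - any two of T, P and V fix the state:
#   PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0, P=1e6)   # T, P spec
#   PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=500.0, V=1.0)   # T, V spec; P is computed
#   PR(Tc=507.6, Pc=3025000.0, omega=0.2975, P=..., V=...)     # P, V spec; `solve_T` is called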
def resolve_full_alphas(self):
'''Generic method to resolve the eos with fully calculated alpha
derivatives. Re-calculates properties with the new alpha derivatives
for any previously solved roots.
'''
self.a_alpha, self.da_alpha_dT, self.d2a_alpha_dT2 = self.a_alpha_and_derivatives(self.T, full=True, pure_a_alphas=False)
self.set_from_PT(self.raw_volumes, only_l=hasattr(self, 'V_l'), only_g=hasattr(self, 'V_g'))
def solve_missing_volumes(self):
r'''Generic method to ensure both volumes, if solutions are physical,
have calculated properties. This effectively undoes the optimization
of the `only_l` and `only_g` keywords.
'''
if self.phase == 'l/g':
try:
self.V_l
except:
self.set_from_PT(self.raw_volumes, only_l=True, only_g=False)
try:
self.V_g
except:
self.set_from_PT(self.raw_volumes, only_l=False, only_g=True)
def set_from_PT(self, Vs, only_l=False, only_g=False):
r'''Counts the number of real volumes in `Vs`, and determines what to do.
If there is only one real volume, the method
`set_properties_from_solution` is called with it. If there are
two real volumes, `set_properties_from_solution` is called once with
each volume. The phase is returned by `set_properties_from_solution`,
and the volume is set to either `V_l` or `V_g` as appropriate.
Parameters
----------
Vs : list[float]
Three possible molar volumes, [m^3/mol]
only_l : bool
When true, if there is a liquid and a vapor root, only the liquid
root (and properties) will be set.
only_g : bool
When true, if there is a liquid and a vapor root, only the vapor
root (and properties) will be set.
Notes
-----
An optimization attempt was made to remove min() and max() from this
function; that is indeed possible, but the check for handling if there
are two or three roots makes it not worth it.
'''
# good_roots = [i.real for i in Vs if i.imag == 0.0 and i.real > 0.0]
# good_root_count = len(good_roots)
# All roots will have some imaginary component; ignore them if > 1E-9 (when using a solver that does not strip them)
b = self.b
# good_roots = [i.real for i in Vs if (i.real ==0 or abs(i.imag/i.real) < 1E-12) and i.real > 0.0]
good_roots = [i.real for i in Vs if (i.real > b and (i.real == 0.0 or abs(i.imag/i.real) < 1E-12))]
# Counter for the case of testing volume solutions that don't work
# good_roots = [i.real for i in Vs if (i.real > 0.0 and (i.real == 0.0 or abs(i.imag) < 1E-9))]
good_root_count = len(good_roots)
if good_root_count == 1 or (good_root_count > 1 and good_roots[0] == good_roots[1]):
self.phase = self.set_properties_from_solution(self.T, self.P,
good_roots[0], b,
self.delta, self.epsilon,
self.a_alpha, self.da_alpha_dT,
self.d2a_alpha_dT2)
if self.N == 1 and (
(self.multicomponent and (self.Tcs[0] == self.T and self.Pcs[0] == self.P))
or (not self.multicomponent and self.Tc == self.T and self.Pc == self.P)):
# Do not have any tests for this - not good!
force_l = not self.phase == 'l'
force_g = not self.phase == 'g'
self.set_properties_from_solution(self.T, self.P,
good_roots[0], b,
self.delta, self.epsilon,
self.a_alpha, self.da_alpha_dT,
self.d2a_alpha_dT2,
force_l=force_l,
force_g=force_g)
self.phase = 'l/g'
elif good_root_count > 1:
V_l, V_g = min(good_roots), max(good_roots)
if not only_g:
self.set_properties_from_solution(self.T, self.P, V_l, b,
self.delta, self.epsilon,
self.a_alpha, self.da_alpha_dT,
self.d2a_alpha_dT2,
force_l=True)
if not only_l:
self.set_properties_from_solution(self.T, self.P, V_g, b,
self.delta, self.epsilon,
self.a_alpha, self.da_alpha_dT,
self.d2a_alpha_dT2, force_g=True)
self.phase = 'l/g'
else:
# Even in the case of three real roots, it is still the min/max that make sense
print([self.T, self.P, b, self.delta, self.epsilon, self.a_alpha, 'coordinates of failure'])
if self.multicomponent:
extra = ', zs is %s' %(self.zs)
else:
extra = ''
raise ValueError('No acceptable roots were found; the roots are %s, T is %s K, P is %s Pa, a_alpha is %s, b is %s%s' %(str(Vs), str(self.T), str(self.P), str([self.a_alpha]), str([self.b]), extra))
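# Illustrative access pattern (sketch, not part of the original source): when
# two acceptable roots are found, `phase` is 'l/g' and both volumes are kept,
# with the liquid root always being the smaller one:
#   if eos.phase == 'l/g':
#       assert eos.V_l < eos.V_g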
def set_properties_from_solution(self, T, P, V, b, delta, epsilon, a_alpha,
da_alpha_dT, d2a_alpha_dT2, quick=True,
force_l=False, force_g=False):
r'''Sets all interesting properties which can be calculated from an
EOS alone. Determines which phase the fluid is on its own; for details,
see `phase_identification_parameter`.
The list of properties set is as follows, with all properties suffixed
with '_l' or '_g'.
dP_dT, dP_dV, dV_dT, dV_dP, dT_dV, dT_dP, d2P_dT2, d2P_dV2, d2V_dT2,
d2V_dP2, d2T_dV2, d2T_dP2, d2V_dPdT, d2P_dTdV, d2T_dPdV, H_dep, S_dep,
G_dep and PIP.
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
b : float
Coefficient calculated by EOS-specific method, [m^3/mol]
delta : float
Coefficient calculated by EOS-specific method, [m^3/mol]
epsilon : float
Coefficient calculated by EOS-specific method, [m^6/mol^2]
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K**2]
quick : bool, optional
Whether to use a SymPy cse-derived expression (3x faster) or
individual formulas
Returns
-------
phase : str
Either 'l' or 'g'
Notes
-----
The individual formulas for the derivatives and excess properties are
as follows. For definitions of `beta`, see `isobaric_expansion`;
for `kappa`, see isothermal_compressibility; for `Cp_minus_Cv`, see
`Cp_minus_Cv`; for `phase_identification_parameter`, see
`phase_identification_parameter`.
First derivatives; in part using the Triple Product Rule [2]_, [3]_:
.. math::
\left(\frac{\partial P}{\partial T}\right)_V = \frac{R}{V - b}
- \frac{a \frac{d \alpha{\left (T \right )}}{d T}}{V^{2} + V \delta
+ \epsilon}
.. math::
\left(\frac{\partial P}{\partial V}\right)_T = - \frac{R T}{\left(
V - b\right)^{2}} - \frac{a \left(- 2 V - \delta\right) \alpha{
\left (T \right )}}{\left(V^{2} + V \delta + \epsilon\right)^{2}}
.. math::
\left(\frac{\partial V}{\partial T}\right)_P =-\frac{
\left(\frac{\partial P}{\partial T}\right)_V}{
\left(\frac{\partial P}{\partial V}\right)_T}
.. math::
\left(\frac{\partial V}{\partial P}\right)_T =-\frac{
\left(\frac{\partial V}{\partial T}\right)_P}{
\left(\frac{\partial P}{\partial T}\right)_V}
.. math::
\left(\frac{\partial T}{\partial V}\right)_P = \frac{1}
{\left(\frac{\partial V}{\partial T}\right)_P}
.. math::
\left(\frac{\partial T}{\partial P}\right)_V = \frac{1}
{\left(\frac{\partial P}{\partial T}\right)_V}
Second derivatives with respect to one variable; those of `T` and `V`
use identities shown in [1]_ and verified numerically:
.. math::
\left(\frac{\partial^2 P}{\partial T^2}\right)_V = - \frac{a
\frac{d^{2} \alpha{\left (T \right )}}{d T^{2}}}{V^{2} + V \delta
+ \epsilon}
.. math::
\left(\frac{\partial^2 P}{\partial V^2}\right)_T = 2 \left(\frac{
R T}{\left(V - b\right)^{3}} - \frac{a \left(2 V + \delta\right)^{
2} \alpha{\left (T \right )}}{\left(V^{2} + V \delta + \epsilon
\right)^{3}} + \frac{a \alpha{\left (T \right )}}{\left(V^{2} + V
\delta + \epsilon\right)^{2}}\right)
Second derivatives with respect to the other two variables; those of
`T` and `V` use identities shown in [1]_ and verified numerically:
.. math::
\left(\frac{\partial^2 P}{\partial T \partial V}\right) = - \frac{
R}{\left(V - b\right)^{2}} + \frac{a \left(2 V + \delta\right)
\frac{d \alpha{\left (T \right )}}{d T}}{\left(V^{2} + V \delta
+ \epsilon\right)^{2}}
Excess properties
.. math::
H_{dep} = \int_{\infty}^V \left[T\frac{\partial P}{\partial T}_V
- P\right]dV + PV - RT= P V - R T + \frac{2}{\sqrt{
\delta^{2} - 4 \epsilon}} \left(T a \frac{d \alpha{\left (T \right
)}}{d T} - a \alpha{\left (T \right )}\right) \operatorname{atanh}
{\left (\frac{2 V + \delta}{\sqrt{\delta^{2} - 4 \epsilon}}
\right)}
.. math::
S_{dep} = \int_{\infty}^V\left[\frac{\partial P}{\partial T}
- \frac{R}{V}\right] dV + R\ln\frac{PV}{RT} = - R \ln{\left (V
\right )} + R \ln{\left (\frac{P V}{R T} \right )} + R \ln{\left
(V - b \right )} + \frac{2 a \frac{d\alpha{\left (T \right )}}{d T}
}{\sqrt{\delta^{2} - 4 \epsilon}} \operatorname{atanh}{\left (\frac
{2 V + \delta}{\sqrt{\delta^{2} - 4 \epsilon}} \right )}
.. math::
G_{dep} = H_{dep} - T S_{dep}
.. math::
C_{v, dep} = T\int_\infty^V \left(\frac{\partial^2 P}{\partial
T^2}\right) dV = - T a \left(\sqrt{\frac{1}{\delta^{2} - 4
\epsilon}} \ln{\left (V - \frac{\delta^{2}}{2} \sqrt{\frac{1}{
\delta^{2} - 4 \epsilon}} + \frac{\delta}{2} + 2 \epsilon \sqrt{
\frac{1}{\delta^{2} - 4 \epsilon}} \right )} - \sqrt{\frac{1}{
\delta^{2} - 4 \epsilon}} \ln{\left (V + \frac{\delta^{2}}{2}
\sqrt{\frac{1}{\delta^{2} - 4 \epsilon}} + \frac{\delta}{2}
- 2 \epsilon \sqrt{\frac{1}{\delta^{2} - 4 \epsilon}} \right )}
\right) \frac{d^{2} \alpha{\left (T \right )} }{d T^{2}}
.. math::
C_{p, dep} = (C_p-C_v)_{\text{from EOS}} + C_{v, dep} - R
References
----------
.. [1] Thorade, Matthis, and Ali Saadat. "Partial Derivatives of
Thermodynamic State Properties for Dynamic Simulation."
Environmental Earth Sciences 70, no. 8 (April 10, 2013): 3497-3503.
doi:10.1007/s12665-013-2394-z.
.. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th
edition. New York: McGraw-Hill Professional, 2000.
.. [3] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
Butterworth-Heinemann, 1985.
'''
dP_dT, dP_dV, d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, Cv_dep = (
self.main_derivatives_and_departures(T, P, V, b, delta, epsilon,
a_alpha, da_alpha_dT,
d2a_alpha_dT2))
try:
dV_dP = 1.0/dP_dV
except:
dV_dP = inf
dT_dP = 1./dP_dT
dV_dT = -dP_dT*dV_dP
dT_dV = 1./dV_dT
Z = P*V*R_inv/T
Cp_dep = T*dP_dT*dV_dT + Cv_dep - R
G_dep = H_dep - T*S_dep
PIP = V*(d2P_dTdV*dT_dP - d2P_dV2*dV_dP) # phase_identification_parameter(V, dP_dT, dP_dV, d2P_dV2, d2P_dTdV)
# 1 + 1e-14 - allow a few dozen ulps of tolerance to keep the ideal gas model a gas
if force_l or (not force_g and PIP > 1.00000000000001):
(self.V_l, self.Z_l, self.PIP_l, self.dP_dT_l, self.dP_dV_l,
self.dV_dT_l, self.dV_dP_l, self.dT_dV_l, self.dT_dP_l,
self.d2P_dT2_l, self.d2P_dV2_l, self.d2P_dTdV_l, self.H_dep_l,
self.S_dep_l, self.G_dep_l, self.Cp_dep_l, self.Cv_dep_l) = (
V, Z, PIP, dP_dT, dP_dV, dV_dT, dV_dP, dT_dV, dT_dP,
d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, G_dep, Cp_dep,
Cv_dep)
return 'l'
else:
(self.V_g, self.Z_g, self.PIP_g, self.dP_dT_g, self.dP_dV_g,
self.dV_dT_g, self.dV_dP_g, self.dT_dV_g, self.dT_dP_g,
self.d2P_dT2_g, self.d2P_dV2_g, self.d2P_dTdV_g, self.H_dep_g,
self.S_dep_g, self.G_dep_g, self.Cp_dep_g, self.Cv_dep_g) = (
V, Z, PIP, dP_dT, dP_dV, dV_dT, dV_dP, dT_dV, dT_dP,
d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, G_dep, Cp_dep,
Cv_dep)
return 'g'
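# Illustrative property access (sketch, not part of the original source):
# every quantity listed above is stored with an '_l' or '_g' suffix, e.g.
#   eos.dP_dT_l, eos.H_dep_l, eos.Cp_dep_l   # liquid root, if present
#   eos.dP_dT_g, eos.H_dep_g, eos.Cp_dep_g   # vapor root, if present
# and G_dep_l == H_dep_l - T*S_dep_l holds by construction.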
def a_alpha_and_derivatives(self, T, full=True, quick=True,
pure_a_alphas=True):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives.
Parameters
----------
T : float
Temperature, [K]
full : bool, optional
If False, calculates and returns only `a_alpha`, [-]
quick : bool, optional
Legacy parameter being phased out, [-]
pure_a_alphas : bool, optional
Whether or not to recalculate the a_alpha terms of pure components
(for the case of mixtures only) which stay the same as the
composition changes (i.e in a PT flash); does nothing in the case
of pure EOSs [-]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
'''
if full:
return self.a_alpha_and_derivatives_pure(T=T)
return self.a_alpha_pure(T)
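# Illustrative use of the `full` flag on a concrete EOS subclass instance
# (sketch, not part of the original file):
#   a_alpha, da_alpha_dT, d2a_alpha_dT2 = eos.a_alpha_and_derivatives(350.0)
#   a_alpha_only = eos.a_alpha_and_derivatives(350.0, full=False)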
def a_alpha_and_derivatives_pure(self, T):
r'''Dummy method to calculate :math:`a \alpha` and its first and second
derivatives. Should be implemented with the same function signature in
each EOS variant; this base implementation only raises NotImplementedError.
Should return 'a_alpha', 'da_alpha_dT', and 'd2a_alpha_dT2'.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
'''
raise NotImplementedError('a_alpha and its first and second derivatives '
'should be calculated by this method, in a user subclass.')
@property
def d3a_alpha_dT3(self):
r'''Method to calculate the third temperature derivative of
:math:`a \alpha`, [J^2/mol^2/Pa/K^3]. This parameter is needed for
some higher-order derivatives used in some flash calculations.
Returns
-------
d3a_alpha_dT3 : float
Third temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^3]
'''
try:
return self._d3a_alpha_dT3
except AttributeError:
pass
self._d3a_alpha_dT3 = self.d3a_alpha_dT3_pure(self.T)
return self._d3a_alpha_dT3
def a_alpha_plot(self, Tmin=1e-4, Tmax=None, pts=1000, plot=True,
show=True):
r'''Method to create a plot of the :math:`a \alpha` parameter and its
first two derivatives. This easily allows identification of EOSs which
are displaying inconsistent behavior.
Parameters
----------
Tmin : float
Minimum temperature of calculation, [K]
Tmax : float
Maximum temperature of calculation, [K]
pts : int, optional
The number of temperature points to include [-]
plot : bool
If False, the calculated values and temperatures are returned
without plotting the data, [-]
show : bool
Whether or not the plot should be rendered and shown; a handle to
it is returned if `plot` is True for other purposes such as saving
the plot to a file, [-]
Returns
-------
Ts : list[float]
Logarithmically spaced temperatures in specified range, [K]
a_alpha : list[float]
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : list[float]
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : list[float]
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
fig : matplotlib.figure.Figure
Plotted figure, only returned if `plot` is True, [-]
'''
if Tmax is None:
if self.multicomponent:
Tc = self.pseudo_Tc
else:
Tc = self.Tc
Tmax = Tc*10
Ts = logspace(log10(Tmin), log10(Tmax), pts)
a_alphas = []
da_alphas = []
d2a_alphas = []
for T in Ts:
v, d1, d2 = self.a_alpha_and_derivatives(T, full=True)
a_alphas.append(v)
da_alphas.append(d1)
d2a_alphas.append(d2)
if plot:
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_xlabel('Temperature [K]')
ln0 = ax1.plot(Ts, a_alphas, 'r', label=r'$a \alpha$ [J^2/mol^2/Pa]')
ln2 = ax1.plot(Ts, d2a_alphas, 'g', label='Second derivative [J^2/mol^2/Pa/K^2]')
ax1.set_yscale('log')
ax1.set_ylabel(r'$a \alpha$ and $\frac{\partial^2 (a \alpha)}{\partial T^2}$')
ax2 = ax1.twinx()
ax2.set_yscale('symlog')
ln1 = ax2.plot(Ts, da_alphas, 'b', label='First derivative [J^2/mol^2/Pa/K]')
ax2.set_ylabel(r'$\frac{\partial a \alpha}{\partial T}$')
ax1.set_title(r'$a \alpha$ vs temperature; range %.4g to %.4g' %(max(a_alphas), min(a_alphas)))
lines = ln0 + ln1 + ln2
labels = [l.get_label() for l in lines]
ax1.legend(lines, labels, loc=9, bbox_to_anchor=(0.5,-0.18))
if show:
plt.show()
return Ts, a_alphas, da_alphas, d2a_alphas, fig
return Ts, a_alphas, da_alphas, d2a_alphas
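# Illustrative non-plotting call (sketch, not part of the original source;
# matplotlib is only needed when plot=True):
#   Ts, a_alphas, da_alphas, d2a_alphas = eos.a_alpha_plot(
#       Tmin=1e-2, Tmax=1e4, pts=200, plot=False)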
def solve_T(self, P, V, solution=None):
'''Generic method to calculate `T` from a specified `P` and `V`.
Provides SciPy's `newton` solver, and iterates to solve the general
equation for `P`, recalculating `a_alpha` as a function of temperature
using `a_alpha_and_derivatives` each iteration.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
'l' or 'g' to specify a liquid or vapor solution (if one exists);
if None, will select a solution more likely to be real (closer to
STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
Returns
-------
T : float
Temperature, [K]
'''
high_prec = type(V) is not float
denominator_inv = 1.0/(V*V + self.delta*V + self.epsilon)
V_minus_b_inv = 1.0/(V-self.b)
self.no_T_spec = True
# dP_dT could be added to use a derivative-based method, however it is
# quite costly in comparison to the extra evaluations because it
# requires the temperature derivative of da_alpha_dT
def to_solve(T):
a_alpha = self.a_alpha_and_derivatives(T, full=False)
P_calc = R*T*V_minus_b_inv - a_alpha*denominator_inv
err = P_calc - P
return err
def to_solve_newton(T):
a_alpha, da_alpha_dT, _ = self.a_alpha_and_derivatives(T, full=True)
P_calc = R*T*V_minus_b_inv - a_alpha*denominator_inv
err = P_calc - P
derr_dT = R*V_minus_b_inv - denominator_inv*da_alpha_dT
return err, derr_dT
# import matplotlib.pyplot as plt
# xs = np.logspace(np.log10(1), np.log10(1e12), 15000)
# ys = np.abs([to_solve(T) for T in xs])
# plt.loglog(xs, ys)
# plt.show()
# max(ys), min(ys)
T_guess_ig = P*V*R_inv
T_guess_liq = P*V*R_inv*1000.0 # Compressibility factor of 0.001 for liquids
err_ig = to_solve(T_guess_ig)
err_liq = to_solve(T_guess_liq)
base_tol = 1e-12
if high_prec:
base_tol = 1e-18
T_brenth, T_secant = None, None
if err_ig*err_liq < 0.0 and T_guess_liq < 3e4:
try:
T_brenth = brenth(to_solve, T_guess_ig, T_guess_liq, xtol=base_tol,
fa=err_ig, fb=err_liq)
# Check the error
err = to_solve(T_brenth)
except:
pass
# if abs(err/P) < 1e-7:
# return T_brenth
if abs(err_ig) < abs(err_liq) or T_guess_liq > 20000 or solution == 'g':
T_guess = T_guess_ig
f0 = err_ig
else:
T_guess = T_guess_liq
f0 = err_liq
# T_guess = self.Tc*0.5
# ytol=T_guess*1e-9,
try:
T_secant = secant(to_solve, T_guess, low=1e-12, xtol=base_tol, same_tol=1e4, f0=f0)
except:
T_guess = T_guess_ig if T_guess != T_guess_ig else T_guess_liq
try:
T_secant = secant(to_solve, T_guess, low=1e-12, xtol=base_tol, same_tol=1e4, f0=f0)
except:
if T_brenth is None:
# Hardcoded limits, all the cleverness sometimes does not work
T_brenth = brenth(to_solve, 1e-3, 1e4, xtol=base_tol)
if solution is not None:
if T_brenth is None or (T_secant is not None and isclose(T_brenth, T_secant, rel_tol=1e-7)):
if T_secant is not None:
attempt_bounds = [(1e-3, T_secant-1e-5), (T_secant+1e-3, 1e4), (T_secant+1e-3, 1e5)]
else:
attempt_bounds = [(1e-3, 1e4), (1e4, 1e5)]
if T_guess_liq > 1e5:
attempt_bounds.append((1e4, T_guess_liq))
attempt_bounds.append((T_guess_liq, T_guess_liq*10))
for low, high in attempt_bounds:
try:
T_brenth = brenth(to_solve, low, high, xtol=base_tol)
break
except:
pass
if T_secant is None:
if T_secant is not None:
attempt_bounds = [(1e-3, T_brenth-1e-5), (T_brenth+1e-3, 1e4), (T_brenth+1e-3, 1e5)]
else:
attempt_bounds = [(1e4, 1e5), (1e-3, 1e4)]
if T_guess_liq > 1e5:
attempt_bounds.append((1e4, T_guess_liq))
attempt_bounds.append((T_guess_liq, T_guess_liq*10))
for low, high in attempt_bounds:
try:
T_secant = brenth(to_solve, low, high, xtol=base_tol)
break
except:
pass
try:
del self.a_alpha_ijs
del self.a_alpha_roots
del self.a_alpha_ij_roots_inv
except AttributeError:
pass
if T_secant is not None:
T_secant = float(T_secant)
if T_brenth is not None:
T_brenth = float(T_brenth)
if solution is not None:
if (T_secant is not None and T_brenth is not None):
if solution == 'g':
return max(T_brenth, T_secant)
else:
return min(T_brenth, T_secant)
if T_brenth is None:
return T_secant
elif T_brenth is not None and T_secant is not None and (abs(T_brenth - 298.15) < abs(T_secant - 298.15)):
return T_brenth
elif T_secant is not None:
return T_secant
return T_brenth
# return min(T_brenth, T_secant)
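# Illustrative direct use on a concrete subclass instance (sketch, not part
# of the original source): a P, V specification calls this internally, but it
# can also be invoked explicitly once a volume root is known:
#   T = eos.solve_T(P=1e6, V=eos.V_l)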
# Default method
# volume_solutions = volume_solutions_NR#volume_solutions_numpy#volume_solutions_NR
# volume_solutions = staticmethod(volume_solutions_numpy)
# volume_solutions = volume_solutions_fast
# volume_solutions = staticmethod(volume_solutions_Cardano)
volume_solutions = staticmethod(volume_solutions_halley)
# volume_solutions = staticmethod(volume_solutions_doubledouble_float)
volume_solutions_mp = staticmethod(volume_solutions_mpmath)
# Solver which actually has the roots
volume_solutions_full = staticmethod(volume_solutions_NR)
# volume_solutions = volume_solutions_mpmath_float
@property
def mpmath_volumes(self):
r'''Method to calculate to a high precision the exact roots to the
cubic equation, using `mpmath`.
Returns
-------
Vs : tuple[mpf]
Three real or complex volumes as calculated by `mpmath`, [m^3/mol]
Notes
-----
Examples
--------
>>> eos = PRTranslatedTwu(T=300, P=1e5, Tc=512.5, Pc=8084000.0, omega=0.559, alpha_coeffs=(0.694911, 0.9199, 1.7), c=-1e-6)
>>> eos.mpmath_volumes
(mpf('0.0000489261705320261435106226558966745'), mpf('0.000541508154451321441068958547812526'), mpf('0.0243149463942697410611501615357228'))
'''
return volume_solutions_mpmath(self.T, self.P, self.b, self.delta, self.epsilon, self.a_alpha)
@property
def mpmath_volumes_float(self):
r'''Method to calculate the volume roots of the cubic equation, using
`mpmath`, but returned as standard Python (complex) floats.
Returns
-------
Vs : list[float]
All volumes calculated by `mpmath`, [m^3/mol]
Notes
-----
Examples
--------
>>> eos = PRTranslatedTwu(T=300, P=1e5, Tc=512.5, Pc=8084000.0, omega=0.559, alpha_coeffs=(0.694911, 0.9199, 1.7), c=-1e-6)
>>> eos.mpmath_volumes_float
((4.892617053202614e-05+0j), (0.0005415081544513214+0j), (0.024314946394269742+0j))
'''
return volume_solutions_mpmath_float(self.T, self.P, self.b, self.delta, self.epsilon, self.a_alpha)
@property
def mpmath_volume_ratios(self):
r'''Method to compare, as ratios, the volumes of the implemented
cubic solver versus those calculated using `mpmath`.
Returns
-------
ratios : list[mpc]
Either 1 or 3 volume ratios as calculated by `mpmath`, [-]
Notes
-----
Examples
--------
>>> eos = PRTranslatedTwu(T=300, P=1e5, Tc=512.5, Pc=8084000.0, omega=0.559, alpha_coeffs=(0.694911, 0.9199, 1.7), c=-1e-6)
>>> eos.mpmath_volume_ratios
(mpc(real='0.99999999999999995', imag='0.0'), mpc(real='0.999999999999999965', imag='0.0'), mpc(real='1.00000000000000005', imag='0.0'))
'''
return tuple(i/j for i, j in zip(self.sorted_volumes, self.mpmath_volumes))
def Vs_mpmath(self):
r'''Method to calculate real roots of a cubic equation, using `mpmath`.
Returns
-------
Vs : list[mpf]
Either 1 or 3 real volumes as calculated by `mpmath`, [m^3/mol]
Notes
-----
Examples
--------
>>> eos = PRTranslatedTwu(T=300, P=1e5, Tc=512.5, Pc=8084000.0, omega=0.559, alpha_coeffs=(0.694911, 0.9199, 1.7), c=-1e-6)
>>> eos.Vs_mpmath()
[mpf('0.0000489261705320261435106226558966745'), mpf('0.000541508154451321441068958547812526'), mpf('0.0243149463942697410611501615357228')]
'''
Vs = self.mpmath_volumes
good_roots = [i.real for i in Vs if (i.real > 0.0 and abs(i.imag/i.real) < 1E-12)]
good_roots.sort()
return good_roots
def volume_error(self):
r'''Method to calculate the relative absolute error in the calculated
molar volumes. This is computed with `mpmath`. If the number of real
roots is different between mpmath and the implemented solver, an
error of 1 is returned.
Returns
-------
error : float
Relative absolute error in the calculated molar volumes, [-]
Notes
-----
Examples
--------
>>> eos = PRTranslatedTwu(T=300, P=1e5, Tc=512.5, Pc=8084000.0, omega=0.559, alpha_coeffs=(0.694911, 0.9199, 1.7), c=-1e-6)
>>> eos.volume_error()
5.2192e-17
'''
# Vs_good, Vs = self.mpmath_volumes, self.sorted_volumes
# Compare the reals only if mpmath has the imaginary roots
Vs_good = self.volume_solutions_mp(self.T, self.P, self.b, self.delta, self.epsilon, self.a_alpha)
Vs_filtered = [i.real for i in Vs_good if (i.real ==0 or abs(i.imag/i.real) < 1E-20) and i.real > self.b]
if len(Vs_filtered) in (2, 3):
two_roots_mpmath = True
Vl_mpmath, Vg_mpmath = min(Vs_filtered), max(Vs_filtered)
else:
if hasattr(self, 'V_l') and hasattr(self, 'V_g'):
# Wrong number of roots!
return 1
elif hasattr(self, 'V_l'):
Vl_mpmath = Vs_filtered[0]
elif hasattr(self, 'V_g'):
Vg_mpmath = Vs_filtered[0]
two_roots_mpmath = False
err = 0
if two_roots_mpmath:
if (not hasattr(self, 'V_l') or not hasattr(self, 'V_g')):
return 1.0
# Important not to confuse the roots and also to not consider the third root
try:
Vl = self.V_l
err_i = abs((Vl - Vl_mpmath)/Vl_mpmath)
if err_i > err:
err = err_i
except:
pass
try:
Vg = self.V_g
err_i = abs((Vg - Vg_mpmath)/Vg_mpmath)
if err_i > err:
err = err_i
except:
pass
return float(err)
def _mpmath_volume_matching(self, V):
'''Helper method which, given one of the three molar volume solutions
of the EOS, returns the mpmath molar volume which is nearest it.
'''
Vs = self.mpmath_volumes
rel_diffs = []
for Vi in Vs:
err = abs(Vi.real - V.real) + abs(Vi.imag - V.imag)
rel_diffs.append(err)
return Vs[rel_diffs.index(min(rel_diffs))]
@property
def V_l_mpmath(self):
r'''The molar volume of the liquid phase calculated with `mpmath` to
a higher precision, [m^3/mol]. This is useful for validating the
cubic root solver(s). It is not quite a true arbitrary solution to the
EOS, because the constants `b`, `epsilon`, `delta` and `a_alpha` as well
as the input arguments `T` and `P` are not calculated with arbitrary
precision. This is a feature, however, when comparing the volume solution
algorithms, as they work with the same finite-precision variables.
'''
if not hasattr(self, 'V_l'):
raise ValueError("Not solved for that volume")
return self._mpmath_volume_matching(self.V_l)
@property
def V_g_mpmath(self):
r'''The molar volume of the gas phase calculated with `mpmath` to
a higher precision, [m^3/mol]. This is useful for validating the
cubic root solver(s). It is not quite a true arbitrary solution to the
EOS, because the constants `b`, `epsilon`, `delta` and `a_alpha` as well
as the input arguments `T` and `P` are not calculated with arbitrary
precision. This is a feature, however, when comparing the volume solution
algorithms, as they work with the same finite-precision variables.
'''
if not hasattr(self, 'V_g'):
raise ValueError("Not solved for that volume")
return self._mpmath_volume_matching(self.V_g)
# def fugacities_mpmath(self, dps=30):
# # At one point thought maybe the fugacity equation was the source of error.
# # No. always the volume equation.
# import mpmath as mp
# mp.mp.dps = dps
# R_mp = mp.mpf(R)
# b, T, P, epsilon, delta, a_alpha = self.b, self.T, self.P, self.epsilon, self.delta, self.a_alpha
# b, T, P, epsilon, delta, a_alpha = [mp.mpf(i) for i in [b, T, P, epsilon, delta, a_alpha]]
#
# Vs_good = volume_solutions_mpmath(self.T, self.P, self.b, self.delta, self.epsilon, self.a_alpha)
# Vs_filtered = [i.real for i in Vs_good if (i.real == 0 or abs(i.imag/i.real) < 1E-20) and i.real > self.b]
#
# if len(Vs_filtered) in (2, 3):
# Vs = min(Vs_filtered), max(Vs_filtered)
# else:
# if hasattr(self, 'V_l') and hasattr(self, 'V_g'):
# # Wrong number of roots!
# raise ValueError("Error")
# Vs = Vs_filtered
## elif hasattr(self, 'V_l'):
## Vs = Vs_filtered[0]
## elif hasattr(self, 'V_g'):
## Vg_mpmath = Vs_filtered[0]
#
# log, exp, atanh, sqrt = mp.log, mp.exp, mp.atanh, mp.sqrt
#
# return [P*exp((P*V + R_mp*T*log(V) - R_mp*T*log(P*V/(R_mp*T)) - R_mp*T*log(V - b)
# - R_mp*T - 2*a_alpha*atanh(2*V/sqrt(delta**2 - 4*epsilon)
# + delta/sqrt(delta**2 - 4*epsilon)).real/sqrt(delta**2 - 4*epsilon))/(R_mp*T))
# for V in Vs]
def volume_errors(self, Tmin=1e-4, Tmax=1e4, Pmin=1e-2, Pmax=1e9,
pts=50, plot=False, show=False, trunc_err_low=1e-18,
trunc_err_high=1.0, color_map=None, timing=False):
r'''Method to create a plot of the relative absolute error in the
cubic volume solution as compared to a higher-precision calculation.
This method is incredibly valuable for the development of more reliable
floating-point based cubic solutions.
Parameters
----------
Tmin : float
Minimum temperature of calculation, [K]
Tmax : float
Maximum temperature of calculation, [K]
Pmin : float
Minimum pressure of calculation, [Pa]
Pmax : float
Maximum pressure of calculation, [Pa]
pts : int, optional
The number of points to include in both the `x` and `y` axis;
the validation calculation is slow, so increasing this too much
is not advisable, [-]
plot : bool
If False, the calculated errors are returned without plotting
the data, [-]
show : bool
Whether or not the plot should be rendered and shown; a handle to
it is returned if `plot` is True for other purposes such as saving
the plot to a file, [-]
trunc_err_low : float
Minimum plotted error; values under this are rounded to 0, [-]
trunc_err_high : float
Maximum plotted error; values above this are rounded to 1, [-]
color_map : matplotlib.cm.ListedColormap
Matplotlib colormap object, [-]
timing : bool
If True, plots the time taken by the volume root calculations
themselves; this can quickly reveal whether the solvers are taking
fast or slow paths, [-]
Returns
-------
errors : list[list[float]]
Relative absolute errors in the volume calculation (or timings in
seconds if `timing` is True), [-]
fig : matplotlib.figure.Figure
Plotted figure, only returned if `plot` is True, [-]
'''
if timing:
try:
from time import perf_counter
except:
from time import clock as perf_counter
Ts = logspace(log10(Tmin), log10(Tmax), pts)
Ps = logspace(log10(Pmin), log10(Pmax), pts)
kwargs = {}
if hasattr(self, 'zs'):
kwargs['zs'] = self.zs
kwargs['fugacities'] = False
errs = []
for T in Ts:
err_row = []
for P in Ps:
kwargs['T'] = T
kwargs['P'] = P
try:
obj = self.to(**kwargs)
except:
# So bad we failed to calculate a real point
val = 1.0
if timing:
t0 = perf_counter()
obj.volume_solutions(obj.T, obj.P, obj.b, obj.delta, obj.epsilon, obj.a_alpha)
val = perf_counter() - t0
else:
val = float(obj.volume_error())
if val > 1e-7:
print([obj.T, obj.P, obj.b, obj.delta, obj.epsilon, obj.a_alpha, 'coordinates of failure', obj])
err_row.append(val)
errs.append(err_row)
if plot:
import matplotlib.pyplot as plt
from matplotlib import ticker, cm
from matplotlib.colors import LogNorm
X, Y = np.meshgrid(Ts, Ps)
z = np.array(errs).T
fig, ax = plt.subplots()
if not timing:
if trunc_err_low is not None:
z[np.where(abs(z) < trunc_err_low)] = trunc_err_low
if trunc_err_high is not None:
z[np.where(abs(z) > trunc_err_high)] = trunc_err_high
if color_map is None:
color_map = cm.viridis
if not timing:
norm = LogNorm(vmin=trunc_err_low, vmax=trunc_err_high)
else:
z *= 1e-6
norm = None
im = ax.pcolormesh(X, Y, z, cmap=color_map, norm=norm)
cbar = fig.colorbar(im, ax=ax)
if timing:
cbar.set_label('Time [us]')
else:
cbar.set_label('Relative error')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('T [K]')
ax.set_ylabel('P [Pa]')
max_err = np.max(errs)
if trunc_err_low is not None and max_err < trunc_err_low:
max_err = 0
if trunc_err_high is not None and max_err > trunc_err_high:
max_err = trunc_err_high
if timing:
ax.set_title('Volume timings; max %.2e us' %(max_err*1e6))
else:
ax.set_title('Volume solution validation; max err %.4e' %(max_err))
if show:
plt.show()
return errs, fig
else:
return errs
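# Illustrative validation sweep without plotting (sketch, not part of the
# original source; a small `pts` keeps the mpmath comparison fast):
#   errs = eos.volume_errors(pts=10, plot=False)
#   max(max(row) for row in errs)   # worst relative volume error on the grid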
def PT_surface_special(self, Tmin=1e-4, Tmax=1e4, Pmin=1e-2, Pmax=1e9,
pts=50, show=False, color_map=None,
mechanical=True, pseudo_critical=True, Psat=True,
determinant_zeros=True, phase_ID_transition=True,
base_property='V', base_min=None, base_max=None,
base_selection='Gmin'):
r'''Method to create a plot of the special curves of a fluid -
vapor pressure, determinant zeros, pseudo critical point,
and mechanical critical point.
The color background is a plot of the molar volume (by default) which
has the minimum Gibbs energy (by default). If shown with a sufficient
number of points, the curve between vapor and liquid should be shown
smoothly.
Parameters
----------
Tmin : float, optional
Minimum temperature of calculation, [K]
Tmax : float, optional
Maximum temperature of calculation, [K]
Pmin : float, optional
Minimum pressure of calculation, [Pa]
Pmax : float, optional
Maximum pressure of calculation, [Pa]
pts : int, optional
The number of points to include in both the `x` and `y` axis [-]
show : bool, optional
Whether or not the plot should be rendered and shown; a handle to
it is returned if `plot` is True for other purposes such as saving
the plot to a file, [-]
color_map : matplotlib.cm.ListedColormap, optional
Matplotlib colormap object, [-]
mechanical : bool, optional
Whether or not to include the mechanical critical point; this is
the same as the critical point for a pure compound but not for a
mixture, [-]
pseudo_critical : bool, optional
Whether or not to include the pseudo critical point; this is
the same as the critical point for a pure compound but not for a
mixture, [-]
Psat : bool, optional
Whether or not to include the vapor pressure curve; for mixtures
this is neither the bubble nor dew curve, but rather a hypothetical
one which uses the same equation as the pure components, [-]
determinant_zeros : bool, optional
Whether or not to include a curve showing when the EOS's
determinant hits zero, [-]
phase_ID_transition : bool, optional
Whether or not to show a curve of where the PIP hits 1 exactly, [-]
base_property : str, optional
The property which should be plotted; '_l' and '_g' are added
automatically according to the selected phase, [-]
base_min : float, optional
If specified, the `base` property values will be limited to
this value at the minimum, [-]
base_max : float, optional
If specified, the `base` property values will be limited to
this value at the maximum, [-]
base_selection : str, optional
For the base property, there are often two possible phases but
only one value can be plotted; use 'l' to prefer liquid-like values,
'g' to prefer gas-like values, and 'Gmin' to prefer values of the
phase with the lowest Gibbs energy, [-]
Returns
-------
fig : matplotlib.figure.Figure
Plotted figure, only returned if `plot` is True, [-]
'''
Ts = logspace(log10(Tmin), log10(Tmax), pts)
Ps = logspace(log10(Pmin), log10(Pmax), pts)
kwargs = {}
if hasattr(self, 'zs'):
kwargs['zs'] = self.zs
l_prop = base_property + '_l'
g_prop = base_property + '_g'
base_positive = True
# Are we an ideal-gas?
if self.Zc == 1.0:
phase_ID_transition = False
Psat = False
Vs = []
for T in Ts:
V_row = []
for P in Ps:
kwargs['T'] = T
kwargs['P'] = P
obj = self.to(**kwargs)
if obj.phase == 'l/g':
if base_selection == 'Gmin':
V = getattr(obj, l_prop) if obj.G_dep_l < obj.G_dep_g else getattr(obj, g_prop)
elif base_selection == 'l':
V = getattr(obj, l_prop)
elif base_selection == 'g':
V = getattr(obj, g_prop)
else:
raise ValueError("Unknown value for base_selection")
elif obj.phase == 'l':
V = getattr(obj, l_prop)
else:
V = getattr(obj, g_prop)
if base_max is not None and V > base_max: V = base_max
if base_min is not None and V < base_min: V = base_min
V_row.append(V)
base_positive = base_positive and V > 0.0
Vs.append(V_row)
if self.multicomponent:
Tc, Pc = self.pseudo_Tc, self.pseudo_Pc
else:
Tc, Pc = self.Tc, self.Pc
if Psat:
Pmax_Psat = min(Pc, Pmax)
Pmin_Psat = max(1e-20, Pmin)
Tmin_Psat, Tmax_Psat = self.Tsat(Pmin_Psat), self.Tsat(Pmax_Psat)
if Tmin_Psat < Tmin or Tmin_Psat > Tmax: Tmin_Psat = Tmin
if Tmax_Psat > Tmax or Tmax_Psat < Tmin: Tmax_Psat = Tmax
Ts_Psats = []
Psats = []
for T in linspace(Tmin_Psat, Tmax_Psat, pts):
P = self.Psat(T)
Ts_Psats.append(T)
Psats.append(P)
if phase_ID_transition:
Pmin_Psat = max(1e-20, Pmin)
Tmin_ID = self.Tsat(Pmin_Psat)
Tmax_ID = Tmax
phase_ID_Ts = linspace(Tmin_ID, Tmax_ID, pts)
low_P_limit = min(1e-4, Pmin)
phase_ID_Ps = [self.P_PIP_transition(T, low_P_limit=low_P_limit)
for T in phase_ID_Ts]
if mechanical:
if self.multicomponent:
TP_mechanical = self.mechanical_critical_point()
else:
TP_mechanical = (Tc, Pc)
if determinant_zeros:
lows_det_Ps, high_det_Ps, Ts_dets_low, Ts_dets_high = [], [], [], []
for T in Ts:
a_alpha = self.a_alpha_and_derivatives(T, full=False)
P_dets = self.P_discriminant_zeros_analytical(T=T, b=self.b, delta=self.delta,
epsilon=self.epsilon, a_alpha=a_alpha, valid=True)
if P_dets:
P_det_min = min(P_dets)
P_det_max = max(P_dets)
if Pmin <= P_det_min <= Pmax:
lows_det_Ps.append(P_det_min)
Ts_dets_low.append(T)
if Pmin <= P_det_max <= Pmax:
high_det_Ps.append(P_det_max)
Ts_dets_high.append(T)
# if plot:
import matplotlib.pyplot as plt
from matplotlib import ticker, cm
from matplotlib.colors import LogNorm
X, Y = np.meshgrid(Ts, Ps)
z = np.array(Vs).T
fig, ax = plt.subplots()
if color_map is None:
color_map = cm.viridis
norm = LogNorm() if base_positive else None
im = ax.pcolormesh(X, Y, z, cmap=color_map, norm=norm)
cbar = fig.colorbar(im, ax=ax)
cbar.set_label('%s' %base_property)
if Psat:
plt.plot(Ts_Psats, Psats, label='Psat')
if determinant_zeros:
plt.plot(Ts_dets_low, lows_det_Ps, label='Low trans')
plt.plot(Ts_dets_high, high_det_Ps, label='High trans')
if pseudo_critical:
plt.plot([Tc], [Pc], 'x', label='Pseudo crit')
if mechanical:
plt.plot([TP_mechanical[0]], [TP_mechanical[1]], 'o', label='Mechanical')
if phase_ID_transition:
plt.plot(phase_ID_Ts, phase_ID_Ps, label='PIP=1')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel('T [K]')
ax.set_ylabel('P [Pa]')
if (Psat or determinant_zeros or pseudo_critical or mechanical
or phase_ID_transition):
plt.legend()
ax.set_title('%s vs minimum Gibbs validation' %(base_property))
if show:
plt.show()
return fig
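# Illustrative call (sketch, not part of the original source); the figure is
# always constructed, so matplotlib must be importable:
#   fig = eos.PT_surface_special(pts=30, show=False)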
def saturation_prop_plot(self, prop, Tmin=None, Tmax=None, pts=100,
plot=False, show=False, both=False):
r'''Method to create a plot of a specified property of the EOS along
the (pure component) saturation line.
Parameters
----------
prop : str
Property to be used, such as 'H_dep_l' (when `both` is False)
or 'H_dep' (when `both` is True), [-]
Tmin : float
Minimum temperature of calculation; if this is too low the
saturation routines will stop converging, [K]
Tmax : float
Maximum temperature of calculation; cannot be above the critical
temperature, [K]
pts : int, optional
The number of temperature points to include [-]
plot : bool
If False, the calculated values and temperatures are returned
without plotting the data, [-]
show : bool
Whether or not the plot should be rendered and shown; a handle to
it is returned if `plot` is True for other purposes such as saving
the plot to a file, [-]
both : bool
When True, append '_l' and '_g' and draw both the liquid and vapor
property specified, returning two different sets of values.
Returns
-------
Ts : list[float]
Logarithmically spaced temperatures in specified range, [K]
props : list[float]
The property specified if `both` is False; otherwise, the liquid
properties, [various]
props_g : list[float]
The gas properties, only returned if `both` is True, [various]
fig : matplotlib.figure.Figure
Plotted figure, only returned if `plot` is True, [-]
'''
if Tmax is None:
if self.multicomponent:
Tmax = self.pseudo_Tc
else:
Tmax = self.Tc
if Tmin is None:
Tmin = self.Tsat(1e-5)
Ts = logspace(log10(Tmin), log10(Tmax), pts)
kwargs = {}
if hasattr(self, 'zs'):
kwargs['zs'] = self.zs
props = []
if both:
props2 = []
prop_l = prop + '_l'
prop_g = prop + '_g'
for T in Ts:
kwargs['T'] = T
kwargs['P'] = self.Psat(T)
obj = self.to(**kwargs)
if both:
v = getattr(obj, prop_l)
try:
v = v()
except:
pass
props.append(v)
v = getattr(obj, prop_g)
try:
v = v()
except:
pass
props2.append(v)
else:
v = getattr(obj, prop)
try:
v = v()
except:
pass
props.append(v)
if plot:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
if both:
plt.plot(Ts, props, label='Liquid')
plt.plot(Ts, props2, label='Gas')
plt.legend()
else:
plt.plot(Ts, props)
ax.set_xlabel('Temperature [K]')
ax.set_ylabel(r'%s' %(prop))
ax.set_title(r'Saturation %s curve' %(prop))
if show:
plt.show()
if both:
return Ts, props, props2, fig
return Ts, props, fig
if both:
return Ts, props, props2
return Ts, props
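# Illustrative data-only call (sketch, not part of the original source):
#   Ts, H_dep_ls, H_dep_gs = eos.saturation_prop_plot('H_dep', both=True,
#                                                     plot=False)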
def Psat_errors(self, Tmin=None, Tmax=None, pts=50, plot=False, show=False,
trunc_err_low=1e-18, trunc_err_high=1.0, Pmin=1e-100):
r'''Method to create a plot of vapor pressure and the relative error
of its calculation vs. the iterative `polish` approach.
Parameters
----------
Tmin : float
Minimum temperature of calculation; if this is too low the
saturation routines will stop converging, [K]
Tmax : float
Maximum temperature of calculation; cannot be above the critical
temperature, [K]
pts : int, optional
The number of temperature points to include [-]
plot : bool
If False, the solution is returned without plotting the data, [-]
show : bool
Whether or not the plot should be rendered and shown; a handle to
it is returned if `plot` is True for other purposes such as saving
the plot to a file, [-]
trunc_err_low : float
Minimum plotted error; values under this are rounded to 0, [-]
trunc_err_high : float
Maximum plotted error; values above this are rounded to 1, [-]
Pmin : float
Minimum pressure for the solution to work on, [Pa]
Returns
-------
errors : list[float]
Absolute relative errors, [-]
Psats_num : list[float]
Vapor pressures calculated to full precision, [Pa]
Psats_fit : list[float]
Vapor pressures calculated with the fast solution, [Pa]
fig : matplotlib.figure.Figure
Plotted figure, only returned if `plot` is True, [-]
'''
try:
Tc = self.Tc
except:
Tc = self.pseudo_Tc
if Tmax is None:
Tmax = Tc
if Tmin is None:
Tmin = .1*Tc
Tmin_Pmin = None
try:
# Can we get the direct temperature for Pmin
if Pmin is not None:
Tmin_Pmin = self.Tsat(P=Pmin, polish=True)
except:
Tmin_Pmin = None
if Tmin_Pmin is not None:
Tmin = max(Tmin, Tmin_Pmin)
Ts = logspace(log10(Tmin), log10(Tmax), int(pts/3))
Ts[-1] = Tmax
Ts_mid = linspace(Tmin, Tmax, int(pts/3))
Ts_high = linspace(Tmax*.99, Tmax, int(pts/3))
Ts = list(sorted(Ts_high + Ts + Ts_mid))
Ts_worked, Psats_num, Psats_fit = [], [], []
for T in Ts:
failed = False
try:
Psats_fit.append(self.Psat(T, polish=False))
except NoSolutionError:
# Trust the fit - do not continue if no good
continue
except Exception as e:
raise ValueError("Failed to converge at %.16f K with unexpected error" %(T), e, self)
try:
Psat_polished = self.Psat(T, polish=True)
Psats_num.append(Psat_polished)
except Exception as e:
failed = True
raise ValueError("Failed to converge at %.16f K with unexpected error" %(T), e, self)
Ts_worked.append(T)
Ts = Ts_worked
errs = np.array([abs(i-j)/i for i, j in zip(Psats_num, Psats_fit)])
if plot:
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
if trunc_err_low is not None:
errs[np.where(abs(errs) < trunc_err_low)] = trunc_err_low
if trunc_err_high is not None:
errs[np.where(abs(errs) > trunc_err_high)] = trunc_err_high
Trs = np.array(Ts)/Tc
ax1.plot(Trs, errs)
ax2.plot(Trs, Psats_num)
ax2.plot(Trs, Psats_fit)
ax1.set_yscale('log')
ax1.set_xscale('log')
ax2.set_yscale('log')
ax2.set_xscale('log')
ax1.set_xlabel('Tr [-]')
ax1.set_ylabel('AARD [-]')
ax2.set_ylabel('Psat [Pa]')
max_err = np.max(errs)
if trunc_err_low is not None and max_err < trunc_err_low:
max_err = 0
if trunc_err_high is not None and max_err > trunc_err_high:
max_err = trunc_err_high
ax1.set_title('Vapor pressure validation; max rel err %.4e' %(max_err))
if show:
plt.show()
return errs, Psats_num, Psats_fit, fig
else:
return errs, Psats_num, Psats_fit
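# Illustrative accuracy check of the Psat fit (sketch, not part of the
# original source):
#   errs, Psats_num, Psats_fit = eos.Psat_errors(pts=30, plot=False)
#   float(errs.max())   # worst relative deviation of the fit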
# def PIP_map(self, Tmin=1e-4, Tmax=1e4, Pmin=1e-2, Pmax=1e9,
# pts=50, plot=False, show=False, color_map=None):
# # TODO rename PIP_ID_map or add flag to change if it plots PIP or bools.
# # TODO add doc
# Ts = logspace(log10(Tmin), log10(Tmax), pts)
# Ps = logspace(log10(Pmin), log10(Pmax), pts)
# kwargs = {}
# if hasattr(self, 'zs'):
# kwargs['zs'] = self.zs
#
# PIPs = []
# for T in Ts:
# PIP_row = []
# for P in Ps:
# kwargs['T'] = T
# kwargs['P'] = P
# obj = self.to(**kwargs)
## v = obj.discriminant
## # need make negatives under 1, positive above 1
## if v > 0.0:
## v = (1.0 + (1e10 - 1.0)/(1.0 + trunc_exp(-v)))
## else:
## v = (1e-10 + (1.0 - 1e-10)/(1.0 + trunc_exp(-v)))
#
# if obj.phase == 'l/g':
# v = 1
# elif obj.phase == 'g':
# v = 0
# elif obj.phase == 'l':
# v = 2
# PIP_row.append(v)
# PIPs.append(PIP_row)
#
# if plot:
# import matplotlib.pyplot as plt
# from matplotlib import ticker, cm
# from matplotlib.colors import LogNorm
# X, Y = np.meshgrid(Ts, Ps)
# z = np.array(PIPs).T
# fig, ax = plt.subplots()
# if color_map is None:
# color_map = cm.viridis
#
# im = ax.pcolormesh(X, Y, z, cmap=color_map,
## norm=LogNorm(vmin=1e-10, vmax=1e10)
# )
# cbar = fig.colorbar(im, ax=ax)
# cbar.set_label('PIP')
#
# ax.set_yscale('log')
# ax.set_xscale('log')
# ax.set_xlabel('T [K]')
# ax.set_ylabel('P [Pa]')
#
#
# ax.set_title('Volume root/phase ID validation')
# if show:
# plt.show()
#
# return PIPs, fig
def derivatives_and_departures(self, T, P, V, b, delta, epsilon, a_alpha, da_alpha_dT, d2a_alpha_dT2, quick=True):
dP_dT, dP_dV, d2P_dT2, d2P_dV2, d2P_dTdV, H_dep, S_dep, Cv_dep = (
self.main_derivatives_and_departures(T, P, V, b, delta, epsilon,
a_alpha, da_alpha_dT,
d2a_alpha_dT2))
try:
dV_dP = 1.0/dP_dV
except:
dV_dP = inf
dT_dP = 1./dP_dT
dV_dT = -dP_dT*dV_dP
dT_dV = 1./dV_dT
dV_dP2 = dV_dP*dV_dP
dV_dP3 = dV_dP*dV_dP2
inverse_dP_dT2 = dT_dP*dT_dP
inverse_dP_dT3 = inverse_dP_dT2*dT_dP
d2V_dP2 = -d2P_dV2*dV_dP3 # unused
d2T_dP2 = -d2P_dT2*inverse_dP_dT3 # unused
d2T_dV2 = (-(d2P_dV2*dP_dT - dP_dV*d2P_dTdV)*inverse_dP_dT2
+(d2P_dTdV*dP_dT - dP_dV*d2P_dT2)*inverse_dP_dT3*dP_dV) # unused
d2V_dT2 = (-(d2P_dT2*dP_dV - dP_dT*d2P_dTdV)*dV_dP2 # unused
+(d2P_dTdV*dP_dV - dP_dT*d2P_dV2)*dV_dP3*dP_dT)
d2V_dPdT = -(d2P_dTdV*dP_dV - dP_dT*d2P_dV2)*dV_dP3 # unused
d2T_dPdV = -(d2P_dTdV*dP_dT - dP_dV*d2P_dT2)*inverse_dP_dT3 # unused
return (dP_dT, dP_dV, dV_dT, dV_dP, dT_dV, dT_dP,
d2P_dT2, d2P_dV2, d2V_dT2, d2V_dP2, d2T_dV2, d2T_dP2,
d2V_dPdT, d2P_dTdV, d2T_dPdV, # d2P_dTdV is used
H_dep, S_dep, Cv_dep)
@property
def sorted_volumes(self):
r'''List of lexicographically-sorted molar volumes available from the
root finding algorithm used to solve the PT point. The convention of
sorting lexicographically comes from numpy's handling of complex
numbers, which python does not define. This method was added to
facilitate testing, as the volume solution method changes over time
and the ordering does as well.
Examples
--------
>>> PR(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6).sorted_volumes
((0.000130222125139+0j), (0.00112363131346-0.00129269672343j), (0.00112363131346+0.00129269672343j))
'''
sort_fun = lambda x: (x.real, x.imag)
full_volumes = self.volume_solutions_full(self.T, self.P, self.b, self.delta, self.epsilon, self.a_alpha)
full_volumes = [i + 0.0j for i in full_volumes]
return tuple(sorted(full_volumes, key=sort_fun))
def Tsat(self, P, polish=False):
r'''Generic method to calculate the temperature for a specified
vapor pressure of the pure fluid.
This is simply a bounded solver running between `0.2Tc` and `Tc` on the
`Psat` method.
Parameters
----------
P : float
Vapor pressure, [Pa]
polish : bool, optional
Whether to attempt to use a numerical solver to make the solution
more precise or not
Returns
-------
Tsat : float
Temperature of saturation, [K]
Notes
-----
It is recommended not to run with `polish=True`, as that will make the
calculation much slower.
'''
fprime = False
global curr_err
def to_solve_newton(T):
global curr_err
assert T > 0.0
e = self.to_TP(T, P)
try:
fugacity_l = e.fugacity_l
except AttributeError as err:
raise err
try:
fugacity_g = e.fugacity_g
except AttributeError as err:
raise err
curr_err = fugacity_l - fugacity_g
if fprime:
d_err_d_T = e.dfugacity_dT_l - e.dfugacity_dT_g
return curr_err, d_err_d_T
# print('err', err, 'rel err', err/T, 'd_err_d_T', d_err_d_T, 'T', T)
return curr_err
logP = log(P)
def to_solve(T):
global curr_err
if fprime:
dPsat_dT, Psat = self.dPsat_dT(T, polish=polish, also_Psat=True)
curr_err = Psat - P
# Log translation - tends to save a few iterations
err_trans = log(Psat) - logP
return err_trans, dPsat_dT/Psat
# return curr_err, derr_dT
curr_err = self.Psat(T, polish=polish) - P
return curr_err#, derr_dT
# return copysign(log(abs(err)), err)
# Outstanding improvements to do: Better guess; get NR working;
# see if there is a general curve
try:
Tc, Pc = self.Tc, self.Pc
except:
Tc, Pc = self.pseudo_Tc, self.pseudo_Pc
guess = -5.4*Tc/(1.0*log(P/Pc) - 5.4)
high = guess*2.0
low = guess*0.5
# return newton(to_solve, guess, fprime=True, ytol=1e-6, high=self.Pc)
# return newton(to_solve, guess, ytol=1e-6, high=self.Pc)
# Methanol is a good example of why 1.5 is needed
low_hope, high_hope = max(guess*.5, 0.2*Tc), min(Tc, guess*1.5)
try:
err_low, err_high = to_solve(low_hope), to_solve(high_hope)
if err_low*err_high < 0.0:
if guess < low_hope or guess > high_hope:
guess = 0.5*(low_hope + high_hope)
fprime = True
Tsat = newton(to_solve, guess, xtol=1.48e-10,fprime=True, low=low_hope, high=high_hope, bisection=True)
# fprime = False
# Tsat = brenth(to_solve, low_hope, high_hope)
abs_rel_err = abs(curr_err)/P
if abs_rel_err < 1e-9:
return Tsat
elif abs_rel_err < 1e-2:
guess = Tsat
else:
try:
return brenth(to_solve, 0.2*Tc, Tc)
except:
try:
return brenth(to_solve, 0.2*Tc, Tc*1.5)
except:
pass
except:
pass
fprime = True
try:
try:
Tsat = newton(to_solve_newton, guess, fprime=True, maxiter=100,
xtol=4e-13, require_eval=False, damping=1.0, low=Tc*1e-5)
except:
try:
Tsat = newton(to_solve_newton, guess, fprime=True, maxiter=100,
xtol=4e-13, require_eval=False, damping=1.0, low=low, high=high)
assert Tsat != low and Tsat != high
except:
Tsat = newton(to_solve_newton, guess, fprime=True, maxiter=250, # the wider range can take more iterations
xtol=4e-13, require_eval=False, damping=1.0, low=low, high=high*2)
assert Tsat != low and Tsat != high*2
except:
# high = self.Tc
# try:
# high = min(high, self.T_discriminant_zero_l()*(1-1e-8))
# except:
# pass
# Does not seem to be working
try:
Tsat = None
Tsat = newton(to_solve_newton, guess, fprime=True, maxiter=200, high=high, low=low,
xtol=4e-13, require_eval=False, damping=1.0)
except:
pass
fprime = False
if Tsat is None or abs(to_solve_newton(Tsat)) == P:
Tsat = brenth(to_solve_newton, low, high)
return Tsat
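# Illustrative sketch (comments only, not executed at import time): `Tsat` is
# the inverse of `Psat`, so a round trip should recover the input pressure.
# The PR object below reuses the hexane-like parameters from the doctests
# elsewhere in this class; the tolerance is an illustrative assumption.
#
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=299.0, P=1E6)
#   P_spec = 1e5                      # [Pa]
#   T_boil = eos.Tsat(P_spec)         # bounded/Newton solve on Psat(T) - P_spec
#   assert abs(eos.Psat(T_boil) - P_spec)/P_spec < 1e-6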
def Psat(self, T, polish=False, guess=None):
r'''Generic method to calculate vapor pressure for a specified `T`.
From Tc to 0.32Tc, uses a 10th order polynomial of the following form:
.. math::
\ln\frac{P_r}{T_r} = \sum_{k=0}^{10} C_k\left(\frac{\alpha}{T_r}
-1\right)^{k}
If `polish` is True, SciPy's `newton` solver is launched with the
calculated vapor pressure as an initial guess in an attempt to get more
accuracy. This may not converge however.
Results above the critical temperature are meaningless. A first-order
polynomial is used to extrapolate under 0.32 Tc; however, there is
normally not a volume solution to the EOS which can produce such a
low pressure.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to attempt to use a numerical solver to make the solution
more precise or not
Returns
-------
Psat : float
Vapor pressure, [Pa]
Notes
-----
EOSs sharing the same `b`, `delta`, and `epsilon` have the same
coefficient sets.
Form for the regression is inspired from [1]_.
No volume solution is needed when `polish=False`; the only external
call is for the value of `a_alpha`.
References
----------
.. [1] Soave, G. "Direct Calculation of Pure-Compound Vapour Pressures
through Cubic Equations of State." Fluid Phase Equilibria 31, no. 2
(January 1, 1986): 203-7. doi:10.1016/0378-3812(86)90013-0.
'''
Tc, Pc = self.Tc, self.Pc
if T == Tc:
return Pc
a_alpha = self.a_alpha_and_derivatives(T, full=False)
alpha = a_alpha/self.a
Tr = T/self.Tc
x = alpha/Tr - 1.
if Tr > 0.999 and not isinstance(self, RK):
y = horner(self.Psat_coeffs_critical, x)
Psat = y*Tr*Pc
if Psat > Pc and T < Tc:
Psat = Pc*(1.0 - 1e-14)
else:
# TWUPR/SRK TODO need to be prepared for x being way outside the range (in the weird direction - at the start)
Psat_ranges_low = self.Psat_ranges_low
if x > Psat_ranges_low[-1]:
if not polish:
raise NoSolutionError("T %.8f K is too low for equations to converge" %(T))
else:
# Needs to still be here for generating better data
x = Psat_ranges_low[-1]
polish = True
for i in range(len(Psat_ranges_low)):
if x < Psat_ranges_low[i]:
break
y = 0.0
for c in self.Psat_coeffs_low[i]:
y = y*x + c
try:
Psat = exp(y)*Tr*Pc
if Psat == 0.0:
if polish:
Psat = 1e-100
else:
raise NoSolutionError("T %.8f K is too low for equations to converge" %(T))
except OverflowError:
# coefficients sometimes overflow before T is lowered to 0.32 Tr
polish = True # There is no solution available to polish
Psat = 1
if polish:
if T > Tc:
raise ValueError("Cannot solve for equifugacity condition "
"beyond critical temperature")
if guess is not None:
Psat = guess
converged = False
def to_solve_newton(P):
# For use by newton. Only supports initialization with Tc, Pc and omega.
# ~200x slower and not guaranteed to converge (primary issue is one phase
# not existing).
assert P > 0.0
e = self.to_TP(T, P)
# print(e.volume_error(), e)
try:
fugacity_l = e.fugacity_l
except AttributeError as err:
# return 1000, 1000
raise err
try:
fugacity_g = e.fugacity_g
except AttributeError as err:
# return 1000, 1000
raise err
err = fugacity_l - fugacity_g
d_err_d_P = e.dfugacity_dP_l - e.dfugacity_dP_g # -1 for low pressure
if isnan(d_err_d_P):
d_err_d_P = -1.0
# print('err', err, 'rel err', err/P, 'd_err_d_P', d_err_d_P, 'P', P)
# Clamp the derivative - if it will step to zero or negative, dampen to half the distance which gets to zero
if (P - err/d_err_d_P) <= 0.0: # This is the one matching newton
# if (P - err*d_err_d_P) <= 0.0:
d_err_d_P = -1.0001
return err, d_err_d_P
try:
try:
boundaries = GCEOS.P_discriminant_zeros_analytical(T, self.b, self.delta, self.epsilon, a_alpha, valid=True)
low, high = min(boundaries), max(boundaries)
except:
pass
try:
high = self.P_discriminant_zero()
except:
high = Pc
# def damping_func(p0, step, damping):
# if step == 1:
# damping = damping*0.5
# p = p0 + step * damping
# return p
Psat = newton(to_solve_newton, Psat, high=high, fprime=True, maxiter=100,
xtol=4e-13, require_eval=False, damping=1.0) # ,ytol=1e-6*Psat # damping_func=damping_func
# print(to_solve_newton(Psat), 'newton error')
converged = True
except:
pass
if not converged:
def to_solve_bisect(P):
e = self.to_TP(T, P)
# print(e.volume_error(), e)
try:
fugacity_l = e.fugacity_l
except AttributeError as err:
return 1e20
try:
fugacity_g = e.fugacity_g
except AttributeError as err:
return -1e20
err = fugacity_l - fugacity_g
# print(err, 'err', 'P', P)
return err
for low, high in zip([.98*Psat, 1, 1e-40, Pc*.9, Psat*.9999], [1.02*Psat, Pc, 1, Pc*1.000000001, Pc]):
try:
Psat = bisect(to_solve_bisect, low, high, ytol=1e-6*Psat, maxiter=128)
# print(to_solve_bisect(Psat), 'bisect error')
converged = True
break
except:
pass
# Last ditch attempt
if not converged:
# raise ValueError("Could not converge")
if Tr > 0.5:
# Near critical temperature issues
points = [Pc*f for f in linspace(1e-3, 1-1e-8, 50) + linspace(.9, 1-1e-8, 50)]
ytol = 1e-6*Psat
else:
# Low temperature issues
points = [Psat*f for f in logspace(-5.5, 5.5, 16)]
# points = [Psat*f for f in logspace(-2.5, 2.5, 100)]
ytol = None # Cryogenic point unlikely to work to desired tolerance
# Work on point closer to Psat first
points.sort(key=lambda x: abs(log10(x)))
low, high = None, None
for point in points:
try:
err = to_solve_newton(point)[0] # Do not use bisect function as it does not raise errors
if err > 0.0:
high = point
elif err < 0.0:
low = point
except:
pass
if low is not None and high is not None:
# print('reached bisection')
Psat = brenth(to_solve_bisect, low, high, ytol=ytol, maxiter=128)
# print(to_solve_bisect(Psat), 'bisect error')
converged = True
break
# print('tried all points')
# Check that the fugacity error vs. Psat is OK
if abs(to_solve_bisect(Psat)/Psat) > .0001:
converged = False
if not converged:
raise ValueError("Could not converge at T=%.6f K" %(T))
return Psat
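# Rough usage sketch (comments only). The fast path evaluates the fitted
# ln(Pr/Tr) polynomial in (alpha/Tr - 1); passing polish=True refines the
# result by solving the equal-fugacity condition numerically. How closely the
# two agree depends on the fit and is not asserted here.
#
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=299.0, P=1E6)
#   Psat_fit = eos.Psat(400.0)                    # polynomial fit only
#   Psat_polished = eos.Psat(400.0, polish=True)  # equal-fugacity refinement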
def dPsat_dT(self, T, polish=False, also_Psat=False):
r'''Generic method to calculate the temperature derivative of vapor
pressure for a specified `T`. Implements the analytical derivative
of the three polynomials described in `Psat`.
As with `Psat`, results above the critical temperature are meaningless.
The first-order polynomial which is used to calculate it under 0.32 Tc
may not be physically meaningful, due to there normally not being a
volume solution to the EOS which can produce such a low pressure.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to attempt to use a numerical solver to make the solution
more precise or not
also_Psat : bool, optional
Calculating `dPsat_dT` necessarily involves calculating `Psat`;
when this is set to True, a second return value is added, which is
the actual `Psat` value.
Returns
-------
dPsat_dT : float
Derivative of vapor pressure with respect to temperature, [Pa/K]
Psat : float, returned if `also_Psat` is `True`
Vapor pressure, [Pa]
Notes
-----
There is a small step change at 0.32 Tc for all EOSs due to the
switch between polynomials at that point.
Useful for calculating enthalpy of vaporization with the Clausius
Clapeyron Equation. Derived with SymPy's diff and cse.
'''
if polish:
# Calculate the derivative of saturation pressure analytically
Psat = self.Psat(T, polish=polish)
sat_eos = self.to(T=T, P=Psat)
dfg_T, dfl_T = sat_eos.dfugacity_dT_g, sat_eos.dfugacity_dT_l
dfg_P, dfl_P = sat_eos.dfugacity_dP_g, sat_eos.dfugacity_dP_l
dPsat_dT = (dfg_T - dfl_T)/(dfl_P - dfg_P)
if also_Psat:
return dPsat_dT, Psat
return dPsat_dT
a_alphas = self.a_alpha_and_derivatives(T)
a_inv = 1.0/self.a
try:
Tc, Pc = self.Tc, self.Pc
except:
Tc, Pc = self.pseudo_Tc, self.pseudo_Pc
alpha, d_alpha_dT = a_alphas[0]*a_inv, a_alphas[1]*a_inv
Tc_inv = 1.0/Tc
T_inv = 1.0/T
Tr = T*Tc_inv
# if Tr < 0.32 and not isinstance(self, PR):
# # Delete
# c = self.Psat_coeffs_limiting
# return self.Pc*T*c[0]*(self.Tc*d_alpha_dT/T - self.Tc*alpha/(T*T)
# )*exp(c[0]*(-1. + self.Tc*alpha/T) + c[1]
# )/self.Tc + self.Pc*exp(c[0]*(-1.
# + self.Tc*alpha/T) + c[1])/self.Tc
if Tr > 0.999 and not isinstance(self, RK):
# OK
x = alpha/Tr - 1.
y = horner(self.Psat_coeffs_critical, x)
dy_dT = T_inv*(Tc*d_alpha_dT - Tc*alpha*T_inv)*horner(self.Psat_coeffs_critical_der, x)
dPsat_dT = Pc*(T*dy_dT*Tc_inv + y*Tc_inv)
if also_Psat:
Psat = y*Tr*Pc
return dPsat_dT, Psat
return dPsat_dT
else:
Psat_coeffs_low = self.Psat_coeffs_low
Psat_ranges_low = self.Psat_ranges_low
x = alpha/Tr - 1.
if x > Psat_ranges_low[-1]:
raise NoSolutionError("T %.8f K is too low for equations to converge" %(T))
for i in range(len(Psat_ranges_low)):
if x < Psat_ranges_low[i]:
break
y, dy = 0.0, 0.0
for c in Psat_coeffs_low[i]:
dy = x*dy + y
y = x*y + c
exp_y = exp(y)
dy_dT = Tc*T_inv*(d_alpha_dT - alpha*T_inv)*dy#horner_and_der(Psat_coeffs_low[i], x)[1]
Psat = exp_y*Tr*Pc
dPsat_dT = (T*dy_dT + 1.0)*Pc*exp_y*Tc_inv
if also_Psat:
return dPsat_dT, Psat
return dPsat_dT
# # change chebval to horner, and get new derivative
# x = alpha/Tr - 1.
# arg = (self.Psat_cheb_constant_factor[1]*(x + self.Psat_cheb_constant_factor[0]))
# y = chebval(arg, self.Psat_cheb_coeffs)
#
# exp_y = exp(y)
# dy_dT = T_inv*(Tc*d_alpha_dT - Tc*alpha*T_inv)*chebval(arg,
# self.Psat_cheb_coeffs_der)*self.Psat_cheb_constant_factor[1]
# Psat = Pc*T*exp_y*dy_dT*Tc_inv + Pc*exp_y*Tc_inv
# return Psat
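# Sketch of the Clausius-Clapeyron use mentioned in the docstring (comments
# only; nothing here is executed at import time):
#
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=299.0, P=1E6)
#   dPsat_dT_350, Psat_350 = eos.dPsat_dT(350.0, also_Psat=True)
#   Hvap_350 = dPsat_dT_350*350.0*(eos.V_g_sat(350.0) - eos.V_l_sat(350.0))
#   # Hvap_350 should match eos.Hvap(350.0), which applies the same relation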
def phi_sat(self, T, polish=True):
r'''Method to calculate the saturation fugacity coefficient of the
compound. This does not require solving the EOS itself.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to perform a rigorous calculation or to use a polynomial
fit, [-]
Returns
-------
phi_sat : float
Fugacity coefficient along the liquid-vapor saturation line, [-]
Notes
-----
Accuracy is generally around 1e-7. If Tr is under 0.32, the rigorous
method is always used, but a solution may not exist if both phases
cannot coexist. If Tr is above 1, likewise a solution does not exist.
'''
Tr = T/self.Tc
if polish or not 0.32 <= Tr <= 1.0:
e = self.to_TP(T=T, P=self.Psat(T, polish=True)) # True
try:
return e.phi_l
except:
return e.phi_g
alpha = self.a_alpha_and_derivatives(T, full=False)/self.a
x = alpha/Tr - 1.
return horner(self.phi_sat_coeffs, x)
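# Hedged consistency sketch (comments only): along the saturation line the
# liquid and vapor fugacity coefficients coincide, which is the quantity
# `phi_sat` returns without the caller having to solve both phases.
#
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=299.0, P=1E6)
#   sat = eos.to_TP(T=400.0, P=eos.Psat(400.0, polish=True))
#   # sat.phi_l and sat.phi_g should both be close to eos.phi_sat(400.0)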
def dphi_sat_dT(self, T, polish=True):
r'''Method to calculate the temperature derivative of saturation
fugacity coefficient of the
compound. This does require solving the EOS itself.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to perform a rigorous calculation or to use a polynomial
fit, [-]
Returns
-------
dphi_sat_dT : float
First temperature derivative of fugacity coefficient along the
liquid-vapor saturation line, [1/K]
Notes
-----
'''
if T == self.Tc:
T = (self.Tc*(1.0 - 1e-15))
Psat = self.Psat(T, polish=polish)
sat_eos = self.to(T=T, P=Psat)
dfg_T, dfl_T = sat_eos.dfugacity_dT_g, sat_eos.dfugacity_dT_l
dfg_P, dfl_P = sat_eos.dfugacity_dP_g, sat_eos.dfugacity_dP_l
dPsat_dT = (dfg_T - dfl_T)/(dfl_P - dfg_P)
fugacity = sat_eos.fugacity_l
dfugacity_sat_dT = dPsat_dT*sat_eos.dfugacity_dP_l + sat_eos.dfugacity_dT_l
Psat_inv = 1.0/Psat
return (dfugacity_sat_dT - fugacity*dPsat_dT*Psat_inv)*Psat_inv
def d2phi_sat_dT2(self, T, polish=True):
r'''Method to calculate the second temperature derivative of saturation
fugacity coefficient of the
compound. This does require solving the EOS itself.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to perform a rigorous calculation or to use a polynomial
fit, [-]
Returns
-------
d2phi_sat_dT2 : float
Second temperature derivative of fugacity coefficient along the
liquid-vapor saturation line, [1/K^2]
Notes
-----
This is presently a numerical calculation.
'''
return derivative(lambda T: self.dphi_sat_dT(T, polish=polish), T,
dx=T*1e-7, upper_limit=self.Tc)
def V_l_sat(self, T):
r'''Method to calculate molar volume of the liquid phase along the
saturation line.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
V_l_sat : float
Liquid molar volume along the saturation line, [m^3/mol]
Notes
-----
Computes `Psat`, and then uses `volume_solutions` to obtain the three
possible molar volumes. The lowest value is returned.
'''
Psat = self.Psat(T)
a_alpha = self.a_alpha_and_derivatives(T, full=False)
Vs = self.volume_solutions(T, Psat, self.b, self.delta, self.epsilon, a_alpha)
# Assume we can safely take the Vmax as gas, Vmin as l on the saturation line
return min([i.real for i in Vs if i.real > self.b])
def V_g_sat(self, T):
r'''Method to calculate molar volume of the vapor phase along the
saturation line.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
V_g_sat : float
Gas molar volume along the saturation line, [m^3/mol]
Notes
-----
Computes `Psat`, and then uses `volume_solutions` to obtain the three
possible molar volumes. The highest value is returned.
'''
Psat = self.Psat(T)
a_alpha = self.a_alpha_and_derivatives(T, full=False)
Vs = self.volume_solutions(T, Psat, self.b, self.delta, self.epsilon, a_alpha)
# Assume we can safely take the Vmax as gas, Vmin as l on the saturation line
return max([i.real for i in Vs])
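# Minimal sketch (comments only) of how the two saturation-volume helpers pair
# up; V_l_sat < V_g_sat below the critical point follows directly from taking
# the min/max real roots at (T, Psat).
#
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=299.0, P=1E6)
#   V_l, V_g = eos.V_l_sat(350.0), eos.V_g_sat(350.0)
#   assert V_l < V_g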
def Hvap(self, T):
r'''Method to calculate enthalpy of vaporization for a pure fluid from
an equation of state, without iteration.
.. math::
\frac{dP^{sat}}{dT}=\frac{\Delta H_{vap}}{T(V_g - V_l)}
Results above the critical temperature are meaningless. A first-order
polynomial is used to extrapolate under 0.32 Tc; however, there is
normally not a volume solution to the EOS which can produce such a
low pressure.
Parameters
----------
T : float
Temperature, [K]
Returns
-------
Hvap : float
Increase in enthalpy needed for vaporization of liquid phase along
the saturation line, [J/mol]
Notes
-----
Calculates vapor pressure and its derivative with `Psat` and `dPsat_dT`
as well as molar volumes of the saturation liquid and vapor phase in
the process.
Very near the critical point this provides unrealistic results due to
`Psat`'s polynomials being insufficiently accurate.
References
----------
.. [1] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
Butterworth-Heinemann, 1985.
'''
Psat = self.Psat(T)
dPsat_dT = self.dPsat_dT(T)
a_alpha = self.a_alpha_and_derivatives(T, full=False)
Vs = self.volume_solutions(T, Psat, self.b, self.delta, self.epsilon, a_alpha)
# Assume we can safely take the Vmax as gas, Vmin as l on the saturation line
Vs = [i.real for i in Vs]
V_l, V_g = min(Vs), max(Vs)
return dPsat_dT*T*(V_g - V_l)
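# Alternative cross-check, sketched in comments only: at a saturation state the
# ideal-gas enthalpy contributions cancel, so Hvap should also equal
# H_dep_g - H_dep_l evaluated at (T, Psat). Disagreement is expected where the
# Psat polynomials are least accurate, e.g. very near the critical point.
#
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=299.0, P=1E6)
#   sat = eos.to(T=350.0, P=eos.Psat(350.0, polish=True))
#   # eos.Hvap(350.0) vs (sat.H_dep_g - sat.H_dep_l): expected to be close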
def dH_dep_dT_sat_l(self, T, polish=False):
r'''Method to calculate and return the temperature derivative of
saturation liquid excess enthalpy.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to perform a rigorous calculation or to use a polynomial
fit, [-]
Returns
-------
dH_dep_dT_sat_l : float
Liquid phase temperature derivative of excess enthalpy along the
liquid-vapor saturation line, [J/mol/K]
Notes
-----
'''
sat_eos = self.to(T=T, P=self.Psat(T, polish=polish))
dfg_T, dfl_T = sat_eos.dfugacity_dT_g, sat_eos.dfugacity_dT_l
dfg_P, dfl_P = sat_eos.dfugacity_dP_g, sat_eos.dfugacity_dP_l
dPsat_dT = (dfg_T - dfl_T)/(dfl_P - dfg_P)
return dPsat_dT*sat_eos.dH_dep_dP_l + sat_eos.dH_dep_dT_l
def dH_dep_dT_sat_g(self, T, polish=False):
r'''Method to calculate and return the temperature derivative of
saturation vapor excess enthalpy.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to perform a rigorous calculation or to use a polynomial
fit, [-]
Returns
-------
dH_dep_dT_sat_g : float
Vapor phase temperature derivative of excess enthalpy along the
liquid-vapor saturation line, [J/mol/K]
Notes
-----
'''
sat_eos = self.to(T=T, P=self.Psat(T, polish=polish))
dfg_T, dfl_T = sat_eos.dfugacity_dT_g, sat_eos.dfugacity_dT_l
dfg_P, dfl_P = sat_eos.dfugacity_dP_g, sat_eos.dfugacity_dP_l
dPsat_dT = (dfg_T - dfl_T)/(dfl_P - dfg_P)
return dPsat_dT*sat_eos.dH_dep_dP_g + sat_eos.dH_dep_dT_g
def dS_dep_dT_sat_g(self, T, polish=False):
r'''Method to calculate and return the temperature derivative of
saturation vapor excess entropy.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to perform a rigorous calculation or to use a polynomial
fit, [-]
Returns
-------
dS_dep_dT_sat_g : float
Vapor phase temperature derivative of excess entropy along the
liquid-vapor saturation line, [J/mol/K^2]
Notes
-----
'''
sat_eos = self.to(T=T, P=self.Psat(T, polish=polish))
dfg_T, dfl_T = sat_eos.dfugacity_dT_g, sat_eos.dfugacity_dT_l
dfg_P, dfl_P = sat_eos.dfugacity_dP_g, sat_eos.dfugacity_dP_l
dPsat_dT = (dfg_T - dfl_T)/(dfl_P - dfg_P)
return dPsat_dT*sat_eos.dS_dep_dP_g + sat_eos.dS_dep_dT_g
def dS_dep_dT_sat_l(self, T, polish=False):
r'''Method to calculate and return the temperature derivative of
saturation liquid excess entropy.
Parameters
----------
T : float
Temperature, [K]
polish : bool, optional
Whether to perform a rigorous calculation or to use a polynomial
fit, [-]
Returns
-------
dS_dep_dT_sat_l : float
Liquid phase temperature derivative of excess entropy along the
liquid-vapor saturation line, [J/mol/K^2]
Notes
-----
'''
sat_eos = self.to(T=T, P=self.Psat(T, polish=polish))
dfg_T, dfl_T = sat_eos.dfugacity_dT_g, sat_eos.dfugacity_dT_l
dfg_P, dfl_P = sat_eos.dfugacity_dP_g, sat_eos.dfugacity_dP_l
dPsat_dT = (dfg_T - dfl_T)/(dfl_P - dfg_P)
return dPsat_dT*sat_eos.dS_dep_dP_l + sat_eos.dS_dep_dT_l
def a_alpha_for_V(self, T, P, V):
r'''Method to calculate which value of :math:`a \alpha` is required for
a given `T`, `P` pair to match a specified `V`. This is a
straightforward analytical equation.
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
Returns
-------
a_alpha : float
Value calculated to match specified volume for the current EOS,
[J^2/mol^2/Pa]
Notes
-----
The derivation of the solution is as follows:
>>> from sympy import * # doctest:+SKIP
>>> P, T, V, R, b, a, delta, epsilon = symbols('P, T, V, R, b, a, delta, epsilon') # doctest:+SKIP
>>> a_alpha = symbols('a_alpha') # doctest:+SKIP
>>> CUBIC = R*T/(V-b) - a_alpha/(V*V + delta*V + epsilon) # doctest:+SKIP
>>> solve(Eq(CUBIC, P), a_alpha)# doctest:+SKIP
[(-P*V**3 + P*V**2*b - P*V**2*delta + P*V*b*delta - P*V*epsilon + P*b*epsilon + R*T*V**2 + R*T*V*delta + R*T*epsilon)/(V - b)]
'''
b, delta, epsilon = self.b, self.delta, self.epsilon
x0 = P*b
x1 = R*T
x2 = V*delta
x3 = V*V
x4 = x3*V
return ((-P*x4 - P*V*epsilon - P*delta*x3 + epsilon*x0 + epsilon*x1
+ x0*x2 + x0*x3 + x1*x2 + x1*x3)/(V - b))
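# Round-trip sketch (comments only): the analytical expression above should
# reproduce the object's own a_alpha when fed a volume solved at the same
# (T, P) state.
#
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=299.0, P=1E6)
#   back = eos.a_alpha_for_V(eos.T, eos.P, eos.V_l)
#   # back should agree with eos.a_alpha to roughly machine precision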
def a_alpha_for_Psat(self, T, Psat, a_alpha_guess=None):
r'''Method to calculate which value of :math:`a \alpha` is required for
a given `T`, `Psat` pair. This is a numerical solution, but not a very
complicated one.
Parameters
----------
T : float
Temperature, [K]
Psat : float
Vapor pressure specified, [Pa]
a_alpha_guess : float, optional
An initial guess for the solver, [J^2/mol^2/Pa]
Returns
-------
a_alpha : float
Value calculated to match the specified vapor pressure for the current EOS,
[J^2/mol^2/Pa]
Notes
-----
The implementation of this function is a direct calculation of
departure Gibbs energy, which is equal in both phases at saturation.
Examples
--------
>>> eos = PR(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> eos.a_alpha_for_Psat(T=400, Psat=5e5)
3.1565798926
'''
P = Psat
b, delta, epsilon = self.b, self.delta, self.epsilon
RT = R*T
RT_inv = 1.0/RT
x0 = 1.0/sqrt(delta*delta - 4.0*epsilon)
x1 = delta*x0
x2 = 2.0*x0
def fug(V, a_alpha):
# Can simplify this to not use a function; avoids 1 log anyway
G_dep = (P*V - RT - RT*log(P*RT_inv*(V-b))
- x2*a_alpha*catanh(2.0*V*x0 + x1).real)
return G_dep # No point going all the way to fugacity
def err(a_alpha):
# Needs some work right up to critical point
Vs = self.volume_solutions(T, P, b, delta, epsilon, a_alpha)
good_roots = [i.real for i in Vs if i.imag == 0.0 and i.real > 0.0]
good_root_count = len(good_roots)
if good_root_count == 1:
raise ValueError("Guess did not have two roots")
V_l, V_g = min(good_roots), max(good_roots)
# print(V_l, V_g, a_alpha)
return fug(V_l, a_alpha) - fug(V_g, a_alpha)
if a_alpha_guess is None:
try:
a_alpha_guess = self.a_alpha
except AttributeError:
a_alpha_guess = 0.002
try:
return secant(err, a_alpha_guess, xtol=1e-13)
except:
return secant(err, self.to(T=T, P=Psat).a_alpha, xtol=1e-13)
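# Inverse-problem sketch (comments only), mirroring the docstring example: the
# a_alpha returned here should make Psat(T) reproduce the requested pressure
# when substituted back, since equal departure Gibbs energies define the
# saturation condition.
#
#   eos = PR(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
#   a_alpha_400 = eos.a_alpha_for_Psat(T=400, Psat=5e5)  # ~3.1566 per the doctest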
def to_TP(self, T, P):
r'''Method to construct a new EOS object at the specified `T` and `P`.
In the event the `T` and `P` match the current object's `T` and `P`,
it will be returned unchanged.
Parameters
----------
T : float
Temperature, [K]
P : float
Pressure, [Pa]
Returns
-------
obj : EOS
Pure component EOS at specified `T` and `P`, [-]
Notes
-----
Constructs the object with parameters `Tc`, `Pc`, `omega`, and
`kwargs`.
Examples
--------
>>> base = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=500.0, P=1E6)
>>> new = base.to_TP(T=1.0, P=2.0)
>>> base.state_specs, new.state_specs
({'T': 500.0, 'P': 1000000.0}, {'T': 1.0, 'P': 2.0})
'''
if T != self.T or P != self.P:
return self.__class__(T=T, P=P, Tc=self.Tc, Pc=self.Pc, omega=self.omega, **self.kwargs)
else:
return self
def to_TV(self, T, V):
r'''Method to construct a new EOS object at the specified `T` and `V`.
In the event the `T` and `V` match the current object's `T` and `V`,
it will be returned unchanged.
Parameters
----------
T : float
Temperature, [K]
V : float
Molar volume, [m^3/mol]
Returns
-------
obj : EOS
Pure component EOS at specified `T` and `V`, [-]
Notes
-----
Constructs the object with parameters `Tc`, `Pc`, `omega`, and
`kwargs`.
Examples
--------
>>> base = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=500.0, P=1E6)
>>> new = base.to_TV(T=1000000.0, V=1.0)
>>> base.state_specs, new.state_specs
({'T': 500.0, 'P': 1000000.0}, {'T': 1000000.0, 'V': 1.0})
'''
if T != self.T or V != self.V:
# Only allow creation of new class if volume actually specified
# Ignores the possibility that V is V_l or V_g
return self.__class__(T=T, V=V, Tc=self.Tc, Pc=self.Pc, omega=self.omega, **self.kwargs)
else:
return self
def to_PV(self, P, V):
r'''Method to construct a new EOS object at the specified `P` and `V`.
In the event the `P` and `V` match the current object's `P` and `V`,
it will be returned unchanged.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
Returns
-------
obj : EOS
Pure component EOS at specified `P` and `V`, [-]
Notes
-----
Constructs the object with parameters `Tc`, `Pc`, `omega`, and
`kwargs`.
Examples
--------
>>> base = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=500.0, P=1E6)
>>> new = base.to_PV(P=1000.0, V=1.0)
>>> base.state_specs, new.state_specs
({'T': 500.0, 'P': 1000000.0}, {'P': 1000.0, 'V': 1.0})
'''
if P != self.P or V != self.V:
return self.__class__(V=V, P=P, Tc=self.Tc, Pc=self.Pc, omega=self.omega, **self.kwargs)
else:
return self
def to(self, T=None, P=None, V=None):
r'''Method to construct a new EOS object at two of `T`, `P` or `V`.
In the event the specs match those of the current object, it will be
returned unchanged.
Parameters
----------
T : float or None, optional
Temperature, [K]
P : float or None, optional
Pressure, [Pa]
V : float or None, optional
Molar volume, [m^3/mol]
Returns
-------
obj : EOS
Pure component EOS at the two specified specs, [-]
Notes
-----
Constructs the object with parameters `Tc`, `Pc`, `omega`, and
`kwargs`.
Examples
--------
>>> base = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=500.0, P=1E6)
>>> base.to(T=300.0, P=1e9).state_specs
{'T': 300.0, 'P': 1000000000.0}
>>> base.to(T=300.0, V=1.0).state_specs
{'T': 300.0, 'V': 1.0}
>>> base.to(P=1e5, V=1.0).state_specs
{'P': 100000.0, 'V': 1.0}
'''
if T is not None and P is not None:
return self.to_TP(T, P)
elif T is not None and V is not None:
return self.to_TV(T, V)
elif P is not None and V is not None:
return self.to_PV(P, V)
else:
# Fewer than two specs provided; pass through to the constructor so it raises its own error
return self.__class__(T=T, V=V, P=P, Tc=self.Tc, Pc=self.Pc, omega=self.omega, **self.kwargs)
def T_min_at_V(self, V, Pmin=1e-15):
'''Returns the minimum temperature at which the EOS can produce the
specified volume. Below this temperature, the pressure goes
negative (and the EOS will not solve).
'''
return self.solve_T(P=Pmin, V=V)
def T_max_at_V(self, V, Pmax=None):
r'''Method to calculate the maximum temperature the EOS can reach at a
constant volume, if one exists; returns None otherwise.
Parameters
----------
V : float
Constant molar volume, [m^3/mol]
Pmax : float
Maximum possible isochoric pressure, if already known [Pa]
Returns
-------
T : float
Maximum possible temperature, [K]
Notes
-----
Examples
--------
>>> e = PR(P=1e5, V=0.0001437, Tc=512.5, Pc=8084000.0, omega=0.559)
>>> e.T_max_at_V(e.V)
431155.5
'''
if Pmax is None:
Pmax = self.P_max_at_V(V)
if Pmax is None:
return None
return self.solve_T(P=Pmax, V=V)
def P_max_at_V(self, V):
r'''Dummy method. Subclasses that implement this method calculate the
maximum pressure the EOS can create at a constant volume, if one exists;
this base implementation always returns None.
Parameters
----------
V : float
Constant molar volume, [m^3/mol]
Returns
-------
P : float
Maximum possible isochoric pressure, [Pa]
'''
return None
@property
def more_stable_phase(self):
r'''Checks the Gibbs energy of each possible phase, and returns
'l' if the liquid-like phase is more stable, and 'g' if the vapor-like
phase is more stable.
Examples
--------
>>> PR(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6).more_stable_phase
'l'
'''
try:
if self.G_dep_l < self.G_dep_g:
return 'l'
else:
return 'g'
except:
try:
self.Z_g
return 'g'
except:
return 'l'
def discriminant(self, T=None, P=None):
r'''Method to compute the discriminant of the cubic volume solution
with the current EOS parameters, at the object's current `T` and `P`
by default, or at different values if they are specified.
Parameters
----------
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
Returns
-------
discriminant : float
Discriminant, [-]
Notes
-----
This call is quite quick; only :math:`a \alpha` is needed, and if `T` is
the same as the current object's then it has already been computed.
The formula is as follows:
.. math::
\text{discriminant} = - \left(- \frac{27 P^{2} \epsilon \left(
\frac{P b}{R T} + 1\right)}{R^{2} T^{2}} - \frac{27 P^{2} b
\operatorname{a \alpha}{\left(T \right)}}{R^{3} T^{3}}\right)
\left(- \frac{P^{2} \epsilon \left(\frac{P b}{R T} + 1\right)}
{R^{2} T^{2}} - \frac{P^{2} b \operatorname{a \alpha}{\left(T
\right)}}{R^{3} T^{3}}\right) + \left(- \frac{P^{2} \epsilon \left(
\frac{P b}{R T} + 1\right)}{R^{2} T^{2}} - \frac{P^{2} b
\operatorname{a \alpha}{\left(T \right)}}{R^{3} T^{3}}\right)
\left(- \frac{18 P b}{R T} + \frac{18 P \delta}{R T} - 18\right)
\left(\frac{P^{2} \epsilon}{R^{2} T^{2}} - \frac{P \delta \left(
\frac{P b}{R T} + 1\right)}{R T} + \frac{P \operatorname{a \alpha}
{\left(T \right)}}{R^{2} T^{2}}\right) - \left(- \frac{P^{2}
\epsilon \left(\frac{P b}{R T} + 1\right)}{R^{2} T^{2}} - \frac{
P^{2} b \operatorname{a \alpha}{\left(T \right)}}{R^{3} T^{3}}
\right) \left(- \frac{4 P b}{R T} + \frac{4 P \delta}{R T}
- 4\right) \left(- \frac{P b}{R T} + \frac{P \delta}{R T}
- 1\right)^{2} + \left(- \frac{P b}{R T} + \frac{P \delta}{R T}
- 1\right)^{2} \left(\frac{P^{2} \epsilon}{R^{2} T^{2}} - \frac{P
\delta \left(\frac{P b}{R T} + 1\right)}{R T} + \frac{P
\operatorname{a \alpha}{\left(T \right)}}{R^{2} T^{2}}\right)^{2}
- \left(\frac{P^{2} \epsilon}{R^{2} T^{2}} - \frac{P \delta \left(
\frac{P b}{R T} + 1\right)}{R T} + \frac{P \operatorname{a \alpha}{
\left(T \right)}}{R^{2} T^{2}}\right)^{2} \left(\frac{4 P^{2}
\epsilon}{R^{2} T^{2}} - \frac{4 P \delta \left(\frac{P b}{R T}
+ 1\right)}{R T} + \frac{4 P \operatorname{a \alpha}{\left(T
\right)}}{R^{2} T^{2}}\right)
The formula is derived as follows:
>>> from sympy import *
>>> P, T, R, b = symbols('P, T, R, b')
>>> a_alpha = symbols(r'a\ \alpha', cls=Function)
>>> delta, epsilon = symbols('delta, epsilon')
>>> eta = b
>>> B = b*P/(R*T)
>>> deltas = delta*P/(R*T)
>>> thetas = a_alpha(T)*P/(R*T)**2
>>> epsilons = epsilon*(P/(R*T))**2
>>> etas = eta*P/(R*T)
>>> a = 1
>>> b = (deltas - B - 1)
>>> c = (thetas + epsilons - deltas*(B+1))
>>> d = -(epsilons*(B+1) + thetas*etas)
>>> disc = b*b*c*c - 4*a*c*c*c - 4*b*b*b*d - 27*a*a*d*d + 18*a*b*c*d
Examples
--------
>>> base = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=500.0, P=1E6)
>>> base.discriminant()
-0.001026390999
>>> base.discriminant(T=400)
0.0010458828
>>> base.discriminant(T=400, P=1e9)
12584660355.4
'''
if P is None:
P = self.P
if T is None:
T = self.T
a_alpha = self.a_alpha
else:
a_alpha = self.a_alpha_and_derivatives(T, full=False)
RT = R*self.T
RT6 = RT*RT
RT6 *= RT6*RT6
x0 = P*P
x1 = P*self.b + RT
x2 = a_alpha*self.b + self.epsilon*x1
x3 = P*self.epsilon
x4 = self.delta*x1
x5 = -P*self.delta + x1
x6 = a_alpha + x3 - x4
x2_2 = x2*x2
x5_2 = x5*x5
x6_2 = x6*x6
x7 = (-a_alpha - x3 + x4)
return x0*(18.0*P*x2*x5*x6 + 4.0*P*x7*x7*x7
- 27.0*x0*x2_2 - 4.0*x2*x5_2*x5 + x5_2*x6_2)/RT6
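# Usage sketch (comments only): the sign of this discriminant separates the
# one-real-root and three-real-root regions of the cubic, and it crosses zero
# at the pressures located by P_discriminant_zero_l/_g below, so it is mainly
# useful for cheaply detecting phase-existence boundaries.
#
#   base = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=500.0, P=1E6)
#   d_here = base.discriminant()        # at the object's own T and P
#   d_400K = base.discriminant(T=400)   # same P, different T (see doctest values)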
def _discriminant_at_T_mp(self, P):
# Hopefully numerical difficulties can eventually be figured out so as to
# not need mpmath
import mpmath as mp
mp.mp.dps = 70
P, T, b, a_alpha, delta, epsilon, R_mp = [mp.mpf(i) for i in [P, self.T, self.b, self.a_alpha, self.delta, self.epsilon, R]]
RT = R_mp*T
RT6 = RT**6
x0 = P*P
x1 = P*b + RT
x2 = a_alpha*b + epsilon*x1
x3 = P*epsilon
x4 = delta*x1
x5 = -P*delta + x1
x6 = a_alpha + x3 - x4
x2_2 = x2*x2
x5_2 = x5*x5
x6_2 = x6*x6
disc = (x0*(18.0*P*x2*x5*x6 + 4.0*P*(-a_alpha - x3 + x4)**3
- 27.0*x0*x2_2 - 4.0*x2*x5_2*x5 + x5_2*x6_2)/RT6)
return disc
def P_discriminant_zero_l(self):
r'''Method to calculate the pressure which zeros the discriminant
function of the general cubic eos, and which is likely to sit on the boundary
between not having a liquid-like volume and having one.
Returns
-------
P_discriminant_zero_l : float
Pressure which makes the discriminant zero at the right condition,
[Pa]
Notes
-----
Examples
--------
>>> eos = PRTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> P_trans = eos.P_discriminant_zero_l()
>>> P_trans
478346.37289
In this case, the discriminant transition shows the change in roots:
>>> eos.to(T=eos.T, P=P_trans*.99999999).mpmath_volumes_float
((0.00013117994140177062+0j), (0.002479717165903531+0j), (0.002480236178570793+0j))
>>> eos.to(T=eos.T, P=P_trans*1.0000001).mpmath_volumes_float
((0.0001311799413872173+0j), (0.002479976386402769-8.206310112063695e-07j), (0.002479976386402769+8.206310112063695e-07j))
'''
return self._P_discriminant_zero(low=True)
def P_discriminant_zero_g(self):
r'''Method to calculate the pressure which zeros the discriminant
function of the general cubic eos, and which is likely to sit on the boundary
between not having a vapor-like volume and having one.
Returns
-------
P_discriminant_zero_g : float
Pressure which makes the discriminant zero at the right condition,
[Pa]
Notes
-----
Examples
--------
>>> eos = PRTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> P_trans = eos.P_discriminant_zero_g()
>>> P_trans
149960391.7
In this case, the discriminant transition does not reveal a transition
to two roots being available, only negative roots becoming negative
and imaginary.
>>> eos.to(T=eos.T, P=P_trans*.99999999).mpmath_volumes_float
((-0.0001037013146195082-1.5043987866732543e-08j), (-0.0001037013146195082+1.5043987866732543e-08j), (0.00011799201928619508+0j))
>>> eos.to(T=eos.T, P=P_trans*1.0000001).mpmath_volumes_float
((-0.00010374888853182635+0j), (-0.00010365374200380354+0j), (0.00011799201875924273+0j))
'''
return self._P_discriminant_zero(low=False)
def P_discriminant_zeros(self):
r'''Method to calculate the pressures which zero the discriminant
function of the general cubic eos, at the current temperature.
Returns
-------
P_discriminant_zeros : list[float]
Pressures which make the discriminant zero, [Pa]
Notes
-----
Examples
--------
>>> eos = PRTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> eos.P_discriminant_zeros()
[478346.3, 149960391.7]
'''
return GCEOS.P_discriminant_zeros_analytical(self.T, self.b, self.delta, self.epsilon, self.a_alpha, valid=True)
@staticmethod
def P_discriminant_zeros_analytical(T, b, delta, epsilon, a_alpha, valid=False):
r'''Method to calculate the pressures which zero the discriminant
function of the general cubic eos. This is a quartic function
solved analytically.
Parameters
----------
T : float
Temperature, [K]
b : float
Coefficient calculated by EOS-specific method, [m^3/mol]
delta : float
Coefficient calculated by EOS-specific method, [m^3/mol]
epsilon : float
Coefficient calculated by EOS-specific method, [m^6/mol^2]
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
valid : bool
Whether to filter the calculated pressures so that they are all
real, and positive only, [-]
Returns
-------
P_discriminant_zeros : list[float]
Pressures which make the discriminant zero, [Pa]
Notes
-----
Calculated analytically. Derived as follows.
>>> from sympy import *
>>> P, T, V, R, b, a, delta, epsilon = symbols('P, T, V, R, b, a, delta, epsilon')
>>> eta = b
>>> B = b*P/(R*T)
>>> deltas = delta*P/(R*T)
>>> thetas = a*P/(R*T)**2
>>> epsilons = epsilon*(P/(R*T))**2
>>> etas = eta*P/(R*T)
>>> a_coeff = 1
>>> b_coeff = (deltas - B - 1)
>>> c = (thetas + epsilons - deltas*(B+1))
>>> d = -(epsilons*(B+1) + thetas*etas)
>>> disc = b_coeff*b_coeff*c*c - 4*a_coeff*c*c*c - 4*b_coeff*b_coeff*b_coeff*d - 27*a_coeff*a_coeff*d*d + 18*a_coeff*b_coeff*c*d
>>> base = -(expand(disc/P**2*R**3*T**3))
>>> sln = collect(base, P)
'''
# Can also have one at g
# T, a_alpha = self.T, self.a_alpha
a = a_alpha
# b, epsilon, delta = self.b, self.epsilon, self.delta
T_inv = 1.0/T
# TODO cse
x0 = 4.0*a
x1 = b*x0
x2 = a+a
x3 = delta*x2
x4 = R*T
x5 = 4.0*epsilon
x6 = delta*delta
x7 = a*a
x8 = T_inv*R_inv
x9 = 8.0*epsilon
x10 = b*x9
x11 = 4.0*delta
x12 = delta*x6
x13 = 2.0*x6
x14 = b*x13
x15 = a*x8
x16 = epsilon*x15
x20 = x8*x8
x17 = x20*x8
x18 = b*delta
x19 = 6.0*x15
x21 = x20*x7
x22 = 10.0*b
x23 = b*b
x24 = 6.0*x23
x25 = x0*x8
x26 = x6*x6
x27 = epsilon*epsilon
x28 = 8.0*x27
x29 = 24.0*epsilon
x30 = b*x12
x31 = epsilon*x13
x32 = epsilon*x8
x33 = 12.0*epsilon
x34 = b*x23
x35 = x2*x8
x36 = 8.0*x21
x37 = x15*x6
x38 = delta*x23
x39 = b*x28
x40 = x34*x9
x41 = epsilon*x12
x42 = x23*x23
e = x1 + x3 + x4*x5 - x4*x6 - x7*x8
d = (4.0*x7*a*x17 - 10.0*delta*x21 + 2.0*(epsilon*x11 + x10 - x12
- x14 + x15*x24 + x18*x19 - x21*x22 + x25*x6) - 20.0*x16)
c = x8*(-x1*x32 + x12*x35 + x15*(12.0*x34 + 18.0*x38) + x18*(x29 + x36)
+ x21*(x33 - x6) + x22*x37 + x23*(x29 + x36) - x24*x6 - x26
+ x28 - x3*x32 - 6.0*x30 + x31)
b_coeff = (2.0*x20*(-b*x26 + delta*(x10*x15 + x25*x34) + epsilon*x14
+ x23*(x15*x9 - 3.0*x12 + x37) - x13*x34 - x15*x30
-x16*x6 + x27*(x19 + x11) + x33*x38 + x35*x42
+ x39 + x40 - x41))
a_coeff = x17*(-2.0*b*x41 + delta*(x39 + x40)
+ x27*(4.0*epsilon - x6)
- 2.0*x12*x34 + x23*(x28 + x31 - x26)
+ x42*(x5 - x6))
# e = (2*a*delta + 4*a*b -R*T*delta**2 - a**2/(R*T) + 4*R*T*epsilon)
# d = (-4*b*delta**2 + 16*b*epsilon - 2*delta**3 + 8*delta*epsilon + 12*a*b**2/(R*T) + 12*a*b*delta/(R*T) + 8*a*delta**2/(R*T) - 20*a*epsilon/(R*T) - 20*a**2*b/(R**2*T**2) - 10*a**2*delta/(R**2*T**2) + 4*a**3/(R**3*T**3))
# c = (-6*b**2*delta**2/(R*T) + 24*b**2*epsilon/(R*T) - 6*b*delta**3/(R*T) + 24*b*delta*epsilon/(R*T) - delta**4/(R*T) + 2*delta**2*epsilon/(R*T) + 8*epsilon**2/(R*T) + 12*a*b**3/(R**2*T**2) + 18*a*b**2*delta/(R**2*T**2) + 10*a*b*delta**2/(R**2*T**2) - 4*a*b*epsilon/(R**2*T**2) + 2*a*delta**3/(R**2*T**2) - 2*a*delta*epsilon/(R**2*T**2) + 8*a**2*b**2/(R**3*T**3) + 8*a**2*b*delta/(R**3*T**3) - a**2*delta**2/(R**3*T**3) + 12*a**2*epsilon/(R**3*T**3))
# b_coeff = (-4*b**3*delta**2/(R**2*T**2) + 16*b**3*epsilon/(R**2*T**2) - 6*b**2*delta**3/(R**2*T**2) + 24*b**2*delta*epsilon/(R**2*T**2) - 2*b*delta**4/(R**2*T**2) + 4*b*delta**2*epsilon/(R**2*T**2) + 16*b*epsilon**2/(R**2*T**2) - 2*delta**3*epsilon/(R**2*T**2) + 8*delta*epsilon**2/(R**2*T**2) + 4*a*b**4/(R**3*T**3) + 8*a*b**3*delta/(R**3*T**3) + 2*a*b**2*delta**2/(R**3*T**3) + 16*a*b**2*epsilon/(R**3*T**3) - 2*a*b*delta**3/(R**3*T**3) + 16*a*b*delta*epsilon/(R**3*T**3) - 2*a*delta**2*epsilon/(R**3*T**3) + 12*a*epsilon**2/(R**3*T**3))
# a_coeff = (-b**4*delta**2/(R**3*T**3) + 4*b**4*epsilon/(R**3*T**3) - 2*b**3*delta**3/(R**3*T**3) + 8*b**3*delta*epsilon/(R**3*T**3) - b**2*delta**4/(R**3*T**3) + 2*b**2*delta**2*epsilon/(R**3*T**3) + 8*b**2*epsilon**2/(R**3*T**3) - 2*b*delta**3*epsilon/(R**3*T**3) + 8*b*delta*epsilon**2/(R**3*T**3) - delta**2*epsilon**2/(R**3*T**3) + 4*epsilon**3/(R**3*T**3))
roots = roots_quartic(a_coeff, b_coeff, c, d, e)
# roots = np.roots([a_coeff, b_coeff, c, d, e]).tolist()
if valid:
# TODO - only include ones when switching phases from l/g to either g/l
# Do not know how to handle
roots = [r.real for r in roots if (r.real >= 0.0)]
roots.sort()
return roots
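# Comment-only sketch relating the static analytical form to the instance
# helper above; both should return the same positive pressures for a
# subcritical state (the values shown in the P_discriminant_zeros doctest).
#
#   eos = PRTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
#   roots = GCEOS.P_discriminant_zeros_analytical(eos.T, eos.b, eos.delta,
#                                                 eos.epsilon, eos.a_alpha,
#                                                 valid=True)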
def _P_discriminant_zero(self, low):
# Can also have one at g
T, a_alpha = self.T, self.a_alpha
b, epsilon, delta = self.b, self.epsilon, self.delta
global niter
niter = 0
RT = R*T
x13 = RT**-6.0
x14 = b*epsilon
x15 = -b*delta + epsilon
x18 = b - delta
def discriminant_fun(P):
if P < 0:
raise ValueError("Will not converge")
global niter
niter += 1
x0 = P*P
x1 = P*epsilon
x2 = P*b + RT
x3 = a_alpha - delta*x2 + x1
x3_x3 = x3*x3
x4 = x3*x3_x3
x5 = a_alpha*b + epsilon*x2
x6 = 27.0*x5*x5
x7 = -P*delta + x2
x9 = x7*x7
x8 = x7*x9
x11 = x3*x5*x7
x12 = -18.0*P*x11 + 4.0*(P*x4 +x5*x8) + x0*x6 - x3_x3*x9
x16 = P*x15
x17 = 9.0*x3
x19 = x18*x5
# 26 mult so far
err = -x0*x12*x13
fprime = (-2.0*P*x13*(P*(-P*x17*x19 + P*x6 - b*x1*x17*x7
+ 27.0*x0*x14*x5 + 6.0*x3_x3*x16 - x3_x3*x18*x7
- 9.0*x11 + 2.0*x14*x8 - x15*x3*x9 - 9.0*x16*x5*x7 + 6.0*x19*x9 + 2.0*x4) + x12))
if niter > 3 and (.40 < (err/(P*fprime)) < 0.55):
raise ValueError("Not going to work")
# a = (err/fprime)/P
# print('low probably kill point')
return err, fprime
# New answer: Above critical T only high P result
# Ps = logspace(log10(1), log10(1e10), 40000)
# errs = []
# for P in Ps:
# erri = self.discriminant(P=P)
# if erri < 0:
# erri = -log10(abs(erri))
# else:
# erri = log10(erri)
# errs.append(erri)
# import matplotlib.pyplot as plt
# plt.semilogx(Ps, errs, 'x')
# # plt.ylim((-1e-3, 1e-3))
# plt.show()
# Checked once
# def damping_func(p0, step, damping):
# if p0 + step < 0.0:
# return 0.9*p0
# # while p0 + step < 1e3:
# # if p0 + step < 1e3:
# # step = 0.5*step
# return p0 + step
#low=1,damping_func=damping_func
# 5e7
try:
Tc = self.Tc
except:
Tc = self.pseudo_Tc
guesses = [1e5, 1e6, 1e7, 1e8, 1e9, .5, 1e-4, 1e-8, 1e-12, 1e-16, 1e-20]
if not low:
guesses = [1e9, 1e10, 1e10, 5e10, 2e10, 5e9, 5e8, 1e8]
if self.N == 1 and low:
try:
try:
Tc, Pc, omega = self.Tc, self.Pc, self.omega
except:
Tc, Pc, omega = self.Tcs[0], self.Pcs[0], self.omegas[0]
guesses.append(Pc*.99999999)
assert T/Tc > .3
P_wilson = Wilson_K_value(self.T, self.P, Tc, Pc, omega)*self.P
guesses.insert(0, P_wilson*3)
except:
pass
if low:
coeffs = self._P_zero_l_cheb_coeffs
coeffs_low, coeffs_high = self.P_zero_l_cheb_limits
else:
coeffs = self._P_zero_g_cheb_coeffs
coeffs_low, coeffs_high = self.P_zero_g_cheb_limits
if coeffs is not None:
try:
a = self.a
except:
a = self.pseudo_a
alpha = self.a_alpha/a
try:
Pc = self.Pc
except:
Pc = self.pseudo_Pc
Tr = self.T/Tc
alpha_Tr = alpha/(Tr)
x = alpha_Tr - 1.0
if coeffs_low < x < coeffs_high:
constant = 0.5*(-coeffs_low - coeffs_high)
factor = 2.0/(coeffs_high - coeffs_low)
y = chebval(factor*(x + constant), coeffs)
P_trans = y*Tr*Pc
guesses.insert(0, P_trans)
global_iter = 0
for P in guesses:
try:
global_iter += niter
niter = 0
# try:
# P_disc = newton(discriminant_fun, P, fprime=True, xtol=1e-16, low=1, maxiter=200, bisection=False, damping=1)
# except:
# high = None
# if self.N == 1:
# try:
# high = self.Pc
# except:
# high = self.Pcs[0]
# high *= (1+1e-11)
if not low and T < Tc:
low_bound = 1e8
else:
if Tr > .3:
low_bound = 1.0
else:
low_bound = None
P_disc = newton(discriminant_fun, P, fprime=True, xtol=4e-12, low=low_bound,
maxiter=80, bisection=False, damping=1)
assert P_disc > 0 and not P_disc == 1
if not low:
assert P_disc > low_bound
break
except:
pass
if not low:
assert P_disc > low_bound
global_iter += niter
# for i in range(1000):
# a = 1
if 0:
try:
P_disc = bisect(self._discriminant_at_T_mp, P_disc*(1-1e-8), P_disc*(1+1e-8), xtol=1e-18)
except:
try:
P_disc = bisect(self._discriminant_at_T_mp, P_disc*(1-1e-5), P_disc*(1+1e-5), xtol=1e-18)
except:
try:
P_disc = bisect(self._discriminant_at_T_mp, P_disc*(1-1e-2), P_disc*(1+1e-2))
except:
pass
# if not low:
# P_disc_base = None
# try:
# if T < Tc:
# P_disc_base = self._P_discriminant_zero(True)
# except:
# pass
# if P_disc_base is not None:
# # pass
# if isclose(P_disc_base, P_disc, rel_tol=1e-4):
# raise ValueError("Converged to wrong solution")
return float(P_disc)
# Can take a while to converge
P_disc = secant(lambda P: self.discriminant(P=P), self.P, xtol=1e-7, low=1e-12, maxiter=200, bisection=True)
if P_disc <= 0.0:
P_disc = secant(lambda P: self.discriminant(P=P), self.P*100, xtol=1e-7, maxiter=200)
# P_max = self.P*1000
# P_disc = brenth(lambda P: self.discriminant(P=P), self.P*1e-3, P_max, rtol=1e-7, maxiter=200)
return P_disc
def _plot_T_discriminant_zero(self):
Ts = logspace(log10(1), log10(1e4), 10000)
errs = []
for T in Ts:
erri = self.discriminant(T=T)
# if erri < 0:
# erri = -log10(abs(erri))
# else:
# erri = log10(erri)
errs.append(erri)
import matplotlib.pyplot as plt
plt.semilogx(Ts, errs, 'x')
# plt.ylim((-1e-3, 1e-3))
plt.show()
def T_discriminant_zero_l(self, T_guess=None):
r'''Method to calculate the temperature which zeros the discriminant
function of the general cubic eos, and which is likely to sit on the boundary
between not having a liquid-like volume and having one.
Parameters
----------
T_guess : float, optional
Temperature guess, [K]
Returns
-------
T_discriminant_zero_l : float
Temperature which makes the discriminant zero at the right condition,
[K]
Notes
-----
Significant numerical issues remain in improving this method.
Examples
--------
>>> eos = PRTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> T_trans = eos.T_discriminant_zero_l()
>>> T_trans
644.3023307
In this case, the discriminant transition does not reveal a transition
to two roots being available, only to there being a double (imaginary)
root.
>>> eos.to(P=eos.P, T=T_trans).mpmath_volumes_float
((9.309597822372529e-05-0.00015876248805149625j), (9.309597822372529e-05+0.00015876248805149625j), (0.005064847204219234+0j))
'''
# Can also have one at g
global niter
niter = 0
guesses = [100, 150, 200, 250, 300, 350, 400, 450]
if T_guess is not None:
guesses.append(T_guess)
if self.N == 1:
pass
global_iter = 0
for T in guesses:
try:
global_iter += niter
niter = 0
T_disc = secant(lambda T: self.discriminant(T=T), T, xtol=1e-10, low=1, maxiter=60, bisection=False, damping=1)
assert T_disc > 0 and not T_disc == 1
break
except:
pass
global_iter += niter
return T_disc
def T_discriminant_zero_g(self, T_guess=None):
r'''Method to calculate the temperature which zeros the discriminant
function of the general cubic eos, and which is likely to sit on the boundary
between not having a vapor-like volume and having one.
Parameters
----------
T_guess : float, optional
Temperature guess, [K]
Returns
-------
T_discriminant_zero_g : float
Temperature which makes the discriminant zero at the right condition,
[K]
Notes
-----
Significant numerical issues remain in improving this method.
Examples
--------
>>> eos = PRTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> T_trans = eos.T_discriminant_zero_g()
>>> T_trans
644.3023307
In this case, the discriminant transition does not reveal a transition
to two roots being available, only to there being a double (imaginary)
root.
>>> eos.to(P=eos.P, T=T_trans).mpmath_volumes_float
((9.309597822372529e-05-0.00015876248805149625j), (9.309597822372529e-05+0.00015876248805149625j), (0.005064847204219234+0j))
'''
global niter
niter = 0
guesses = [700, 600, 500, 400, 300, 200]
if T_guess is not None:
guesses.append(T_guess)
if self.N == 1:
pass
global_iter = 0
for T in guesses:
try:
global_iter += niter
niter = 0
T_disc = secant(lambda T: self.discriminant(T=T), T, xtol=1e-10, low=1, maxiter=60, bisection=False, damping=1)
assert T_disc > 0 and not T_disc == 1
break
except:
pass
global_iter += niter
return T_disc
def P_PIP_transition(self, T, low_P_limit=0.0):
r'''Method to calculate the pressure which makes the phase
identification parameter exactly 1. There are three regions for this
calculation:
* subcritical - PIP = 1 for the gas-like phase at P = 0
* initially supercritical - PIP = 1 on a curve starting at the
critical point, increasing for a while, decreasing for a while,
and then curving sharply back to a zero pressure.
* later supercritical - PIP = 1 for the liquid-like phase at P = 0
Parameters
----------
T : float
Temperature for the calculation, [K]
low_P_limit : float
What value to return in the subcritical and later-supercritical regions, [Pa]
Returns
-------
P : float
Pressure which makes the PIP = 1, [Pa]
Notes
-----
The transition between the region where this function returns values
and the high temperature region that doesn't is the Joule-Thomson
inversion point at a pressure of zero and can be directly solved for.
Examples
--------
>>> eos = PRTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> eos.P_PIP_transition(100)
0.0
>>> low_T = eos.to(T=100.0, P=eos.P_PIP_transition(100, low_P_limit=1e-5))
>>> low_T.PIP_l, low_T.PIP_g
(45.778088191, 0.9999999997903)
>>> initial_super = eos.to(T=600.0, P=eos.P_PIP_transition(600))
>>> initial_super.P, initial_super.PIP_g
(6456282.17132, 0.999999999999)
>>> high_T = eos.to(T=900.0, P=eos.P_PIP_transition(900, low_P_limit=1e-5))
>>> high_T.P, high_T.PIP_g
(12536704.763, 0.9999999999)
'''
subcritical = T < self.Tc
if subcritical:
return low_P_limit
else:
def to_solve(P):
e = self.to(T=T, P=P)
# TODO: as all a_alpha is the same for all conditions, should be
# able to derive a direct expression for this from the EOS which
# only uses a volume solution
# TODO: should be able to get the derivative of PIP w.r.t. pressure
if hasattr(e, 'V_l'):
return e.PIP_l-1.0
else:
return e.PIP_g-1.0
try:
# Near the critical point these equations turn extremely nasty!
# bisection is the most reliable solver
if subcritical:
Psat = self.Psat(T)
low, high = 10.0*Psat, Psat
else:
low, high = 1e-3, 1e11
P = bisect(to_solve, low, high)
return P
except:
err_low = to_solve(low_P_limit)
if abs(err_low) < 1e-9:
# Well above the critical point all solutions except the
# zero-pressure limit have PIP values above 1
# This corresponds to the JT inversion temperature at a
# pressure of zero.
return low_P_limit
raise ValueError("Could not converge")
def _V_g_extrapolated(self):
P_pseudo_mc = sum([self.Pcs[i]*self.zs[i] for i in self.cmps])
T_pseudo_mc = sum([(self.Tcs[i]*self.Tcs[j])**0.5*self.zs[j]*self.zs[i]
for i in self.cmps for j in self.cmps])
V_pseudo_mc = (self.Zc*R*T_pseudo_mc)/P_pseudo_mc
rho_pseudo_mc = 1.0/V_pseudo_mc
P_disc = self.P_discriminant_zero_l()
try:
P_low = max(P_disc - 10.0, 1e-3)
eos_low = self.to_TP_zs(T=self.T, P=P_low, zs=self.zs)
rho_low = 1.0/eos_low.V_g
except:
P_low = max(P_disc + 10.0, 1e-3)
eos_low = self.to_TP_zs(T=self.T, P=P_low, zs=self.zs)
rho_low = 1.0/eos_low.V_g
rho0 = (rho_low + 1.4*rho_pseudo_mc)*0.5
dP_drho = eos_low.dP_drho_g
rho1 = P_low*((rho_low - 1.4*rho_pseudo_mc) + P_low/dP_drho)
rho2 = -P_low*P_low*((rho_low - 1.4*rho_pseudo_mc)*0.5 + P_low/dP_drho)
rho_ans = rho0 + rho1/eos_low.P + rho2/(eos_low.P*eos_low.P)
return 1.0/rho_ans
@property
def fugacity_l(self):
r'''Fugacity for the liquid phase, [Pa].
.. math::
\text{fugacity} = P\exp\left(\frac{G_{dep}}{RT}\right)
'''
arg = self.G_dep_l*R_inv/self.T
try:
return self.P*exp(arg)
except:
return self.P*1e308
@property
def fugacity_g(self):
r'''Fugacity for the gas phase, [Pa].
.. math::
\text{fugacity} = P\exp\left(\frac{G_{dep}}{RT}\right)
'''
arg = self.G_dep_g*R_inv/self.T
try:
return self.P*exp(arg)
except:
return self.P*1e308
@property
def phi_l(self):
r'''Fugacity coefficient for the liquid phase, [-].
.. math::
\phi = \frac{\text{fugacity}}{P}
'''
arg = self.G_dep_l*R_inv/self.T
try:
return exp(arg)
except:
return 1e308
@property
def phi_g(self):
r'''Fugacity coefficient for the gas phase, [-].
.. math::
\phi = \frac{\text{fugacity}}{P}
'''
arg = self.G_dep_g*R_inv/self.T
try:
return exp(arg)
except:
return 1e308
@property
def Cp_minus_Cv_l(self):
r'''Cp - Cv for the liquid phase, [J/mol/K].
.. math::
C_p - C_v = -T\left(\frac{\partial P}{\partial T}\right)_V^2/
\left(\frac{\partial P}{\partial V}\right)_T
'''
return -self.T*self.dP_dT_l*self.dP_dT_l*self.dV_dP_l
@property
def Cp_minus_Cv_g(self):
r'''Cp - Cv for the gas phase, [J/mol/K].
.. math::
C_p - C_v = -T\left(\frac{\partial P}{\partial T}\right)_V^2/
\left(\frac{\partial P}{\partial V}\right)_T
'''
return -self.T*self.dP_dT_g*self.dP_dT_g*self.dV_dP_g
@property
def beta_l(self):
r'''Isobaric (constant-pressure) expansion coefficient for the liquid
phase, [1/K].
.. math::
\beta = \frac{1}{V}\frac{\partial V}{\partial T}
'''
return self.dV_dT_l/self.V_l
@property
def beta_g(self):
r'''Isobaric (constant-pressure) expansion coefficient for the gas
phase, [1/K].
.. math::
\beta = \frac{1}{V}\frac{\partial V}{\partial T}
'''
return self.dV_dT_g/self.V_g
@property
def kappa_l(self):
r'''Isothermal (constant-temperature) compressibility for the liquid
phase, [1/Pa].
.. math::
\kappa = \frac{-1}{V}\frac{\partial V}{\partial P}
'''
return -self.dV_dP_l/self.V_l
@property
def kappa_g(self):
r'''Isothermal (constant-temperature) compressibility for the gas
phase, [1/Pa].
.. math::
\kappa = \frac{-1}{V}\frac{\partial V}{\partial P}
'''
return -self.dV_dP_g/self.V_g
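# Finite-difference sanity sketch for the beta/kappa properties (comments only;
# the step size and the idea of checking this way are illustrative assumptions):
#
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=299.0, P=1E6)
#   dT = 1e-3
#   beta_fd = (eos.to(T=eos.T + dT, P=eos.P).V_l
#              - eos.to(T=eos.T - dT, P=eos.P).V_l)/(2.0*dT*eos.V_l)
#   # beta_fd should approximate eos.beta_l; an analogous central difference in
#   # P approximates -kappa_l*V_l through dV/dP.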
@property
def V_dep_l(self):
r'''Departure molar volume from ideal gas behavior for the liquid phase,
[m^3/mol].
.. math::
V_{dep} = V - \frac{RT}{P}
'''
return self.V_l - self.T*R/self.P
@property
def V_dep_g(self):
r'''Departure molar volume from ideal gas behavior for the gas phase,
[m^3/mol].
.. math::
V_{dep} = V - \frac{RT}{P}
'''
return self.V_g - self.T*R/self.P
@property
def U_dep_l(self):
r'''Departure molar internal energy from ideal gas behavior for the
liquid phase, [J/mol].
.. math::
U_{dep} = H_{dep} - P V_{dep}
'''
return self.H_dep_l - self.P*(self.V_l - self.T*R/self.P)
@property
def U_dep_g(self):
r'''Departure molar internal energy from ideal gas behavior for the
gas phase, [J/mol].
.. math::
U_{dep} = H_{dep} - P V_{dep}
'''
return self.H_dep_g - self.P*(self.V_g - self.T*R/self.P)
@property
def A_dep_l(self):
r'''Departure molar Helmholtz energy from ideal gas behavior for the
liquid phase, [J/mol].
.. math::
A_{dep} = U_{dep} - T S_{dep}
'''
return self.H_dep_l - self.P*(self.V_l - self.T*R/self.P) - self.T*self.S_dep_l
@property
def A_dep_g(self):
r'''Departure molar Helmholtz energy from ideal gas behavior for the
gas phase, [J/mol].
.. math::
A_{dep} = U_{dep} - T S_{dep}
'''
return self.H_dep_g - self.P*(self.V_g - self.T*R/self.P) - self.T*self.S_dep_g
@property
def d2T_dPdV_l(self):
r'''Second partial derivative of temperature with respect to
pressure (constant volume) and then volume (constant pressure)
for the liquid phase, [K*mol/(Pa*m^3)].
.. math::
\left(\frac{\partial^2 T}{\partial P\partial V}\right) =
- \left[\left(\frac{\partial^2 P}{\partial T \partial V}\right)
\left(\frac{\partial P}{\partial T}\right)_V
- \left(\frac{\partial P}{\partial V}\right)_T
\left(\frac{\partial^2 P}{\partial T^2}\right)_V
\right]\left(\frac{\partial P}{\partial T}\right)_V^{-3}
'''
inverse_dP_dT2 = self.dT_dP_l*self.dT_dP_l
inverse_dP_dT3 = inverse_dP_dT2*self.dT_dP_l
d2T_dPdV = -(self.d2P_dTdV_l*self.dP_dT_l - self.dP_dV_l*self.d2P_dT2_l)*inverse_dP_dT3
return d2T_dPdV
@property
def d2T_dPdV_g(self):
r'''Second partial derivative of temperature with respect to
pressure (constant volume) and then volume (constant pressure)
for the gas phase, [K*mol/(Pa*m^3)].
.. math::
\left(\frac{\partial^2 T}{\partial P\partial V}\right) =
- \left[\left(\frac{\partial^2 P}{\partial T \partial V}\right)
\left(\frac{\partial P}{\partial T}\right)_V
- \left(\frac{\partial P}{\partial V}\right)_T
\left(\frac{\partial^2 P}{\partial T^2}\right)_V
\right]\left(\frac{\partial P}{\partial T}\right)_V^{-3}
'''
inverse_dP_dT2 = self.dT_dP_g*self.dT_dP_g
inverse_dP_dT3 = inverse_dP_dT2*self.dT_dP_g
d2T_dPdV = -(self.d2P_dTdV_g*self.dP_dT_g - self.dP_dV_g*self.d2P_dT2_g)*inverse_dP_dT3
return d2T_dPdV
@property
def d2V_dPdT_l(self):
r'''Second partial derivative of volume with respect to
pressure (constant temperature) and then temperature (constant pressure)
for the liquid phase, [m^3/(K*Pa*mol)].
.. math::
\left(\frac{\partial^2 V}{\partial T\partial P}\right) =
- \left[\left(\frac{\partial^2 P}{\partial T \partial V}\right)
\left(\frac{\partial P}{\partial V}\right)_T
- \left(\frac{\partial P}{\partial T}\right)_V
\left(\frac{\partial^2 P}{\partial V^2}\right)_T
\right]\left(\frac{\partial P}{\partial V}\right)_T^{-3}
'''
dV_dP = self.dV_dP_l
return -(self.d2P_dTdV_l*self.dP_dV_l - self.dP_dT_l*self.d2P_dV2_l)*dV_dP*dV_dP*dV_dP
@property
def d2V_dPdT_g(self):
r'''Second partial derivative of volume with respect to
pressure (constant temperature) and then temperature (constant pressure)
for the gas phase, [m^3/(K*Pa*mol)].
.. math::
\left(\frac{\partial^2 V}{\partial T\partial P}\right) =
- \left[\left(\frac{\partial^2 P}{\partial T \partial V}\right)
\left(\frac{\partial P}{\partial V}\right)_T
- \left(\frac{\partial P}{\partial T}\right)_V
\left(\frac{\partial^2 P}{\partial V^2}\right)_T
\right]\left(\frac{\partial P}{\partial V}\right)_T^{-3}
'''
dV_dP = self.dV_dP_g
return -(self.d2P_dTdV_g*self.dP_dV_g - self.dP_dT_g*self.d2P_dV2_g)*dV_dP*dV_dP*dV_dP
@property
def d2T_dP2_l(self):
r'''Second partial derivative of temperature with respect to
pressure (constant volume) for the liquid phase, [K/Pa^2].
.. math::
\left(\frac{\partial^2 T}{\partial P^2}\right)_V = -\left(\frac{
\partial^2 P}{\partial T^2}\right)_V \left(\frac{\partial P}{
\partial T}\right)^{-3}_V
'''
dT_dP = self.dT_dP_l
return -self.d2P_dT2_l*dT_dP*dT_dP*dT_dP
@property
def d2T_dP2_g(self):
r'''Second partial derivative of temperature with respect to
pressure (constant volume) for the gas phase, [K/Pa^2].
.. math::
\left(\frac{\partial^2 T}{\partial P^2}\right)_V = -\left(\frac{
\partial^2 P}{\partial T^2}\right)_V \left(\frac{\partial P}{
\partial T}\right)^{-3}_V
'''
dT_dP = self.dT_dP_g
return -self.d2P_dT2_g*dT_dP*dT_dP*dT_dP
@property
def d2V_dP2_l(self):
r'''Second partial derivative of volume with respect to
pressure (constant temperature) for the liquid phase, [m^3/(Pa^2*mol)].
.. math::
\left(\frac{\partial^2 V}{\partial P^2}\right)_T = -\left(\frac{
\partial^2 P}{\partial V^2}\right)_T \left(\frac{\partial P}{
\partial V}\right)^{-3}_T
'''
dV_dP = self.dV_dP_l
return -self.d2P_dV2_l*dV_dP*dV_dP*dV_dP
@property
def d2V_dP2_g(self):
r'''Second partial derivative of volume with respect to
pressure (constant temperature) for the gas phase, [m^3/(Pa^2*mol)].
.. math::
\left(\frac{\partial^2 V}{\partial P^2}\right)_T = -\left(\frac{
\partial^2 P}{\partial V^2}\right)_T \left(\frac{\partial P}{
\partial V}\right)^{-3}_T
'''
dV_dP = self.dV_dP_g
return -self.d2P_dV2_g*dV_dP*dV_dP*dV_dP
@property
def d2T_dV2_l(self):
r'''Second partial derivative of temperature with respect to
volume (constant pressure) for the liquid phase, [K*mol^2/m^6].
.. math::
\left(\frac{\partial^2 T}{\partial V^2}\right)_P = -\left[
\left(\frac{\partial^2 P}{\partial V^2}\right)_T
\left(\frac{\partial P}{\partial T}\right)_V
- \left(\frac{\partial P}{\partial V}\right)_T
\left(\frac{\partial^2 P}{\partial T \partial V}\right) \right]
\left(\frac{\partial P}{\partial T}\right)^{-2}_V
+ \left[\left(\frac{\partial^2 P}{\partial T\partial V}\right)
\left(\frac{\partial P}{\partial T}\right)_V
- \left(\frac{\partial P}{\partial V}\right)_T
\left(\frac{\partial^2 P}{\partial T^2}\right)_V\right]
\left(\frac{\partial P}{\partial T}\right)_V^{-3}
\left(\frac{\partial P}{\partial V}\right)_T
'''
dT_dP = self.dT_dP_l
dT_dP2 = dT_dP*dT_dP
d2T_dV2 = dT_dP2*(-(self.d2P_dV2_l*self.dP_dT_l - self.dP_dV_l*self.d2P_dTdV_l)
+(self.d2P_dTdV_l*self.dP_dT_l - self.dP_dV_l*self.d2P_dT2_l)*dT_dP*self.dP_dV_l)
return d2T_dV2
@property
def d2T_dV2_g(self):
r'''Second partial derivative of temperature with respect to
volume (constant pressure) for the gas phase, [K*mol^2/m^6].
.. math::
\left(\frac{\partial^2 T}{\partial V^2}\right)_P = -\left[
\left(\frac{\partial^2 P}{\partial V^2}\right)_T
\left(\frac{\partial P}{\partial T}\right)_V
- \left(\frac{\partial P}{\partial V}\right)_T
\left(\frac{\partial^2 P}{\partial T \partial V}\right) \right]
\left(\frac{\partial P}{\partial T}\right)^{-2}_V
+ \left[\left(\frac{\partial^2 P}{\partial T\partial V}\right)
\left(\frac{\partial P}{\partial T}\right)_V
- \left(\frac{\partial P}{\partial V}\right)_T
\left(\frac{\partial^2 P}{\partial T^2}\right)_V\right]
\left(\frac{\partial P}{\partial T}\right)_V^{-3}
\left(\frac{\partial P}{\partial V}\right)_T
'''
dT_dP = self.dT_dP_g
dT_dP2 = dT_dP*dT_dP
d2T_dV2 = dT_dP2*(-(self.d2P_dV2_g*self.dP_dT_g - self.dP_dV_g*self.d2P_dTdV_g)
+(self.d2P_dTdV_g*self.dP_dT_g - self.dP_dV_g*self.d2P_dT2_g)*dT_dP*self.dP_dV_g)
return d2T_dV2
@property
def d2V_dT2_l(self):
r'''Second partial derivative of volume with respect to
temperature (constant pressure) for the liquid phase, [m^3/(mol*K^2)].
.. math::
\left(\frac{\partial^2 V}{\partial T^2}\right)_P = -\left[
\left(\frac{\partial^2 P}{\partial T^2}\right)_V
\left(\frac{\partial P}{\partial V}\right)_T
- \left(\frac{\partial P}{\partial T}\right)_V
\left(\frac{\partial^2 P}{\partial T \partial V}\right) \right]
\left(\frac{\partial P}{\partial V}\right)^{-2}_T
+ \left[\left(\frac{\partial^2 P}{\partial T\partial V}\right)
\left(\frac{\partial P}{\partial V}\right)_T
- \left(\frac{\partial P}{\partial T}\right)_V
\left(\frac{\partial^2 P}{\partial V^2}\right)_T\right]
\left(\frac{\partial P}{\partial V}\right)_T^{-3}
\left(\frac{\partial P}{\partial T}\right)_V
'''
dV_dP = self.dV_dP_l
dP_dV = self.dP_dV_l
d2P_dTdV = self.d2P_dTdV_l
dP_dT = self.dP_dT_l
        d2V_dT2 = dV_dP*dV_dP*(-(self.d2P_dT2_l*dP_dV - dP_dT*d2P_dTdV)
+(d2P_dTdV*dP_dV - dP_dT*self.d2P_dV2_l)*dV_dP*dP_dT)
return d2V_dT2
@property
def d2V_dT2_g(self):
r'''Second partial derivative of volume with respect to
temperature (constant pressure) for the gas phase, [m^3/(mol*K^2)].
.. math::
\left(\frac{\partial^2 V}{\partial T^2}\right)_P = -\left[
\left(\frac{\partial^2 P}{\partial T^2}\right)_V
\left(\frac{\partial P}{\partial V}\right)_T
- \left(\frac{\partial P}{\partial T}\right)_V
\left(\frac{\partial^2 P}{\partial T \partial V}\right) \right]
\left(\frac{\partial P}{\partial V}\right)^{-2}_T
+ \left[\left(\frac{\partial^2 P}{\partial T\partial V}\right)
\left(\frac{\partial P}{\partial V}\right)_T
- \left(\frac{\partial P}{\partial T}\right)_V
\left(\frac{\partial^2 P}{\partial V^2}\right)_T\right]
\left(\frac{\partial P}{\partial V}\right)_T^{-3}
\left(\frac{\partial P}{\partial T}\right)_V
'''
dV_dP = self.dV_dP_g
dP_dV = self.dP_dV_g
d2P_dTdV = self.d2P_dTdV_g
dP_dT = self.dP_dT_g
        d2V_dT2 = dV_dP*dV_dP*(-(self.d2P_dT2_g*dP_dV - dP_dT*d2P_dTdV)
+(d2P_dTdV*dP_dV - dP_dT*self.d2P_dV2_g)*dV_dP*dP_dT)
return d2V_dT2
@property
def Vc(self):
r'''Critical volume, [m^3/mol].
.. math::
V_c = \frac{Z_c R T_c}{P_c}
'''
return self.Zc*R*self.Tc/self.Pc
@property
def rho_l(self):
r'''Liquid molar density, [mol/m^3].
.. math::
\rho_l = \frac{1}{V_l}
'''
return 1.0/self.V_l
@property
def rho_g(self):
r'''Gas molar density, [mol/m^3].
.. math::
\rho_g = \frac{1}{V_g}
'''
return 1.0/self.V_g
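    # All of the molar density derivatives implemented below rest on the single
    # chain rule d f/d rho = -V^2 * d f/d V, which follows from rho = 1/V. A
    # minimal symbolic sketch (sympy assumed, illustration only):
    #
    #     import sympy as sp
    #     V = sp.symbols('V', positive=True)
    #     f = sp.Function('f')
    #     # df/drho = (df/dV)/(drho/dV), with drho/dV = -1/V**2
    #     sp.simplify(sp.diff(f(V), V)/sp.diff(1/V, V))    # -V**2*Derivative(f(V), V)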
@property
def dZ_dT_l(self):
r'''Derivative of compressibility factor with respect to temperature
for the liquid phase, [1/K].
.. math::
\frac{\partial Z}{\partial T} = \frac{P}{RT}\left(
\frac{\partial V}{\partial T} - \frac{V}{T}
\right)
'''
T_inv = 1.0/self.T
return self.P*R_inv*T_inv*(self.dV_dT_l - self.V_l*T_inv)
@property
def dZ_dT_g(self):
r'''Derivative of compressibility factor with respect to temperature
for the gas phase, [1/K].
.. math::
\frac{\partial Z}{\partial T} = \frac{P}{RT}\left(
\frac{\partial V}{\partial T} - \frac{V}{T}
\right)
'''
T_inv = 1.0/self.T
return self.P*R_inv*T_inv*(self.dV_dT_g - self.V_g*T_inv)
@property
def dZ_dP_l(self):
r'''Derivative of compressibility factor with respect to pressure
for the liquid phase, [1/Pa].
.. math::
            \frac{\partial Z}{\partial P} = \frac{1}{RT}\left(
            V + P\frac{\partial V}{\partial P}
            \right)
'''
return (self.V_l + self.P*self.dV_dP_l)/(self.T*R)
@property
def dZ_dP_g(self):
r'''Derivative of compressibility factor with respect to pressure
for the gas phase, [1/Pa].
.. math::
            \frac{\partial Z}{\partial P} = \frac{1}{RT}\left(
            V + P\frac{\partial V}{\partial P}
            \right)
'''
return (self.V_g + self.P*self.dV_dP_g)/(self.T*R)
d2V_dTdP_l = d2V_dPdT_l
d2V_dTdP_g = d2V_dPdT_g
d2T_dVdP_l = d2T_dPdV_l
d2T_dVdP_g = d2T_dPdV_g
@property
def d2P_dVdT_l(self):
'''Alias of :obj:`GCEOS.d2P_dTdV_l`'''
return self.d2P_dTdV_l
@property
def d2P_dVdT_g(self):
'''Alias of :obj:`GCEOS.d2P_dTdV_g`'''
return self.d2P_dTdV_g
@property
def dP_drho_l(self):
r'''Derivative of pressure with respect to molar density for the liquid
phase, [Pa/(mol/m^3)].
.. math::
\frac{\partial P}{\partial \rho} = -V^2 \frac{\partial P}{\partial V}
'''
return -self.V_l*self.V_l*self.dP_dV_l
@property
def dP_drho_g(self):
r'''Derivative of pressure with respect to molar density for the gas
phase, [Pa/(mol/m^3)].
.. math::
\frac{\partial P}{\partial \rho} = -V^2 \frac{\partial P}{\partial V}
'''
return -self.V_g*self.V_g*self.dP_dV_g
@property
def drho_dP_l(self):
r'''Derivative of molar density with respect to pressure for the liquid
phase, [(mol/m^3)/Pa].
.. math::
\frac{\partial \rho}{\partial P} = \frac{-1}{V^2} \frac{\partial V}{\partial P}
'''
return -self.dV_dP_l/(self.V_l*self.V_l)
@property
def drho_dP_g(self):
r'''Derivative of molar density with respect to pressure for the gas
phase, [(mol/m^3)/Pa].
.. math::
\frac{\partial \rho}{\partial P} = \frac{-1}{V^2} \frac{\partial V}{\partial P}
'''
return -self.dV_dP_g/(self.V_g*self.V_g)
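    # Quick consistency sketch: dP_drho and drho_dP are reciprocals at constant
    # temperature, so their product should be 1 up to rounding. ``eos`` stands
    # for an already-solved GCEOS subclass instance and is an assumption of the
    # sketch:
    #
    #     check_l = eos.dP_drho_l*eos.drho_dP_l    # expected to be 1.0
    #     check_g = eos.dP_drho_g*eos.drho_dP_g    # expected to be 1.0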
@property
def d2P_drho2_l(self):
r'''Second derivative of pressure with respect to molar density for the
liquid phase, [Pa/(mol/m^3)^2].
.. math::
\frac{\partial^2 P}{\partial \rho^2} = -V^2\left(
-V^2\frac{\partial^2 P}{\partial V^2} - 2V \frac{\partial P}{\partial V}
\right)
'''
return -self.V_l**2*(-self.V_l**2*self.d2P_dV2_l - 2*self.V_l*self.dP_dV_l)
@property
def d2P_drho2_g(self):
r'''Second derivative of pressure with respect to molar density for the
gas phase, [Pa/(mol/m^3)^2].
.. math::
\frac{\partial^2 P}{\partial \rho^2} = -V^2\left(
-V^2\frac{\partial^2 P}{\partial V^2} - 2V \frac{\partial P}{\partial V}
\right)
'''
return -self.V_g**2*(-self.V_g**2*self.d2P_dV2_g - 2*self.V_g*self.dP_dV_g)
@property
def d2rho_dP2_l(self):
r'''Second derivative of molar density with respect to pressure for the
liquid phase, [(mol/m^3)/Pa^2].
.. math::
\frac{\partial^2 \rho}{\partial P^2} =
-\frac{\partial^2 V}{\partial P^2}\frac{1}{V^2}
+ 2 \left(\frac{\partial V}{\partial P}\right)^2\frac{1}{V^3}
'''
return -self.d2V_dP2_l/self.V_l**2 + 2*self.dV_dP_l**2/self.V_l**3
@property
def d2rho_dP2_g(self):
r'''Second derivative of molar density with respect to pressure for the
gas phase, [(mol/m^3)/Pa^2].
.. math::
\frac{\partial^2 \rho}{\partial P^2} =
-\frac{\partial^2 V}{\partial P^2}\frac{1}{V^2}
+ 2 \left(\frac{\partial V}{\partial P}\right)^2\frac{1}{V^3}
'''
return -self.d2V_dP2_g/self.V_g**2 + 2*self.dV_dP_g**2/self.V_g**3
@property
def dT_drho_l(self):
r'''Derivative of temperature with respect to molar density for the
liquid phase, [K/(mol/m^3)].
.. math::
            \frac{\partial T}{\partial \rho} = -V^2 \frac{\partial T}{\partial V}
'''
return -self.V_l*self.V_l*self.dT_dV_l
@property
def dT_drho_g(self):
r'''Derivative of temperature with respect to molar density for the
gas phase, [K/(mol/m^3)].
.. math::
            \frac{\partial T}{\partial \rho} = -V^2 \frac{\partial T}{\partial V}
'''
return -self.V_g*self.V_g*self.dT_dV_g
@property
def d2T_drho2_l(self):
r'''Second derivative of temperature with respect to molar density for
the liquid phase, [K/(mol/m^3)^2].
.. math::
            \frac{\partial^2 T}{\partial \rho^2} = -V^2\left(
            -V^2 \frac{\partial^2 T}{\partial V^2} - 2V \frac{\partial T}{\partial V}
            \right)
'''
return -self.V_l**2*(-self.V_l**2*self.d2T_dV2_l - 2*self.V_l*self.dT_dV_l)
@property
def d2T_drho2_g(self):
r'''Second derivative of temperature with respect to molar density for
the gas phase, [K/(mol/m^3)^2].
.. math::
            \frac{\partial^2 T}{\partial \rho^2} = -V^2\left(
            -V^2 \frac{\partial^2 T}{\partial V^2} - 2V \frac{\partial T}{\partial V}
            \right)
'''
return -self.V_g**2*(-self.V_g**2*self.d2T_dV2_g - 2*self.V_g*self.dT_dV_g)
@property
def drho_dT_l(self):
r'''Derivative of molar density with respect to temperature for the
liquid phase, [(mol/m^3)/K].
.. math::
\frac{\partial \rho}{\partial T} = - \frac{1}{V^2}
\frac{\partial V}{\partial T}
'''
return -self.dV_dT_l/(self.V_l*self.V_l)
@property
def drho_dT_g(self):
r'''Derivative of molar density with respect to temperature for the
gas phase, [(mol/m^3)/K].
.. math::
\frac{\partial \rho}{\partial T} = - \frac{1}{V^2}
\frac{\partial V}{\partial T}
'''
return -self.dV_dT_g/(self.V_g*self.V_g)
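    # Quick consistency sketch: the cyclic (triple product) rule applied to
    # (P, rho, T) gives (dP/drho)_T * (drho/dT)_P * (dT/dP)_rho = -1, and
    # holding rho constant is the same constraint as holding V constant.
    # ``eos`` is an assumed, already-solved GCEOS subclass instance:
    #
    #     cyclic = eos.dP_drho_l*eos.drho_dT_l*eos.dT_dP_l    # expected to be -1.0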
@property
def d2rho_dT2_l(self):
r'''Second derivative of molar density with respect to temperature for
the liquid phase, [(mol/m^3)/K^2].
.. math::
\frac{\partial^2 \rho}{\partial T^2} =
-\frac{\partial^2 V}{\partial T^2}\frac{1}{V^2}
+ 2 \left(\frac{\partial V}{\partial T}\right)^2\frac{1}{V^3}
'''
return -self.d2V_dT2_l/self.V_l**2 + 2*self.dV_dT_l**2/self.V_l**3
@property
def d2rho_dT2_g(self):
r'''Second derivative of molar density with respect to temperature for
the gas phase, [(mol/m^3)/K^2].
.. math::
\frac{\partial^2 \rho}{\partial T^2} =
-\frac{\partial^2 V}{\partial T^2}\frac{1}{V^2}
+ 2 \left(\frac{\partial V}{\partial T}\right)^2\frac{1}{V^3}
'''
return -self.d2V_dT2_g/self.V_g**2 + 2*self.dV_dT_g**2/self.V_g**3
@property
def d2P_dTdrho_l(self):
        r'''Second derivative of pressure with respect to molar density and
        temperature for the liquid phase, [Pa/(K*mol/m^3)].
.. math::
\frac{\partial^2 P}{\partial \rho\partial T}
= -V^2 \frac{\partial^2 P}{\partial T \partial V}
'''
return -(self.V_l*self.V_l)*self.d2P_dTdV_l
@property
def d2P_dTdrho_g(self):
        r'''Second derivative of pressure with respect to molar density and
        temperature for the gas phase, [Pa/(K*mol/m^3)].
.. math::
\frac{\partial^2 P}{\partial \rho\partial T}
= -V^2 \frac{\partial^2 P}{\partial T \partial V}
'''
return -(self.V_g*self.V_g)*self.d2P_dTdV_g
@property
def d2T_dPdrho_l(self):
        r'''Second derivative of temperature with respect to molar density and
        pressure for the liquid phase, [K/(Pa*mol/m^3)].
.. math::
\frac{\partial^2 T}{\partial \rho\partial P}
= -V^2 \frac{\partial^2 T}{\partial P \partial V}
'''
return -(self.V_l*self.V_l)*self.d2T_dPdV_l
@property
def d2T_dPdrho_g(self):
        r'''Second derivative of temperature with respect to molar density and
        pressure for the gas phase, [K/(Pa*mol/m^3)].
.. math::
\frac{\partial^2 T}{\partial \rho\partial P}
= -V^2 \frac{\partial^2 T}{\partial P \partial V}
'''
return -(self.V_g*self.V_g)*self.d2T_dPdV_g
@property
def d2rho_dPdT_l(self):
r'''Second derivative of molar density with respect to pressure
and temperature for the liquid phase, [(mol/m^3)/(K*Pa)].
.. math::
\frac{\partial^2 \rho}{\partial T \partial P} =
-\frac{\partial^2 V}{\partial T \partial P}\frac{1}{V^2}
+ 2 \left(\frac{\partial V}{\partial T}\right)
\left(\frac{\partial V}{\partial P}\right)
\frac{1}{V^3}
'''
return -self.d2V_dPdT_l/self.V_l**2 + 2*self.dV_dT_l*self.dV_dP_l/self.V_l**3
@property
def d2rho_dPdT_g(self):
r'''Second derivative of molar density with respect to pressure
and temperature for the gas phase, [(mol/m^3)/(K*Pa)].
.. math::
\frac{\partial^2 \rho}{\partial T \partial P} =
-\frac{\partial^2 V}{\partial T \partial P}\frac{1}{V^2}
+ 2 \left(\frac{\partial V}{\partial T}\right)
\left(\frac{\partial V}{\partial P}\right)
\frac{1}{V^3}
'''
return -self.d2V_dPdT_g/self.V_g**2 + 2*self.dV_dT_g*self.dV_dP_g/self.V_g**3
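    # A hedged symbolic sketch of the mixed density derivative above, treating
    # V as a function of (T, P) with rho = 1/V (sympy assumed, illustration
    # only):
    #
    #     import sympy as sp
    #     T, P = sp.symbols('T P')
    #     V = sp.Function('V')(T, P)
    #     sp.diff(1/V, P, T)
    #     # gives -Derivative(V, P, T)/V**2 + 2*Derivative(V, P)*Derivative(V, T)/V**3,
    #     # the same expression implemented in d2rho_dPdT_l and d2rho_dPdT_g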
@property
def dH_dep_dT_l(self):
r'''Derivative of departure enthalpy with respect to
temperature for the liquid phase, [(J/mol)/K].
.. math::
\frac{\partial H_{dep, l}}{\partial T} = P \frac{d}{d T} V{\left (T
\right )} - R + \frac{2 T}{\sqrt{\delta^{2} - 4 \epsilon}}
\operatorname{atanh}{\left (\frac{\delta + 2 V{\left (T \right
)}}{\sqrt{\delta^{2} - 4 \epsilon}} \right )} \frac{d^{2}}{d
T^{2}} \operatorname{a \alpha}{\left (T \right )} + \frac{4
\left(T \frac{d}{d T} \operatorname{a \alpha}{\left (T \right
)} - \operatorname{a \alpha}{\left (T \right )}\right) \frac{d}
{d T} V{\left (T \right )}}{\left(\delta^{2} - 4 \epsilon
\right) \left(- \frac{\left(\delta + 2 V{\left (T \right )}
\right)^{2}}{\delta^{2} - 4 \epsilon} + 1\right)}
'''
x0 = self.V_l
x1 = self.dV_dT_l
x2 = self.a_alpha
x3 = self.delta*self.delta - 4.0*self.epsilon
if x3 == 0.0:
x3 = 1e-100
x4 = x3**-0.5
x5 = self.delta + x0 + x0
x6 = 1.0/x3
return (self.P*x1 - R + 2.0*self.T*x4*catanh(x4*x5).real*self.d2a_alpha_dT2
- 4.0*x1*x6*(self.T*self.da_alpha_dT - x2)/(x5*x5*x6 - 1.0))
@property
def dH_dep_dT_g(self):
r'''Derivative of departure enthalpy with respect to
temperature for the gas phase, [(J/mol)/K].
.. math::
\frac{\partial H_{dep, g}}{\partial T} = P \frac{d}{d T} V{\left (T
\right )} - R + \frac{2 T}{\sqrt{\delta^{2} - 4 \epsilon}}
\operatorname{atanh}{\left (\frac{\delta + 2 V{\left (T \right
)}}{\sqrt{\delta^{2} - 4 \epsilon}} \right )} \frac{d^{2}}{d
T^{2}} \operatorname{a \alpha}{\left (T \right )} + \frac{4
\left(T \frac{d}{d T} \operatorname{a \alpha}{\left (T \right
)} - \operatorname{a \alpha}{\left (T \right )}\right) \frac{d}
{d T} V{\left (T \right )}}{\left(\delta^{2} - 4 \epsilon
\right) \left(- \frac{\left(\delta + 2 V{\left (T \right )}
\right)^{2}}{\delta^{2} - 4 \epsilon} + 1\right)}
'''
x0 = self.V_g
x1 = self.dV_dT_g
if x0 > 1e50:
if isinf(self.dV_dT_g) or self.H_dep_g == 0.0:
return 0.0
x2 = self.a_alpha
x3 = self.delta*self.delta - 4.0*self.epsilon
if x3 == 0.0:
x3 = 1e-100
x4 = x3**-0.5
x5 = self.delta + x0 + x0
x6 = 1.0/x3
return (self.P*x1 - R + 2.0*self.T*x4*catanh(x4*x5).real*self.d2a_alpha_dT2
- 4.0*x1*x6*(self.T*self.da_alpha_dT - x2)/(x5*x5*x6 - 1.0))
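    # A hedged numerical cross-check sketch for the departure enthalpy
    # temperature derivatives. ``eos`` is an assumed, already-solved GCEOS
    # subclass instance with a gas root, and ``eos.to_TP(T, P)`` is assumed to
    # re-solve the same parameters at a perturbed state; substitute the
    # concrete subclass constructor if no such helper is available:
    #
    #     dT = eos.T*1e-7
    #     num = (eos.to_TP(eos.T + dT, eos.P).H_dep_g
    #            - eos.to_TP(eos.T - dT, eos.P).H_dep_g)/(2.0*dT)
    #     # num should agree closely with eos.dH_dep_dT_g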
@property
def dH_dep_dT_l_V(self):
r'''Derivative of departure enthalpy with respect to
temperature at constant volume for the liquid phase, [(J/mol)/K].
.. math::
\left(\frac{\partial H_{dep, l}}{\partial T}\right)_{V} =
- R + \frac{2 T
\operatorname{atanh}{\left(\frac{2 V_l + \delta}{\sqrt{\delta^{2}
- 4 \epsilon}} \right)} \frac{d^{2}}{d T^{2}} \operatorname{
a_{\alpha}}{\left(T \right)}}{\sqrt{\delta^{2} - 4 \epsilon}}
+ V_l \frac{\partial}{\partial T} P{\left(T,V \right)}
'''
T = self.T
delta, epsilon = self.delta, self.epsilon
V = self.V_l
dP_dT = self.dP_dT_l
try:
x0 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x0 = 1e100
return -R + 2.0*T*x0*catanh(x0*(V + V + delta)).real*self.d2a_alpha_dT2 + V*dP_dT
@property
def dH_dep_dT_g_V(self):
r'''Derivative of departure enthalpy with respect to
temperature at constant volume for the gas phase, [(J/mol)/K].
.. math::
\left(\frac{\partial H_{dep, g}}{\partial T}\right)_{V} =
- R + \frac{2 T
\operatorname{atanh}{\left(\frac{2 V_g + \delta}{\sqrt{\delta^{2}
- 4 \epsilon}} \right)} \frac{d^{2}}{d T^{2}} \operatorname{
a_{\alpha}}{\left(T \right)}}{\sqrt{\delta^{2} - 4 \epsilon}}
+ V_g \frac{\partial}{\partial T} P{\left(T,V \right)}
'''
T = self.T
delta, epsilon = self.delta, self.epsilon
V = self.V_g
dP_dT = self.dP_dT_g
try:
x0 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x0 = 1e100
return -R + 2.0*T*x0*catanh(x0*(V + V + delta)).real*self.d2a_alpha_dT2 + V*dP_dT
@property
def dH_dep_dP_l(self):
r'''Derivative of departure enthalpy with respect to
pressure for the liquid phase, [(J/mol)/Pa].
.. math::
\frac{\partial H_{dep, l}}{\partial P} = P \frac{d}{d P} V{\left (P
\right )} + V{\left (P \right )} + \frac{4 \left(T \frac{d}{d T}
\operatorname{a \alpha}{\left (T \right )} - \operatorname{a
\alpha}{\left (T \right )}\right) \frac{d}{d P} V{\left (P \right
)}}{\left(\delta^{2} - 4 \epsilon\right) \left(- \frac{\left(\delta
+ 2 V{\left (P \right )}\right)^{2}}{\delta^{2} - 4 \epsilon}
+ 1\right)}
'''
delta = self.delta
x0 = self.V_l
x2 = delta*delta - 4.0*self.epsilon
x4 = (delta + x0 + x0)
return (x0 + self.dV_dP_l*(self.P - 4.0*(self.T*self.da_alpha_dT
- self.a_alpha)/(x4*x4 - x2)))
@property
def dH_dep_dP_g(self):
r'''Derivative of departure enthalpy with respect to
pressure for the gas phase, [(J/mol)/Pa].
.. math::
\frac{\partial H_{dep, g}}{\partial P} = P \frac{d}{d P} V{\left (P
\right )} + V{\left (P \right )} + \frac{4 \left(T \frac{d}{d T}
\operatorname{a \alpha}{\left (T \right )} - \operatorname{a
\alpha}{\left (T \right )}\right) \frac{d}{d P} V{\left (P \right
)}}{\left(\delta^{2} - 4 \epsilon\right) \left(- \frac{\left(\delta
+ 2 V{\left (P \right )}\right)^{2}}{\delta^{2} - 4 \epsilon}
+ 1\right)}
'''
delta = self.delta
x0 = self.V_g
x2 = delta*delta - 4.0*self.epsilon
x4 = (delta + x0 + x0)
# if isinf(self.dV_dP_g):
# This does not appear to be correct
# return 0.0
return (x0 + self.dV_dP_g*(self.P - 4.0*(self.T*self.da_alpha_dT
- self.a_alpha)/(x4*x4 - x2)))
@property
def dH_dep_dP_l_V(self):
r'''Derivative of departure enthalpy with respect to
        pressure at constant volume for the liquid phase, [(J/mol)/Pa].
        .. math::
            \left(\frac{\partial H_{dep, l}}{\partial P}\right)_{V} =
- R \left(\frac{\partial T}{\partial P}\right)_V + V + \frac{2 \left(T
\left(\frac{\partial \left(\frac{\partial a \alpha}{\partial T}
\right)_P}{\partial P}\right)_{V}
+ \left(\frac{\partial a \alpha}{\partial T}\right)_P
\left(\frac{\partial T}{\partial P}\right)_V - \left(\frac{
\partial a \alpha}{\partial P}\right)_{V} \right)
\operatorname{atanh}{\left(\frac{2 V + \delta}
{\sqrt{\delta^{2} - 4 \epsilon}} \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}}
'''
T, V, delta, epsilon = self.T, self.V_l, self.delta, self.epsilon
da_alpha_dT, d2a_alpha_dT2 = self.da_alpha_dT, self.d2a_alpha_dT2
dT_dP = self.dT_dP_l
d2a_alpha_dTdP_V = d2a_alpha_dT2*dT_dP
da_alpha_dP_V = da_alpha_dT*dT_dP
try:
x0 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x0 = 1e100
return (-R*dT_dP + V + 2.0*x0*(
T*d2a_alpha_dTdP_V + dT_dP*da_alpha_dT - da_alpha_dP_V)
*catanh(x0*(V + V + delta)).real)
@property
def dH_dep_dP_g_V(self):
r'''Derivative of departure enthalpy with respect to
        pressure at constant volume for the gas phase, [(J/mol)/Pa].
.. math::
\left(\frac{\partial H_{dep, g}}{\partial P}\right)_{V} =
- R \left(\frac{\partial T}{\partial P}\right)_V + V + \frac{2 \left(T
\left(\frac{\partial \left(\frac{\partial a \alpha}{\partial T}
\right)_P}{\partial P}\right)_{V}
+ \left(\frac{\partial a \alpha}{\partial T}\right)_P
\left(\frac{\partial T}{\partial P}\right)_V - \left(\frac{
\partial a \alpha}{\partial P}\right)_{V} \right)
\operatorname{atanh}{\left(\frac{2 V + \delta}
{\sqrt{\delta^{2} - 4 \epsilon}} \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}}
'''
T, V, delta, epsilon = self.T, self.V_g, self.delta, self.epsilon
da_alpha_dT, d2a_alpha_dT2 = self.da_alpha_dT, self.d2a_alpha_dT2
dT_dP = self.dT_dP_g
d2a_alpha_dTdP_V = d2a_alpha_dT2*dT_dP
da_alpha_dP_V = da_alpha_dT*dT_dP
try:
x0 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x0 = 1e100
return (-R*dT_dP + V + 2.0*x0*(
T*d2a_alpha_dTdP_V + dT_dP*da_alpha_dT - da_alpha_dP_V)
*catanh(x0*(V + V + delta)).real)
@property
def dH_dep_dV_g_T(self):
r'''Derivative of departure enthalpy with respect to
volume at constant temperature for the gas phase, [J/m^3].
.. math::
\left(\frac{\partial H_{dep, g}}{\partial V}\right)_{T} =
\left(\frac{\partial H_{dep, g}}{\partial P}\right)_{T} \cdot
\left(\frac{\partial P}{\partial V}\right)_{T}
'''
return self.dH_dep_dP_g*self.dP_dV_g
@property
def dH_dep_dV_l_T(self):
r'''Derivative of departure enthalpy with respect to
        volume at constant temperature for the liquid phase, [J/m^3].
.. math::
\left(\frac{\partial H_{dep, l}}{\partial V}\right)_{T} =
\left(\frac{\partial H_{dep, l}}{\partial P}\right)_{T} \cdot
\left(\frac{\partial P}{\partial V}\right)_{T}
'''
return self.dH_dep_dP_l*self.dP_dV_l
@property
def dH_dep_dV_g_P(self):
r'''Derivative of departure enthalpy with respect to
volume at constant pressure for the gas phase, [J/m^3].
.. math::
\left(\frac{\partial H_{dep, g}}{\partial V}\right)_{P} =
\left(\frac{\partial H_{dep, g}}{\partial T}\right)_{P} \cdot
\left(\frac{\partial T}{\partial V}\right)_{P}
'''
return self.dH_dep_dT_g*self.dT_dV_g
@property
def dH_dep_dV_l_P(self):
r'''Derivative of departure enthalpy with respect to
volume at constant pressure for the liquid phase, [J/m^3].
.. math::
\left(\frac{\partial H_{dep, l}}{\partial V}\right)_{P} =
\left(\frac{\partial H_{dep, l}}{\partial T}\right)_{P} \cdot
\left(\frac{\partial T}{\partial V}\right)_{P}
'''
return self.dH_dep_dT_l*self.dT_dV_l
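    # Consistency sketch linking the constant-P, constant-V and constant-T
    # enthalpy departure derivatives above through the total derivative
    # (dH_dep/dT)_P = (dH_dep/dT)_V + (dH_dep/dV)_T*(dV/dT)_P. ``eos`` is an
    # assumed, already-solved GCEOS subclass instance:
    #
    #     lhs = eos.dH_dep_dT_l
    #     rhs = eos.dH_dep_dT_l_V + eos.dH_dep_dV_l_T*eos.dV_dT_l
    #     # lhs and rhs should agree up to rounding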
@property
def dS_dep_dT_l(self):
r'''Derivative of departure entropy with respect to
temperature for the liquid phase, [(J/mol)/K^2].
.. math::
\frac{\partial S_{dep, l}}{\partial T} = - \frac{R \frac{d}{d T}
V{\left (T \right )}}{V{\left (T \right )}} + \frac{R \frac{d}{d T}
V{\left (T \right )}}{- b + V{\left (T \right )}} + \frac{4
\frac{d}{d T} V{\left (T \right )} \frac{d}{d T} \operatorname{a
\alpha}{\left (T \right )}}{\left(\delta^{2} - 4 \epsilon\right)
\left(- \frac{\left(\delta + 2 V{\left (T \right )}\right)^{2}}
{\delta^{2} - 4 \epsilon} + 1\right)} + \frac{2 \frac{d^{2}}{d
T^{2}} \operatorname{a \alpha}{\left (T \right )}}
{\sqrt{\delta^{2} - 4 \epsilon}} \operatorname{atanh}{\left (\frac{
\delta + 2 V{\left (T \right )}}{\sqrt{\delta^{2} - 4 \epsilon}}
\right )} + \frac{R^{2} T}{P V{\left (T \right )}} \left(\frac{P}
{R T} \frac{d}{d T} V{\left (T \right )} - \frac{P}{R T^{2}}
V{\left (T \right )}\right)
'''
x0 = self.V_l
x1 = 1./x0
x2 = self.dV_dT_l
x3 = R*x2
x4 = self.a_alpha
x5 = self.delta*self.delta - 4.0*self.epsilon
if x5 == 0.0:
x5 = 1e-100
x6 = x5**-0.5
x7 = self.delta + 2.0*x0
x8 = 1.0/x5
return (R*x1*(x2 - x0/self.T) - x1*x3 - 4.0*x2*x8*self.da_alpha_dT
/(x7*x7*x8 - 1.0) - x3/(self.b - x0)
+ 2.0*x6*catanh(x6*x7).real*self.d2a_alpha_dT2)
@property
def dS_dep_dT_g(self):
r'''Derivative of departure entropy with respect to
temperature for the gas phase, [(J/mol)/K^2].
.. math::
\frac{\partial S_{dep, g}}{\partial T} = - \frac{R \frac{d}{d T}
V{\left (T \right )}}{V{\left (T \right )}} + \frac{R \frac{d}{d T}
V{\left (T \right )}}{- b + V{\left (T \right )}} + \frac{4
\frac{d}{d T} V{\left (T \right )} \frac{d}{d T} \operatorname{a
\alpha}{\left (T \right )}}{\left(\delta^{2} - 4 \epsilon\right)
\left(- \frac{\left(\delta + 2 V{\left (T \right )}\right)^{2}}
{\delta^{2} - 4 \epsilon} + 1\right)} + \frac{2 \frac{d^{2}}{d
T^{2}} \operatorname{a \alpha}{\left (T \right )}}
{\sqrt{\delta^{2} - 4 \epsilon}} \operatorname{atanh}{\left (\frac{
\delta + 2 V{\left (T \right )}}{\sqrt{\delta^{2} - 4 \epsilon}}
\right )} + \frac{R^{2} T}{P V{\left (T \right )}} \left(\frac{P}
{R T} \frac{d}{d T} V{\left (T \right )} - \frac{P}{R T^{2}}
V{\left (T \right )}\right)
'''
x0 = self.V_g
if x0 > 1e50:
if self.S_dep_g == 0.0:
return 0.0
x1 = 1./x0
x2 = self.dV_dT_g
if isinf(x2):
return 0.0
x3 = R*x2
x4 = self.a_alpha
x5 = self.delta*self.delta - 4.0*self.epsilon
if x5 == 0.0:
x5 = 1e-100
x6 = x5**-0.5
x7 = self.delta + 2.0*x0
x8 = 1.0/x5
return (R*x1*(x2 - x0/self.T) - x1*x3 - 4.0*x2*x8*self.da_alpha_dT
/(x7*x7*x8 - 1.0) - x3/(self.b - x0)
+ 2.0*x6*catanh(x6*x7).real*self.d2a_alpha_dT2)
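    # The constant-pressure temperature derivative of the departure entropy is
    # the departure heat capacity divided by T, so it can be cross-checked
    # against the enthalpy analogue: (dS_dep/dT)_P = (dH_dep/dT)_P / T.
    # ``eos`` is an assumed, already-solved GCEOS subclass instance:
    #
    #     check_l = eos.dH_dep_dT_l/eos.T    # expected to equal eos.dS_dep_dT_l
    #     check_g = eos.dH_dep_dT_g/eos.T    # expected to equal eos.dS_dep_dT_g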
@property
def dS_dep_dT_l_V(self):
r'''Derivative of departure entropy with respect to
temperature at constant volume for the liquid phase, [(J/mol)/K^2].
.. math::
\left(\frac{\partial S_{dep, l}}{\partial T}\right)_{V} =
\frac{R^{2} T \left(\frac{V \frac{\partial}{\partial T} P{\left(T,V
\right)}}{R T} - \frac{V P{\left(T,V \right)}}{R T^{2}}\right)}{
V P{\left(T,V \right)}} + \frac{2 \operatorname{atanh}{\left(
\frac{2 V + \delta}{\sqrt{\delta^{2} - 4 \epsilon}} \right)}
\frac{d^{2}}{d T^{2}} \operatorname{a \alpha}{\left(T \right)}}
{\sqrt{\delta^{2} - 4 \epsilon}}
'''
T, P = self.T, self.P
delta, epsilon = self.delta, self.epsilon
V = self.V_l
dP_dT = self.dP_dT_l
try:
x1 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x1 = 1e100
return (R*(dP_dT/P - 1.0/T) + 2.0*x1*catanh(x1*(V + V + delta)).real*self.d2a_alpha_dT2)
@property
def dS_dep_dT_g_V(self):
r'''Derivative of departure entropy with respect to
temperature at constant volume for the gas phase, [(J/mol)/K^2].
.. math::
\left(\frac{\partial S_{dep, g}}{\partial T}\right)_{V} =
\frac{R^{2} T \left(\frac{V \frac{\partial}{\partial T} P{\left(T,V
\right)}}{R T} - \frac{V P{\left(T,V \right)}}{R T^{2}}\right)}{
V P{\left(T,V \right)}} + \frac{2 \operatorname{atanh}{\left(
\frac{2 V + \delta}{\sqrt{\delta^{2} - 4 \epsilon}} \right)}
\frac{d^{2}}{d T^{2}} \operatorname{a \alpha}{\left(T \right)}}
{\sqrt{\delta^{2} - 4 \epsilon}}
'''
T, P = self.T, self.P
delta, epsilon = self.delta, self.epsilon
V = self.V_g
dP_dT = self.dP_dT_g
try:
x1 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x1 = 1e100
return (R*(dP_dT/P - 1.0/T) + 2.0*x1*catanh(x1*(V + V + delta)).real*self.d2a_alpha_dT2)
@property
def dS_dep_dP_l(self):
r'''Derivative of departure entropy with respect to
pressure for the liquid phase, [(J/mol)/K/Pa].
.. math::
\frac{\partial S_{dep, l}}{\partial P} = - \frac{R \frac{d}{d P}
V{\left (P \right )}}{V{\left (P \right )}} + \frac{R \frac{d}{d P}
V{\left (P \right )}}{- b + V{\left (P \right )}} + \frac{4 \frac{
d}{d P} V{\left (P \right )} \frac{d}{d T} \operatorname{a \alpha}
{\left (T \right )}}{\left(\delta^{2} - 4 \epsilon\right) \left(
- \frac{\left(\delta + 2 V{\left (P \right )}\right)^{2}}{
\delta^{2} - 4 \epsilon} + 1\right)} + \frac{R^{2} T}{P V{\left (P
\right )}} \left(\frac{P}{R T} \frac{d}{d P} V{\left (P \right )}
+ \frac{V{\left (P \right )}}{R T}\right)
'''
x0 = self.V_l
x1 = 1.0/x0
x2 = self.dV_dP_l
x3 = R*x2
try:
x4 = 1.0/(self.delta*self.delta - 4.0*self.epsilon)
except ZeroDivisionError:
x4 = 1e50
return (-x1*x3 - 4.0*x2*x4*self.da_alpha_dT/(x4*(self.delta + 2*x0)**2
- 1) - x3/(self.b - x0) + R*x1*(self.P*x2 + x0)/self.P)
@property
def dS_dep_dP_g(self):
r'''Derivative of departure entropy with respect to
pressure for the gas phase, [(J/mol)/K/Pa].
.. math::
\frac{\partial S_{dep, g}}{\partial P} = - \frac{R \frac{d}{d P}
V{\left (P \right )}}{V{\left (P \right )}} + \frac{R \frac{d}{d P}
V{\left (P \right )}}{- b + V{\left (P \right )}} + \frac{4 \frac{
d}{d P} V{\left (P \right )} \frac{d}{d T} \operatorname{a \alpha}
{\left (T \right )}}{\left(\delta^{2} - 4 \epsilon\right) \left(
- \frac{\left(\delta + 2 V{\left (P \right )}\right)^{2}}{
\delta^{2} - 4 \epsilon} + 1\right)} + \frac{R^{2} T}{P V{\left (P
\right )}} \left(\frac{P}{R T} \frac{d}{d P} V{\left (P \right )}
+ \frac{V{\left (P \right )}}{R T}\right)
'''
x0 = self.V_g
x1 = 1.0/x0
x2 = self.dV_dP_g
x3 = R*x2
try:
x4 = 1.0/(self.delta*self.delta - 4.0*self.epsilon)
except ZeroDivisionError:
x4 = 1e200
ans = (-x1*x3 - 4.0*x2*x4*self.da_alpha_dT/(x4*(self.delta + 2*x0)**2
- 1) - x3/(self.b - x0) + R*x1*(self.P*x2 + x0)/self.P)
return ans
@property
def dS_dep_dP_g_V(self):
r'''Derivative of departure entropy with respect to
pressure at constant volume for the gas phase, [(J/mol)/K/Pa].
.. math::
\left(\frac{\partial S_{dep, g}}{\partial P}\right)_{V} =
\frac{2 \operatorname{atanh}{\left(\frac{2 V + \delta}{
\sqrt{\delta^{2} - 4 \epsilon}} \right)}
\left(\frac{\partial \left(\frac{\partial a \alpha}{\partial T}
\right)_P}{\partial P}\right)_{V}}{\sqrt{\delta^{2} - 4 \epsilon}}
+ \frac{R^{2} \left(- \frac{P V \frac{d}{d P} T{\left(P \right)}}
{R T^{2}{\left(P \right)}}
+ \frac{V}{R T{\left(P \right)}}\right) T{\left(P \right)}}{P V}
'''
T, P, delta, epsilon = self.T, self.P, self.delta, self.epsilon
d2a_alpha_dT2 = self.d2a_alpha_dT2
V, dT_dP = self.V_g, self.dT_dP_g
d2a_alpha_dTdP_V = d2a_alpha_dT2*dT_dP
try:
x0 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x0 = 1e100
return (2.0*x0*catanh(x0*(V + V + delta)).real*d2a_alpha_dTdP_V
- R*(P*dT_dP/T - 1.0)/P)
@property
def dS_dep_dP_l_V(self):
r'''Derivative of departure entropy with respect to
pressure at constant volume for the liquid phase, [(J/mol)/K/Pa].
.. math::
\left(\frac{\partial S_{dep, l}}{\partial P}\right)_{V} =
\frac{2 \operatorname{atanh}{\left(\frac{2 V + \delta}{
\sqrt{\delta^{2} - 4 \epsilon}} \right)}
\left(\frac{\partial \left(\frac{\partial a \alpha}{\partial T}
\right)_P}{\partial P}\right)_{V}}{\sqrt{\delta^{2} - 4 \epsilon}}
+ \frac{R^{2} \left(- \frac{P V \frac{d}{d P} T{\left(P \right)}}
{R T^{2}{\left(P \right)}}
+ \frac{V}{R T{\left(P \right)}}\right) T{\left(P \right)}}{P V}
'''
T, P, delta, epsilon = self.T, self.P, self.delta, self.epsilon
d2a_alpha_dT2 = self.d2a_alpha_dT2
V, dT_dP = self.V_l, self.dT_dP_l
d2a_alpha_dTdP_V = d2a_alpha_dT2*dT_dP
try:
x0 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x0 = 1e100
return (2.0*x0*catanh(x0*(V + V + delta)).real*d2a_alpha_dTdP_V
- R*(P*dT_dP/T - 1.0)/P)
@property
def dS_dep_dV_g_T(self):
r'''Derivative of departure entropy with respect to
volume at constant temperature for the gas phase, [J/K/m^3].
.. math::
\left(\frac{\partial S_{dep, g}}{\partial V}\right)_{T} =
\left(\frac{\partial S_{dep, g}}{\partial P}\right)_{T} \cdot
\left(\frac{\partial P}{\partial V}\right)_{T}
'''
return self.dS_dep_dP_g*self.dP_dV_g
@property
def dS_dep_dV_l_T(self):
r'''Derivative of departure entropy with respect to
        volume at constant temperature for the liquid phase, [J/K/m^3].
.. math::
\left(\frac{\partial S_{dep, l}}{\partial V}\right)_{T} =
\left(\frac{\partial S_{dep, l}}{\partial P}\right)_{T} \cdot
\left(\frac{\partial P}{\partial V}\right)_{T}
'''
return self.dS_dep_dP_l*self.dP_dV_l
@property
def dS_dep_dV_g_P(self):
r'''Derivative of departure entropy with respect to
volume at constant pressure for the gas phase, [J/K/m^3].
.. math::
\left(\frac{\partial S_{dep, g}}{\partial V}\right)_{P} =
\left(\frac{\partial S_{dep, g}}{\partial T}\right)_{P} \cdot
\left(\frac{\partial T}{\partial V}\right)_{P}
'''
return self.dS_dep_dT_g*self.dT_dV_g
@property
def dS_dep_dV_l_P(self):
r'''Derivative of departure entropy with respect to
volume at constant pressure for the liquid phase, [J/K/m^3].
.. math::
\left(\frac{\partial S_{dep, l}}{\partial V}\right)_{P} =
\left(\frac{\partial S_{dep, l}}{\partial T}\right)_{P} \cdot
\left(\frac{\partial T}{\partial V}\right)_{P}
'''
return self.dS_dep_dT_l*self.dT_dV_l
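    # Two consistency sketches for the entropy departure derivatives above.
    # The Maxwell relation (dS/dP)_T = -(dV/dT)_P combined with the ideal-gas
    # term -R/P gives (dS_dep/dP)_T = -(dV/dT)_P + R/P, and the total
    # derivative links the constant-P, constant-V and constant-T forms.
    # ``eos`` is an assumed, already-solved GCEOS subclass instance and ``R``
    # the gas constant used by this module:
    #
    #     maxwell = -eos.dV_dT_l + R/eos.P
    #     # maxwell should match eos.dS_dep_dP_l up to rounding
    #     total = eos.dS_dep_dT_l_V + eos.dS_dep_dV_l_T*eos.dV_dT_l
    #     # total should match eos.dS_dep_dT_l up to rounding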
@property
def d2H_dep_dT2_g(self):
r'''Second temperature derivative of departure enthalpy with respect to
temperature for the gas phase, [(J/mol)/K^2].
.. math::
\frac{\partial^2 H_{dep, g}}{\partial T^2} =
P \frac{d^{2}}{d T^{2}} V{\left(T \right)} - \frac{8 T \frac{d}{d T}
V{\left(T \right)} \frac{d^{2}}{d T^{2}} \operatorname{a\alpha}
{\left(T \right)}}{\left(\delta^{2} - 4 \epsilon\right) \left(\frac{
\left(\delta + 2 V{\left(T \right)}\right)^{2}}{\delta^{2}
- 4 \epsilon} - 1\right)} + \frac{2 T \operatorname{atanh}{\left(
\frac{\delta + 2 V{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}} \right)} \frac{d^{3}}{d T^{3}}
\operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}} + \frac{16 \left(\delta + 2 V{\left(T \right)}
\right) \left(T \frac{d}{d T} \operatorname{a\alpha}{\left(T
\right)} - \operatorname{a\alpha}{\left(T \right)}\right) \left(
\frac{d}{d T} V{\left(T \right)}\right)^{2}}{\left(\delta^{2}
- 4 \epsilon\right)^{2} \left(\frac{\left(\delta + 2 V{\left(T
\right)}\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)^{2}}
- \frac{4 \left(T \frac{d}{d T} \operatorname{a\alpha}{\left(T
\right)} - \operatorname{a\alpha}{\left(T \right)}\right)
\frac{d^{2}}{d T^{2}} V{\left(T \right)}}{\left(\delta^{2}
- 4 \epsilon\right) \left(\frac{\left(\delta + 2 V{\left(T \right)}
\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)} + \frac{2
\operatorname{atanh}{\left(\frac{\delta + 2 V{\left(T \right)}}
{\sqrt{\delta^{2} - 4 \epsilon}} \right)} \frac{d^{2}}{d T^{2}}
\operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}}
'''
T, P, delta, epsilon = self.T, self.P, self.delta, self.epsilon
x0 = self.V_g
x1 = self.d2V_dT2_g
x2 = self.a_alpha
x3 = self.d2a_alpha_dT2
x4 = delta*delta - 4.0*epsilon
try:
x5 = x4**-0.5
        except ZeroDivisionError:
x5 = 1e100
x6 = delta + x0 + x0
x7 = 2.0*x5*catanh(x5*x6).real
x8 = self.dV_dT_g
x9 = x5*x5
x10 = x6*x6*x9 - 1.0
x11 = x9/x10
x12 = T*self.da_alpha_dT - x2
x50 = self.d3a_alpha_dT3
        return (P*x1 + x3*x7 + T*x7*x50 - 4.0*x1*x11*x12 - 8.0*T*x11*x3*x8 + 16.0*x12*x6*x8*x8*x11*x11)
d2H_dep_dT2_g_P = d2H_dep_dT2_g
@property
def d2H_dep_dT2_l(self):
r'''Second temperature derivative of departure enthalpy with respect to
temperature for the liquid phase, [(J/mol)/K^2].
.. math::
\frac{\partial^2 H_{dep, l}}{\partial T^2} =
P \frac{d^{2}}{d T^{2}} V{\left(T \right)} - \frac{8 T \frac{d}{d T}
V{\left(T \right)} \frac{d^{2}}{d T^{2}} \operatorname{a\alpha}
{\left(T \right)}}{\left(\delta^{2} - 4 \epsilon\right) \left(\frac{
\left(\delta + 2 V{\left(T \right)}\right)^{2}}{\delta^{2}
- 4 \epsilon} - 1\right)} + \frac{2 T \operatorname{atanh}{\left(
\frac{\delta + 2 V{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}} \right)} \frac{d^{3}}{d T^{3}}
\operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}} + \frac{16 \left(\delta + 2 V{\left(T \right)}
\right) \left(T \frac{d}{d T} \operatorname{a\alpha}{\left(T
\right)} - \operatorname{a\alpha}{\left(T \right)}\right) \left(
\frac{d}{d T} V{\left(T \right)}\right)^{2}}{\left(\delta^{2}
- 4 \epsilon\right)^{2} \left(\frac{\left(\delta + 2 V{\left(T
\right)}\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)^{2}}
- \frac{4 \left(T \frac{d}{d T} \operatorname{a\alpha}{\left(T
\right)} - \operatorname{a\alpha}{\left(T \right)}\right)
\frac{d^{2}}{d T^{2}} V{\left(T \right)}}{\left(\delta^{2}
- 4 \epsilon\right) \left(\frac{\left(\delta + 2 V{\left(T \right)}
\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)} + \frac{2
\operatorname{atanh}{\left(\frac{\delta + 2 V{\left(T \right)}}
{\sqrt{\delta^{2} - 4 \epsilon}} \right)} \frac{d^{2}}{d T^{2}}
\operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}}
'''
T, P, delta, epsilon = self.T, self.P, self.delta, self.epsilon
x0 = self.V_l
x1 = self.d2V_dT2_l
x2 = self.a_alpha
x3 = self.d2a_alpha_dT2
x4 = delta*delta - 4.0*epsilon
try:
x5 = x4**-0.5
        except ZeroDivisionError:
x5 = 1e100
x6 = delta + x0 + x0
x7 = 2.0*x5*catanh(x5*x6).real
x8 = self.dV_dT_l
x9 = x5*x5
x10 = x6*x6*x9 - 1.0
x11 = x9/x10
x12 = T*self.da_alpha_dT - x2
x50 = self.d3a_alpha_dT3
        return (P*x1 + x3*x7 + T*x7*x50 - 4.0*x1*x11*x12 - 8.0*T*x11*x3*x8 + 16.0*x12*x6*x8*x8*x11*x11)
d2H_dep_dT2_l_P = d2H_dep_dT2_l
@property
def d2S_dep_dT2_g(self):
r'''Second temperature derivative of departure entropy with respect to
temperature for the gas phase, [(J/mol)/K^3].
.. math::
\frac{\partial^2 S_{dep, g}}{\partial T^2} = - \frac{R \left(
\frac{d}{d T} V{\left(T \right)} - \frac{V{\left(T \right)}}{T}
\right) \frac{d}{d T} V{\left(T \right)}}{V^{2}{\left(T \right)}}
+ \frac{R \left(\frac{d^{2}}{d T^{2}} V{\left(T \right)}
- \frac{2 \frac{d}{d T} V{\left(T \right)}}{T} + \frac{2
V{\left(T \right)}}{T^{2}}\right)}{V{\left(T \right)}}
- \frac{R \frac{d^{2}}{d T^{2}} V{\left(T \right)}}{V{\left(T
\right)}} + \frac{R \left(\frac{d}{d T} V{\left(T \right)}
\right)^{2}}{V^{2}{\left(T \right)}} - \frac{R \frac{d^{2}}{dT^{2}}
V{\left(T \right)}}{b - V{\left(T \right)}} - \frac{R \left(
\frac{d}{d T} V{\left(T \right)}\right)^{2}}{\left(b - V{\left(T
\right)}\right)^{2}} + \frac{R \left(\frac{d}{d T} V{\left(T
\right)} - \frac{V{\left(T \right)}}{T}\right)}{T V{\left(T
\right)}} + \frac{16 \left(\delta + 2 V{\left(T \right)}\right)
\left(\frac{d}{d T} V{\left(T \right)}\right)^{2} \frac{d}{d T}
\operatorname{a\alpha}{\left(T \right)}}{\left(\delta^{2}
- 4 \epsilon\right)^{2} \left(\frac{\left(\delta + 2 V{\left(T
\right)}\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)^{2}}
- \frac{8 \frac{d}{d T} V{\left(T \right)} \frac{d^{2}}{d T^{2}}
\operatorname{a\alpha}{\left(T \right)}}{\left(\delta^{2}
- 4 \epsilon\right) \left(\frac{\left(\delta + 2 V{\left(T \right)}
\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)} - \frac{4
\frac{d^{2}}{d T^{2}} V{\left(T \right)} \frac{d}{d T}
\operatorname{a\alpha}{\left(T \right)}}{\left(\delta^{2}
- 4 \epsilon\right) \left(\frac{\left(\delta + 2 V{\left(T \right)}
\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)} + \frac{2
\operatorname{atanh}{\left(\frac{\delta + 2 V{\left(T
\right)}}{\sqrt{\delta^{2} - 4 \epsilon}} \right)} \frac{d^{3}}
{d T^{3}} \operatorname{a\alpha}{\left(T \right)}}
{\sqrt{\delta^{2} - 4 \epsilon}}
'''
T, P, b, delta, epsilon = self.T, self.P, self.b, self.delta, self.epsilon
V = x0 = self.V_g
V_inv = 1.0/V
x1 = self.d2V_dT2_g
x2 = R*V_inv
x3 = V_inv*V_inv
x4 = self.dV_dT_g
x5 = x4*x4
x6 = R*x5
x7 = b - x0
x8 = 1.0/T
x9 = -x0*x8 + x4
x10 = x0 + x0
x11 = self.a_alpha
x12 = delta*delta - 4.0*epsilon
try:
x13 = x12**-0.5
except ZeroDivisionError:
x13 = 1e100
x14 = delta + x10
x15 = x13*x13
x16 = x14*x14*x15 - 1.0
x51 = 1.0/x16
x17 = x15*x51
x18 = self.da_alpha_dT
x50 = 1.0/x7
d2a_alpha_dT2 = self.d2a_alpha_dT2
d3a_alpha_dT3 = self.d3a_alpha_dT3
return (-R*x1*x50 - R*x3*x4*x9 - 4.0*x1*x17*x18 - x1*x2
+ 2.0*x13*catanh(x13*x14).real*d3a_alpha_dT3
- 8.0*x17*x4*d2a_alpha_dT2 + x2*x8*x9
+ x2*(x1 - 2.0*x4*x8 + x10*x8*x8) + x3*x6 - x6*x50*x50
+ 16.0*x14*x18*x5*x51*x51*x15*x15)
@property
def d2S_dep_dT2_l(self):
r'''Second temperature derivative of departure entropy with respect to
temperature for the liquid phase, [(J/mol)/K^3].
.. math::
\frac{\partial^2 S_{dep, l}}{\partial T^2} = - \frac{R \left(
\frac{d}{d T} V{\left(T \right)} - \frac{V{\left(T \right)}}{T}
\right) \frac{d}{d T} V{\left(T \right)}}{V^{2}{\left(T \right)}}
+ \frac{R \left(\frac{d^{2}}{d T^{2}} V{\left(T \right)}
- \frac{2 \frac{d}{d T} V{\left(T \right)}}{T} + \frac{2
V{\left(T \right)}}{T^{2}}\right)}{V{\left(T \right)}}
- \frac{R \frac{d^{2}}{d T^{2}} V{\left(T \right)}}{V{\left(T
\right)}} + \frac{R \left(\frac{d}{d T} V{\left(T \right)}
\right)^{2}}{V^{2}{\left(T \right)}} - \frac{R \frac{d^{2}}{dT^{2}}
V{\left(T \right)}}{b - V{\left(T \right)}} - \frac{R \left(
\frac{d}{d T} V{\left(T \right)}\right)^{2}}{\left(b - V{\left(T
\right)}\right)^{2}} + \frac{R \left(\frac{d}{d T} V{\left(T
\right)} - \frac{V{\left(T \right)}}{T}\right)}{T V{\left(T
\right)}} + \frac{16 \left(\delta + 2 V{\left(T \right)}\right)
\left(\frac{d}{d T} V{\left(T \right)}\right)^{2} \frac{d}{d T}
\operatorname{a\alpha}{\left(T \right)}}{\left(\delta^{2}
- 4 \epsilon\right)^{2} \left(\frac{\left(\delta + 2 V{\left(T
\right)}\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)^{2}}
- \frac{8 \frac{d}{d T} V{\left(T \right)} \frac{d^{2}}{d T^{2}}
\operatorname{a\alpha}{\left(T \right)}}{\left(\delta^{2}
- 4 \epsilon\right) \left(\frac{\left(\delta + 2 V{\left(T \right)}
\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)} - \frac{4
\frac{d^{2}}{d T^{2}} V{\left(T \right)} \frac{d}{d T}
\operatorname{a\alpha}{\left(T \right)}}{\left(\delta^{2}
- 4 \epsilon\right) \left(\frac{\left(\delta + 2 V{\left(T \right)}
\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)} + \frac{2
\operatorname{atanh}{\left(\frac{\delta + 2 V{\left(T
\right)}}{\sqrt{\delta^{2} - 4 \epsilon}} \right)} \frac{d^{3}}
{d T^{3}} \operatorname{a\alpha}{\left(T \right)}}
{\sqrt{\delta^{2} - 4 \epsilon}}
'''
T, P, b, delta, epsilon = self.T, self.P, self.b, self.delta, self.epsilon
V = x0 = self.V_l
V_inv = 1.0/V
x1 = self.d2V_dT2_l
x2 = R*V_inv
x3 = V_inv*V_inv
x4 = self.dV_dT_l
x5 = x4*x4
x6 = R*x5
x7 = b - x0
x8 = 1.0/T
x9 = -x0*x8 + x4
x10 = x0 + x0
x11 = self.a_alpha
x12 = delta*delta - 4.0*epsilon
try:
x13 = x12**-0.5
except ZeroDivisionError:
x13 = 1e100
x14 = delta + x10
x15 = x13*x13
x16 = x14*x14*x15 - 1.0
x51 = 1.0/x16
x17 = x15*x51
x18 = self.da_alpha_dT
x50 = 1.0/x7
d2a_alpha_dT2 = self.d2a_alpha_dT2
d3a_alpha_dT3 = self.d3a_alpha_dT3
return (-R*x1*x50 - R*x3*x4*x9 - 4.0*x1*x17*x18 - x1*x2
+ 2.0*x13*catanh(x13*x14).real*d3a_alpha_dT3
- 8.0*x17*x4*d2a_alpha_dT2 + x2*x8*x9
+ x2*(x1 - 2.0*x4*x8 + x10*x8*x8) + x3*x6 - x6*x50*x50
+ 16.0*x14*x18*x5*x51*x51*x15*x15)
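    # Because (dS_dep/dT)_P = (dH_dep/dT)_P/T, differentiating once more in T
    # gives (d2S_dep/dT2)_P = (d2H_dep/dT2)_P/T - (dH_dep/dT)_P/T^2, a cheap
    # cross-check of the long expressions above. ``eos`` is an assumed,
    # already-solved GCEOS subclass instance:
    #
    #     check = eos.d2H_dep_dT2_l/eos.T - eos.dH_dep_dT_l/(eos.T*eos.T)
    #     # check should match eos.d2S_dep_dT2_l up to rounding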
@property
def d2H_dep_dT2_g_V(self):
r'''Second temperature derivative of departure enthalpy with respect to
temperature at constant volume for the gas phase, [(J/mol)/K^2].
.. math::
\left(\frac{\partial^2 H_{dep, g}}{\partial T^2}\right)_V =
\frac{2 T \operatorname{atanh}{\left(\frac{2 V + \delta}{\sqrt{
\delta^{2} - 4 \epsilon}} \right)} \frac{d^{3}}{d T^{3}}
\operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}} + V \frac{\partial^{2}}{\partial T^{2}}
P{\left(V,T \right)} + \frac{2 \operatorname{atanh}{\left(\frac{
2 V + \delta}{\sqrt{\delta^{2} - 4 \epsilon}} \right)} \frac{d^{2}}
{d T^{2}} \operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}}
'''
V, T, delta, epsilon = self.V_g, self.T, self.delta, self.epsilon
x51 = delta*delta - 4.0*epsilon
d2a_alpha_dT2 = self.d2a_alpha_dT2
d3a_alpha_dT3 = self.d3a_alpha_dT3
d2P_dT2 = self.d2P_dT2_g
try:
x1 = x51**-0.5
except ZeroDivisionError:
x1 = 1e100
x2 = 2.0*x1*catanh(x1*(V + V + delta)).real
return T*x2*d3a_alpha_dT3 + V*d2P_dT2 + x2*d2a_alpha_dT2
@property
def d2H_dep_dT2_l_V(self):
r'''Second temperature derivative of departure enthalpy with respect to
temperature at constant volume for the liquid phase, [(J/mol)/K^2].
.. math::
\left(\frac{\partial^2 H_{dep, l}}{\partial T^2}\right)_V =
\frac{2 T \operatorname{atanh}{\left(\frac{2 V + \delta}{\sqrt{
\delta^{2} - 4 \epsilon}} \right)} \frac{d^{3}}{d T^{3}}
\operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}} + V \frac{\partial^{2}}{\partial T^{2}}
P{\left(V,T \right)} + \frac{2 \operatorname{atanh}{\left(\frac{
2 V + \delta}{\sqrt{\delta^{2} - 4 \epsilon}} \right)} \frac{d^{2}}
{d T^{2}} \operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}}
'''
V, T, delta, epsilon = self.V_l, self.T, self.delta, self.epsilon
x51 = delta*delta - 4.0*epsilon
d2a_alpha_dT2 = self.d2a_alpha_dT2
d3a_alpha_dT3 = self.d3a_alpha_dT3
d2P_dT2 = self.d2P_dT2_l
try:
x1 = x51**-0.5
except ZeroDivisionError:
x1 = 1e100
x2 = 2.0*x1*catanh(x1*(V + V + delta)).real
return T*x2*d3a_alpha_dT3 + V*d2P_dT2 + x2*d2a_alpha_dT2
@property
def d2S_dep_dT2_g_V(self):
r'''Second temperature derivative of departure entropy with respect to
temperature at constant volume for the gas phase, [(J/mol)/K^3].
.. math::
\left(\frac{\partial^2 S_{dep, g}}{\partial T^2}\right)_V =
- \frac{R \left(\frac{\partial}{\partial T} P{\left(V,T \right)}
- \frac{P{\left(V,T \right)}}{T}\right) \frac{\partial}{\partial T}
P{\left(V,T \right)}}{P^{2}{\left(V,T \right)}} + \frac{R \left(
\frac{\partial^{2}}{\partial T^{2}} P{\left(V,T \right)} - \frac{2
\frac{\partial}{\partial T} P{\left(V,T \right)}}{T} + \frac{2
P{\left(V,T \right)}}{T^{2}}\right)}{P{\left(V,T \right)}}
+ \frac{R \left(\frac{\partial}{\partial T} P{\left(V,T \right)}
- \frac{P{\left(V,T \right)}}{T}\right)}{T P{\left(V,T \right)}}
+ \frac{2 \operatorname{atanh}{\left(\frac{2 V + \delta}{\sqrt{
\delta^{2} - 4 \epsilon}} \right)} \frac{d^{3}}{d T^{3}}
\operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}}
'''
V, T, delta, epsilon = self.V_g, self.T, self.delta, self.epsilon
d2a_alpha_dT2 = self.d2a_alpha_dT2
d3a_alpha_dT3 = self.d3a_alpha_dT3
d2P_dT2 = self.d2P_dT2_g
x0 = 1.0/T
x1 = self.P
P_inv = 1.0/x1
x2 = self.dP_dT_g
x3 = -x0*x1 + x2
x4 = R*P_inv
try:
x5 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x5 = 1e100
return (-R*x2*x3*P_inv*P_inv + x0*x3*x4 + x4*(d2P_dT2 - 2.0*x0*x2
+ 2.0*x1*x0*x0) + 2.0*x5*catanh(x5*(V + V + delta)
).real*d3a_alpha_dT3)
@property
def d2S_dep_dT2_l_V(self):
r'''Second temperature derivative of departure entropy with respect to
temperature at constant volume for the liquid phase, [(J/mol)/K^3].
.. math::
\left(\frac{\partial^2 S_{dep, l}}{\partial T^2}\right)_V =
- \frac{R \left(\frac{\partial}{\partial T} P{\left(V,T \right)}
- \frac{P{\left(V,T \right)}}{T}\right) \frac{\partial}{\partial T}
P{\left(V,T \right)}}{P^{2}{\left(V,T \right)}} + \frac{R \left(
\frac{\partial^{2}}{\partial T^{2}} P{\left(V,T \right)} - \frac{2
\frac{\partial}{\partial T} P{\left(V,T \right)}}{T} + \frac{2
P{\left(V,T \right)}}{T^{2}}\right)}{P{\left(V,T \right)}}
+ \frac{R \left(\frac{\partial}{\partial T} P{\left(V,T \right)}
- \frac{P{\left(V,T \right)}}{T}\right)}{T P{\left(V,T \right)}}
+ \frac{2 \operatorname{atanh}{\left(\frac{2 V + \delta}{\sqrt{
\delta^{2} - 4 \epsilon}} \right)} \frac{d^{3}}{d T^{3}}
\operatorname{a\alpha}{\left(T \right)}}{\sqrt{\delta^{2}
- 4 \epsilon}}
'''
V, T, delta, epsilon = self.V_l, self.T, self.delta, self.epsilon
d2a_alpha_dT2 = self.d2a_alpha_dT2
d3a_alpha_dT3 = self.d3a_alpha_dT3
d2P_dT2 = self.d2P_dT2_l
x0 = 1.0/T
x1 = self.P
P_inv = 1.0/x1
x2 = self.dP_dT_l
x3 = -x0*x1 + x2
x4 = R*P_inv
try:
x5 = (delta*delta - 4.0*epsilon)**-0.5
except ZeroDivisionError:
x5 = 1e100
return (-R*x2*x3*P_inv*P_inv + x0*x3*x4 + x4*(d2P_dT2 - 2.0*x0*x2
+ 2.0*x1*x0*x0) + 2.0*x5*catanh(x5*(V + V + delta)
).real*d3a_alpha_dT3)
@property
def d2H_dep_dTdP_g(self):
r'''Temperature and pressure derivative of departure enthalpy
at constant pressure then temperature for the gas phase,
[(J/mol)/K/Pa].
.. math::
\left(\frac{\partial^2 H_{dep, g}}{\partial T \partial P}\right)_{T, P}
= P \frac{\partial^{2}}{\partial T\partial P} V{\left(T,P \right)}
- \frac{4 T \frac{\partial}{\partial P} V{\left(T,P \right)}
\frac{d^{2}}{d T^{2}} \operatorname{a\alpha}{\left(T \right)}}
{\left(\delta^{2} - 4 \epsilon\right) \left(\frac{\left(\delta
+ 2 V{\left(T,P \right)}\right)^{2}}{\delta^{2} - 4 \epsilon}
- 1\right)} + \frac{16 \left(\delta + 2 V{\left(T,P \right)}\right)
\left(T \frac{d}{d T} \operatorname{a\alpha}{\left(T \right)}
- \operatorname{a\alpha}{\left(T \right)}\right) \frac{\partial}
{\partial P} V{\left(T,P \right)} \frac{\partial}{\partial T}
V{\left(T,P \right)}}{\left(\delta^{2} - 4 \epsilon\right)^{2}
\left(\frac{\left(\delta + 2 V{\left(T,P \right)}\right)^{2}}
{\delta^{2} - 4 \epsilon} - 1\right)^{2}} + \frac{\partial}
{\partial T} V{\left(T,P \right)} - \frac{4 \left(T \frac{d}{d T}
\operatorname{a\alpha}{\left(T \right)} - \operatorname{a\alpha}
{\left(T \right)}\right) \frac{\partial^{2}}{\partial T\partial P}
V{\left(T,P \right)}}{\left(\delta^{2} - 4 \epsilon\right)
\left(\frac{\left(\delta + 2 V{\left(T,P \right)}\right)^{2}}
{\delta^{2} - 4 \epsilon} - 1\right)}
'''
V, T, P, delta, epsilon = self.V_g, self.T, self.P, self.delta, self.epsilon
dV_dT = self.dV_dT_g
d2V_dTdP = self.d2V_dTdP_g
dV_dP = self.dV_dP_g
a_alpha = self.a_alpha
d2a_alpha_dT2 = self.d2a_alpha_dT2
x5 = delta*delta - 4.0*epsilon
try:
x6 = 1.0/x5
except ZeroDivisionError:
x6 = 1e100
x7 = delta + V + V
x8 = x6*x7*x7 - 1.0
x8_inv = 1.0/x8
x9 = 4.0*x6*x8_inv
x10 = T*self.da_alpha_dT - a_alpha
return (P*d2V_dTdP - T*dV_dP*x9*d2a_alpha_dT2
+ 16.0*dV_dT*x10*dV_dP*x7*x6*x6*x8_inv*x8_inv
+ dV_dT - x10*d2V_dTdP*x9)
@property
def d2H_dep_dTdP_l(self):
r'''Temperature and pressure derivative of departure enthalpy
at constant pressure then temperature for the liquid phase,
[(J/mol)/K/Pa].
.. math::
            \left(\frac{\partial^2 H_{dep, l}}{\partial T \partial P}\right)_{T, P}
= P \frac{\partial^{2}}{\partial T\partial P} V{\left(T,P \right)}
- \frac{4 T \frac{\partial}{\partial P} V{\left(T,P \right)}
\frac{d^{2}}{d T^{2}} \operatorname{a\alpha}{\left(T \right)}}
{\left(\delta^{2} - 4 \epsilon\right) \left(\frac{\left(\delta
+ 2 V{\left(T,P \right)}\right)^{2}}{\delta^{2} - 4 \epsilon}
- 1\right)} + \frac{16 \left(\delta + 2 V{\left(T,P \right)}\right)
\left(T \frac{d}{d T} \operatorname{a\alpha}{\left(T \right)}
- \operatorname{a\alpha}{\left(T \right)}\right) \frac{\partial}
{\partial P} V{\left(T,P \right)} \frac{\partial}{\partial T}
V{\left(T,P \right)}}{\left(\delta^{2} - 4 \epsilon\right)^{2}
\left(\frac{\left(\delta + 2 V{\left(T,P \right)}\right)^{2}}
{\delta^{2} - 4 \epsilon} - 1\right)^{2}} + \frac{\partial}
{\partial T} V{\left(T,P \right)} - \frac{4 \left(T \frac{d}{d T}
\operatorname{a\alpha}{\left(T \right)} - \operatorname{a\alpha}
{\left(T \right)}\right) \frac{\partial^{2}}{\partial T\partial P}
V{\left(T,P \right)}}{\left(\delta^{2} - 4 \epsilon\right)
\left(\frac{\left(\delta + 2 V{\left(T,P \right)}\right)^{2}}
{\delta^{2} - 4 \epsilon} - 1\right)}
'''
V, T, P, delta, epsilon = self.V_l, self.T, self.P, self.delta, self.epsilon
dV_dT = self.dV_dT_l
d2V_dTdP = self.d2V_dTdP_l
dV_dP = self.dV_dP_l
a_alpha = self.a_alpha
d2a_alpha_dT2 = self.d2a_alpha_dT2
x5 = delta*delta - 4.0*epsilon
try:
x6 = 1.0/x5
except ZeroDivisionError:
x6 = 1e100
x7 = delta + V + V
x8 = x6*x7*x7 - 1.0
x8_inv = 1.0/x8
x9 = 4.0*x6*x8_inv
x10 = T*self.da_alpha_dT - a_alpha
return (P*d2V_dTdP - T*dV_dP*x9*d2a_alpha_dT2
+ 16.0*dV_dT*x10*dV_dP*x7*x6*x6*x8_inv*x8_inv
+ dV_dT - x10*d2V_dTdP*x9)
@property
def d2S_dep_dTdP_g(self):
r'''Temperature and pressure derivative of departure entropy
at constant pressure then temperature for the gas phase,
[(J/mol)/K^2/Pa].
.. math::
\left(\frac{\partial^2 S_{dep, g}}{\partial T \partial P}\right)_{T, P}
= - \frac{R \frac{\partial^{2}}{\partial T\partial P} V{\left(T,P
\right)}}{V{\left(T,P \right)}} + \frac{R \frac{\partial}{\partial
P} V{\left(T,P \right)} \frac{\partial}{\partial T} V{\left(T,P
\right)}}{V^{2}{\left(T,P \right)}} - \frac{R \frac{\partial^{2}}
{\partial T\partial P} V{\left(T,P \right)}}{b - V{\left(T,P
\right)}} - \frac{R \frac{\partial}{\partial P} V{\left(T,P
\right)} \frac{\partial}{\partial T} V{\left(T,P \right)}}{\left(b
- V{\left(T,P \right)}\right)^{2}} + \frac{16 \left(\delta
+ 2 V{\left(T,P \right)}\right) \frac{\partial}{\partial P}
V{\left(T,P \right)} \frac{\partial}{\partial T} V{\left(T,P
\right)} \frac{d}{d T} \operatorname{a\alpha}{\left(T \right)}}
{\left(\delta^{2} - 4 \epsilon\right)^{2} \left(\frac{\left(\delta
+ 2 V{\left(T,P \right)}\right)^{2}}{\delta^{2} - 4 \epsilon}
- 1\right)^{2}} - \frac{4 \frac{\partial}{\partial P} V{\left(T,P
\right)} \frac{d^{2}}{d T^{2}} \operatorname{a\alpha}{\left(T
\right)}}{\left(\delta^{2} - 4 \epsilon\right) \left(\frac{\left(
\delta + 2 V{\left(T,P \right)}\right)^{2}}{\delta^{2}
- 4 \epsilon} - 1\right)} - \frac{4 \frac{d}{d T}
\operatorname{a\alpha}{\left(T \right)} \frac{\partial^{2}}
{\partial T\partial P} V{\left(T,P \right)}}{\left(\delta^{2}
- 4 \epsilon\right) \left(\frac{\left(\delta + 2 V{\left(T,P
\right)}\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)}
- \frac{R \left(P \frac{\partial}{\partial P} V{\left(T,P \right)}
+ V{\left(T,P \right)}\right) \frac{\partial}{\partial T}
V{\left(T,P \right)}}{P V^{2}{\left(T,P \right)}} + \frac{R
\left(P \frac{\partial^{2}}{\partial T\partial P} V{\left(T,P
\right)} - \frac{P \frac{\partial}{\partial P} V{\left(T,P
\right)}}{T} + \frac{\partial}{\partial T} V{\left(T,P \right)}
- \frac{V{\left(T,P \right)}}{T}\right)}{P V{\left(T,P \right)}}
+ \frac{R \left(P \frac{\partial}{\partial P} V{\left(T,P \right)}
+ V{\left(T,P \right)}\right)}{P T V{\left(T,P \right)}}
'''
V, T, P, b, delta, epsilon = self.V_g, self.T, self.P, self.b, self.delta, self.epsilon
dV_dT = self.dV_dT_g
d2V_dTdP = self.d2V_dTdP_g
dV_dP = self.dV_dP_g
x0 = V
V_inv = 1.0/V
x2 = d2V_dTdP
x3 = R*x2
x4 = dV_dT
x5 = x4*V_inv*V_inv
x6 = dV_dP
x7 = R*x6
x8 = b - V
x8_inv = 1.0/x8
x9 = 1.0/T
x10 = P*x6
x11 = V + x10
x12 = R/P
x13 = V_inv*x12
x14 = self.a_alpha
x15 = delta*delta - 4.0*epsilon
try:
x16 = 1.0/x15
except ZeroDivisionError:
x16 = 1e100
x17 = delta + V + V
x18 = x16*x17*x17 - 1.0
x50 = 1.0/x18
x19 = 4.0*x16*x50
x20 = self.da_alpha_dT
return (-V_inv*x3 - x11*x12*x5 + x11*x13*x9 + x13*(P*x2 - V*x9 - x10*x9
+ x4) - x19*x2*x20 - x19*x6*self.d2a_alpha_dT2 - x3*x8_inv
- x4*x7*x8_inv*x8_inv + x5*x7
+ 16.0*x17*x20*x4*x6*x16*x16*x50*x50)
@property
def d2S_dep_dTdP_l(self):
r'''Temperature and pressure derivative of departure entropy
at constant pressure then temperature for the liquid phase,
[(J/mol)/K^2/Pa].
.. math::
\left(\frac{\partial^2 S_{dep, l}}{\partial T \partial P}\right)_{T, P}
= - \frac{R \frac{\partial^{2}}{\partial T\partial P} V{\left(T,P
\right)}}{V{\left(T,P \right)}} + \frac{R \frac{\partial}{\partial
P} V{\left(T,P \right)} \frac{\partial}{\partial T} V{\left(T,P
\right)}}{V^{2}{\left(T,P \right)}} - \frac{R \frac{\partial^{2}}
{\partial T\partial P} V{\left(T,P \right)}}{b - V{\left(T,P
\right)}} - \frac{R \frac{\partial}{\partial P} V{\left(T,P
\right)} \frac{\partial}{\partial T} V{\left(T,P \right)}}{\left(b
- V{\left(T,P \right)}\right)^{2}} + \frac{16 \left(\delta
+ 2 V{\left(T,P \right)}\right) \frac{\partial}{\partial P}
V{\left(T,P \right)} \frac{\partial}{\partial T} V{\left(T,P
\right)} \frac{d}{d T} \operatorname{a\alpha}{\left(T \right)}}
{\left(\delta^{2} - 4 \epsilon\right)^{2} \left(\frac{\left(\delta
+ 2 V{\left(T,P \right)}\right)^{2}}{\delta^{2} - 4 \epsilon}
- 1\right)^{2}} - \frac{4 \frac{\partial}{\partial P} V{\left(T,P
\right)} \frac{d^{2}}{d T^{2}} \operatorname{a\alpha}{\left(T
\right)}}{\left(\delta^{2} - 4 \epsilon\right) \left(\frac{\left(
\delta + 2 V{\left(T,P \right)}\right)^{2}}{\delta^{2}
- 4 \epsilon} - 1\right)} - \frac{4 \frac{d}{d T}
\operatorname{a\alpha}{\left(T \right)} \frac{\partial^{2}}
{\partial T\partial P} V{\left(T,P \right)}}{\left(\delta^{2}
- 4 \epsilon\right) \left(\frac{\left(\delta + 2 V{\left(T,P
\right)}\right)^{2}}{\delta^{2} - 4 \epsilon} - 1\right)}
- \frac{R \left(P \frac{\partial}{\partial P} V{\left(T,P \right)}
+ V{\left(T,P \right)}\right) \frac{\partial}{\partial T}
V{\left(T,P \right)}}{P V^{2}{\left(T,P \right)}} + \frac{R
\left(P \frac{\partial^{2}}{\partial T\partial P} V{\left(T,P
\right)} - \frac{P \frac{\partial}{\partial P} V{\left(T,P
\right)}}{T} + \frac{\partial}{\partial T} V{\left(T,P \right)}
- \frac{V{\left(T,P \right)}}{T}\right)}{P V{\left(T,P \right)}}
+ \frac{R \left(P \frac{\partial}{\partial P} V{\left(T,P \right)}
+ V{\left(T,P \right)}\right)}{P T V{\left(T,P \right)}}
'''
V, T, P, b, delta, epsilon = self.V_l, self.T, self.P, self.b, self.delta, self.epsilon
dV_dT = self.dV_dT_l
d2V_dTdP = self.d2V_dTdP_l
dV_dP = self.dV_dP_l
x0 = V
V_inv = 1.0/V
x2 = d2V_dTdP
x3 = R*x2
x4 = dV_dT
x5 = x4*V_inv*V_inv
x6 = dV_dP
x7 = R*x6
x8 = b - V
x8_inv = 1.0/x8
x9 = 1.0/T
x10 = P*x6
x11 = V + x10
x12 = R/P
x13 = V_inv*x12
x14 = self.a_alpha
x15 = delta*delta - 4.0*epsilon
try:
x16 = 1.0/x15
except ZeroDivisionError:
x16 = 1e100
x17 = delta + V + V
x18 = x16*x17*x17 - 1.0
x50 = 1.0/x18
x19 = 4.0*x16*x50
x20 = self.da_alpha_dT
return (-V_inv*x3 - x11*x12*x5 + x11*x13*x9 + x13*(P*x2 - V*x9 - x10*x9
+ x4) - x19*x2*x20 - x19*x6*self.d2a_alpha_dT2 - x3*x8_inv
- x4*x7*x8_inv*x8_inv + x5*x7
+ 16.0*x17*x20*x4*x6*x16*x16*x50*x50)
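    # The mixed temperature-pressure derivatives above admit two single-state
    # cross-checks: differentiating (dS_dep/dP)_T = -(dV/dT)_P + R/P with
    # respect to T gives d2S_dep/dTdP = -(d2V/dT2)_P, while dividing the
    # enthalpy analogue by T gives d2S_dep/dTdP = (d2H_dep/dTdP)/T. ``eos`` is
    # an assumed, already-solved GCEOS subclass instance:
    #
    #     check1 = -eos.d2V_dT2_l
    #     check2 = eos.d2H_dep_dTdP_l/eos.T
    #     # both should match eos.d2S_dep_dTdP_l up to rounding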
@property
def dfugacity_dT_l(self):
r'''Derivative of fugacity with respect to temperature for the liquid
phase, [Pa/K].
.. math::
\frac{\partial (\text{fugacity})_{l}}{\partial T} = P \left(\frac{1}
{R T} \left(- T \frac{\partial}{\partial T} \operatorname{S_{dep}}
{\left (T,P \right )} - \operatorname{S_{dep}}{\left (T,P \right )}
+ \frac{\partial}{\partial T} \operatorname{H_{dep}}{\left (T,P
\right )}\right) - \frac{1}{R T^{2}} \left(- T \operatorname{
S_{dep}}{\left (T,P \right )} + \operatorname{H_{dep}}{\left
(T,P \right )}\right)\right) e^{\frac{1}{R T} \left(- T
\operatorname{S_{dep}}{\left (T,P \right )} + \operatorname
{H_{dep}}{\left (T,P \right )}\right)}
'''
T, P = self.T, self.P
T_inv = 1.0/T
S_dep_l = self.S_dep_l
x4 = R_inv*(self.H_dep_l - T*S_dep_l)
return P*(T_inv*R_inv*(self.dH_dep_dT_l - T*self.dS_dep_dT_l - S_dep_l)
- x4*T_inv*T_inv)*exp(T_inv*x4)
@property
def dfugacity_dT_g(self):
r'''Derivative of fugacity with respect to temperature for the gas
phase, [Pa/K].
.. math::
\frac{\partial (\text{fugacity})_{g}}{\partial T} = P \left(\frac{1}
{R T} \left(- T \frac{\partial}{\partial T} \operatorname{S_{dep}}
{\left (T,P \right )} - \operatorname{S_{dep}}{\left (T,P \right )}
+ \frac{\partial}{\partial T} \operatorname{H_{dep}}{\left (T,P
\right )}\right) - \frac{1}{R T^{2}} \left(- T \operatorname{
S_{dep}}{\left (T,P \right )} + \operatorname{H_{dep}}{\left
(T,P \right )}\right)\right) e^{\frac{1}{R T} \left(- T
\operatorname{S_{dep}}{\left (T,P \right )} + \operatorname
{H_{dep}}{\left (T,P \right )}\right)}
'''
T, P = self.T, self.P
T_inv = 1.0/T
S_dep_g = self.S_dep_g
x4 = R_inv*(self.H_dep_g - T*S_dep_g)
return P*(T_inv*R_inv*(self.dH_dep_dT_g - T*self.dS_dep_dT_g - S_dep_g)
- x4*T_inv*T_inv)*exp(T_inv*x4)
@property
def dfugacity_dP_l(self):
r'''Derivative of fugacity with respect to pressure for the liquid
phase, [-].
.. math::
\frac{\partial (\text{fugacity})_{l}}{\partial P} = \frac{P}{R T}
\left(- T \frac{\partial}{\partial P} \operatorname{S_{dep}}{\left
(T,P \right )} + \frac{\partial}{\partial P} \operatorname{H_{dep}}
{\left (T,P \right )}\right) e^{\frac{1}{R T} \left(- T
\operatorname{S_{dep}}{\left (T,P \right )} + \operatorname{
H_{dep}}{\left (T,P \right )}\right)} + e^{\frac{1}{R T}
\left(- T \operatorname{S_{dep}}{\left (T,P \right )}
+ \operatorname{H_{dep}}{\left (T,P \right )}\right)}
'''
T, P = self.T, self.P
x0 = 1.0/(R*T)
return (1.0 - P*x0*(T*self.dS_dep_dP_l - self.dH_dep_dP_l))*exp(
-x0*(T*self.S_dep_l - self.H_dep_l))
@property
def dfugacity_dP_g(self):
r'''Derivative of fugacity with respect to pressure for the gas
phase, [-].
.. math::
\frac{\partial (\text{fugacity})_{g}}{\partial P} = \frac{P}{R T}
\left(- T \frac{\partial}{\partial P} \operatorname{S_{dep}}{\left
(T,P \right )} + \frac{\partial}{\partial P} \operatorname{H_{dep}}
{\left (T,P \right )}\right) e^{\frac{1}{R T} \left(- T
\operatorname{S_{dep}}{\left (T,P \right )} + \operatorname{
H_{dep}}{\left (T,P \right )}\right)} + e^{\frac{1}{R T}
\left(- T \operatorname{S_{dep}}{\left (T,P \right )}
+ \operatorname{H_{dep}}{\left (T,P \right )}\right)}
'''
T, P = self.T, self.P
x0 = 1.0/(R*T)
try:
ans = (1.0 - P*x0*(T*self.dS_dep_dP_g - self.dH_dep_dP_g))*exp(
-x0*(T*self.S_dep_g - self.H_dep_g))
if isinf(ans) or isnan(ans):
return 1.0
return ans
except Exception as e:
if P < 1e-50:
# Applies to gas phase only!
return 1.0
else:
raise e
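# Note on the fallbacks above: as P approaches zero the gas behaves ideally,
# the fugacity approaches P itself, and d(fugacity)/dP approaches 1.0, which
# is why 1.0 is returned when the exponential overflows or P is tiny.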
@property
def dphi_dT_l(self):
r'''Derivative of fugacity coefficient with respect to temperature for
the liquid phase, [1/K].
.. math::
\frac{\partial \phi}{\partial T} = \left(\frac{- T \frac{\partial}
{\partial T} \operatorname{S_{dep}}{\left(T,P \right)}
- \operatorname{S_{dep}}{\left(T,P \right)} + \frac{\partial}
{\partial T} \operatorname{H_{dep}}{\left(T,P \right)}}{R T}
- \frac{- T \operatorname{S_{dep}}{\left(T,P \right)}
+ \operatorname{H_{dep}}{\left(T,P \right)}}{R T^{2}}\right)
e^{\frac{- T \operatorname{S_{dep}}{\left(T,P \right)}
+ \operatorname{H_{dep}}{\left(T,P \right)}}{R T}}
'''
T, P = self.T, self.P
T_inv = 1.0/T
x4 = T_inv*(T*self.S_dep_l - self.H_dep_l)
return (-R_inv*T_inv*(T*self.dS_dep_dT_l + self.S_dep_l - x4
- self.dH_dep_dT_l)*exp(-R_inv*x4))
@property
def dphi_dT_g(self):
r'''Derivative of fugacity coefficient with respect to temperature for
the gas phase, [1/K].
.. math::
\frac{\partial \phi}{\partial T} = \left(\frac{- T \frac{\partial}
{\partial T} \operatorname{S_{dep}}{\left(T,P \right)}
- \operatorname{S_{dep}}{\left(T,P \right)} + \frac{\partial}
{\partial T} \operatorname{H_{dep}}{\left(T,P \right)}}{R T}
- \frac{- T \operatorname{S_{dep}}{\left(T,P \right)}
+ \operatorname{H_{dep}}{\left(T,P \right)}}{R T^{2}}\right)
e^{\frac{- T \operatorname{S_{dep}}{\left(T,P \right)}
+ \operatorname{H_{dep}}{\left(T,P \right)}}{R T}}
'''
T, P = self.T, self.P
T_inv = 1.0/T
x4 = T_inv*(T*self.S_dep_g - self.H_dep_g)
return (-R_inv*T_inv*(T*self.dS_dep_dT_g + self.S_dep_g - x4
- self.dH_dep_dT_g)*exp(-R_inv*x4))
@property
def dphi_dP_l(self):
r'''Derivative of fugacity coefficient with respect to pressure for
the liquid phase, [1/Pa].
.. math::
\frac{\partial \phi}{\partial P} = \frac{\left(- T \frac{\partial}
{\partial P} \operatorname{S_{dep}}{\left(T,P \right)}
+ \frac{\partial}{\partial P} \operatorname{H_{dep}}{\left(T,P
\right)}\right) e^{\frac{- T \operatorname{S_{dep}}{\left(T,P
\right)} + \operatorname{H_{dep}}{\left(T,P \right)}}{R T}}}{R T}
'''
T = self.T
x0 = self.S_dep_l
x1 = self.H_dep_l
x2 = 1.0/(R*T)
return -x2*(T*self.dS_dep_dP_l - self.dH_dep_dP_l)*exp(-x2*(T*x0 - x1))
@property
def dphi_dP_g(self):
r'''Derivative of fugacity coefficient with respect to pressure for
the gas phase, [1/Pa].
.. math::
\frac{\partial \phi}{\partial P} = \frac{\left(- T \frac{\partial}
{\partial P} \operatorname{S_{dep}}{\left(T,P \right)}
+ \frac{\partial}{\partial P} \operatorname{H_{dep}}{\left(T,P
\right)}\right) e^{\frac{- T \operatorname{S_{dep}}{\left(T,P
\right)} + \operatorname{H_{dep}}{\left(T,P \right)}}{R T}}}{R T}
'''
T = self.T
x0 = self.S_dep_g
x1 = self.H_dep_g
x2 = 1.0/(R*T)
return -x2*(T*self.dS_dep_dP_g - self.dH_dep_dP_g)*exp(-x2*(T*x0 - x1))
@property
def dbeta_dT_g(self):
r'''Derivative of isobaric expansion coefficient with respect to
temperature for the gas phase, [1/K^2].
.. math::
\frac{\partial \beta_g}{\partial T} = \frac{\frac{\partial^{2}}
{\partial T^{2}} V{\left (T,P \right )_g}}{V{\left (T,P \right )_g}} -
\frac{\left(\frac{\partial}{\partial T} V{\left (T,P \right )_g}
\right)^{2}}{V^{2}{\left (T,P \right )_g}}
'''
V_inv = 1.0/self.V_g
dV_dT = self.dV_dT_g
return V_inv*(self.d2V_dT2_g - dV_dT*dV_dT*V_inv)
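# A hedged verification sketch, not part of the original code: dbeta_dT_g can
# be checked with a central difference of beta_g (assumes the PR class defined
# later in this module):
#   dT = 1e-3
#   lo = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0-dT, P=1E6)
#   hi = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0+dT, P=1E6)
#   numerical = (hi.beta_g - lo.beta_g)/(2.0*dT)  # compare to dbeta_dT_g at 400 K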
@property
def dbeta_dT_l(self):
r'''Derivative of isobaric expansion coefficient with respect to
temperature for the liquid phase, [1/K^2].
.. math::
\frac{\partial \beta_l}{\partial T} = \frac{\frac{\partial^{2}}
{\partial T^{2}} V{\left (T,P \right )_l}}{V{\left (T,P \right )_l}} -
\frac{\left(\frac{\partial}{\partial T} V{\left (T,P \right )_l}
\right)^{2}}{V^{2}{\left (T,P \right )_l}}
'''
V_inv = 1.0/self.V_l
dV_dT = self.dV_dT_l
return V_inv*(self.d2V_dT2_l - dV_dT*dV_dT*V_inv)
@property
def dbeta_dP_g(self):
r'''Derivative of isobaric expansion coefficient with respect to
pressure for the gas phase, [1/(Pa*K)].
.. math::
\frac{\partial \beta_g}{\partial P} = \frac{\frac{\partial^{2}}
{\partial T\partial P} V{\left (T,P \right )_g}}{V{\left (T,
P \right )_g}} - \frac{\frac{\partial}{\partial P} V{\left (T,P
\right )_g} \frac{\partial}{\partial T} V{\left (T,P \right )_g}}
{V^{2}{\left (T,P \right )_g}}
'''
V_inv = 1.0/self.V_g
dV_dT = self.dV_dT_g
dV_dP = self.dV_dP_g
return V_inv*(self.d2V_dTdP_g - dV_dT*dV_dP*V_inv)
@property
def dbeta_dP_l(self):
r'''Derivative of isobaric expansion coefficient with respect to
pressure for the liquid phase, [1/(Pa*K)].
.. math::
\frac{\partial \beta_g}{\partial P} = \frac{\frac{\partial^{2}}
{\partial T\partial P} V{\left (T,P \right )_l}}{V{\left (T,
P \right )_l}} - \frac{\frac{\partial}{\partial P} V{\left (T,P
\right )_l} \frac{\partial}{\partial T} V{\left (T,P \right )_l}}
{V^{2}{\left (T,P \right )_l}}
'''
V_inv = 1.0/self.V_l
dV_dT = self.dV_dT_l
dV_dP = self.dV_dP_l
return V_inv*(self.d2V_dTdP_l - dV_dT*dV_dP*V_inv)
@property
def da_alpha_dP_g_V(self):
r'''Derivative of the `a_alpha` with respect to
pressure at constant volume (varying T) for the gas phase,
[J^2/mol^2/Pa^2].
.. math::
\left(\frac{\partial a \alpha}{\partial P}\right)_{V}
= \left(\frac{\partial a \alpha}{\partial T}\right)_{P}
\cdot\left( \frac{\partial T}{\partial P}\right)_V
'''
return self.da_alpha_dT*self.dT_dP_g
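# Implementation note (an interpretation, not stated explicitly above): since
# a_alpha depends on T only, the chain rule uses (dT/dP)_V, which is the
# reciprocal of (dP/dT)_V, so an equivalent hedged form would be
#   self.da_alpha_dT/self.dP_dT_g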
@property
def da_alpha_dP_l_V(self):
r'''Derivative of the `a_alpha` with respect to
pressure at constant volume (varying T) for the liquid phase,
[J^2/mol^2/Pa^2].
.. math::
\left(\frac{\partial a \alpha}{\partial P}\right)_{V}
= \left(\frac{\partial a \alpha}{\partial T}\right)_{P}
\cdot\left( \frac{\partial T}{\partial P}\right)_V
'''
return self.da_alpha_dT*self.dT_dP_l
@property
def d2a_alpha_dTdP_g_V(self):
r'''Derivative of the temperature derivative of `a_alpha` with respect
to pressure at constant volume (varying T) for the gas phase,
[J^2/mol^2/Pa^2/K].
.. math::
\left(\frac{\partial \left(\frac{\partial a \alpha}{\partial T}
\right)_P}{\partial P}\right)_{V}
= \left(\frac{\partial^2 a \alpha}{\partial T^2}\right)_{P}
\cdot\left( \frac{\partial T}{\partial P}\right)_V
'''
return self.d2a_alpha_dT2*self.dT_dP_g
@property
def d2a_alpha_dTdP_l_V(self):
r'''Derivative of the temperature derivative of `a_alpha` with respect
to pressure at constant volume (varying T) for the liquid phase,
[J^2/mol^2/Pa^2/K].
.. math::
\left(\frac{\partial \left(\frac{\partial a \alpha}{\partial T}
\right)_P}{\partial P}\right)_{V}
= \left(\frac{\partial^2 a \alpha}{\partial T^2}\right)_{P}
\cdot\left( \frac{\partial T}{\partial P}\right)_V
'''
return self.d2a_alpha_dT2*self.dT_dP_l
@property
def d2P_dVdP_g(self):
r'''Second derivative of pressure with respect to molar volume and
then pressure for the gas phase, [mol/m^3].
.. math::
\frac{\partial^2 P}{\partial V \partial P} =
\frac{2 R T \frac{d}{d P} V{\left(P \right)}}{\left(- b + V{\left(P
\right)}\right)^{3}} - \frac{\left(- \delta - 2 V{\left(P \right)}
\right) \left(- 2 \delta \frac{d}{d P} V{\left(P \right)}
- 4 V{\left(P \right)} \frac{d}{d P} V{\left(P \right)}\right)
\operatorname{a\alpha}{\left(T \right)}}{\left(\delta V{\left(P
\right)} + \epsilon + V^{2}{\left(P \right)}\right)^{3}} + \frac{2
\operatorname{a\alpha}{\left(T \right)} \frac{d}{d P} V{\left(P
\right)}}{\left(\delta V{\left(P \right)} + \epsilon + V^{2}
{\left(P \right)}\right)^{2}}
'''
r'''Feels like a really strange derivative. Have not been able to construct
it from others yet. The value is symmetric - it can be calculated either way.
Still feels like there should be a general method for obtaining these derivatives.
from sympy import *
P, T, R, b, delta, epsilon = symbols('P, T, R, b, delta, epsilon')
a_alpha, V = symbols(r'a\alpha, V', cls=Function)
dP_dV = 1/(1/(-R*T/(V(P) - b)**2 - a_alpha(T)*(-2*V(P) - delta)/(V(P)**2 + V(P)*delta + epsilon)**2))
cse(diff(dP_dV, P), optimizations='basic')
'''
T, P, b, delta, epsilon = self.T, self.P, self.b, self.delta, self.epsilon
x0 = self.V_g
x1 = self.a_alpha
x2 = delta*x0 + epsilon + x0*x0
x50 = self.dV_dP_g
x51 = x0 + x0 + delta
x52 = 1.0/(b - x0)
x2_inv = 1.0/x2
return 2.0*(-R*T*x52*x52*x52 + x1*x2_inv*x2_inv*(1.0 - x51*x51*x2_inv))*x50
@property
def d2P_dVdP_l(self):
r'''Second derivative of pressure with respect to molar volume and
then pressure for the liquid phase, [mol/m^3].
.. math::
\frac{\partial^2 P}{\partial V \partial P} =
\frac{2 R T \frac{d}{d P} V{\left(P \right)}}{\left(- b + V{\left(P
\right)}\right)^{3}} - \frac{\left(- \delta - 2 V{\left(P \right)}
\right) \left(- 2 \delta \frac{d}{d P} V{\left(P \right)}
- 4 V{\left(P \right)} \frac{d}{d P} V{\left(P \right)}\right)
\operatorname{a\alpha}{\left(T \right)}}{\left(\delta V{\left(P
\right)} + \epsilon + V^{2}{\left(P \right)}\right)^{3}} + \frac{2
\operatorname{a\alpha}{\left(T \right)} \frac{d}{d P} V{\left(P
\right)}}{\left(\delta V{\left(P \right)} + \epsilon + V^{2}
{\left(P \right)}\right)^{2}}
'''
T, b, delta, epsilon = self.T, self.b, self.delta, self.epsilon
x0 = self.V_l
x1 = self.a_alpha
x2 = delta*x0 + epsilon + x0*x0
x50 = self.dV_dP_l
x51 = x0 + x0 + delta
x52 = 1.0/(b - x0)
x2_inv = 1.0/x2
return 2.0*(-R*T*x52*x52*x52 + x1*x2_inv*x2_inv*(1.0 - x51*x51*x2_inv))*x50
@property
def d2P_dVdT_TP_g(self):
r'''Second derivative of pressure with respect to molar volume and
then temperature at constant temperature then pressure for the gas
phase, [Pa*mol/m^3/K].
.. math::
\left(\frac{\partial^2 P}{\partial V \partial T}\right)_{T,P} =
\frac{2 R T \frac{d}{d T} V{\left(T \right)}}{\left(- b + V{\left(T
\right)}\right)^{3}} - \frac{R}{\left(- b + V{\left(T \right)}
\right)^{2}} - \frac{\left(- \delta - 2 V{\left(T \right)}\right)
\left(- 2 \delta \frac{d}{d T} V{\left(T \right)} - 4 V{\left(T
\right)} \frac{d}{d T} V{\left(T \right)}\right) \operatorname{
a\alpha}{\left(T \right)}}{\left(\delta V{\left(T \right)}
+ \epsilon + V^{2}{\left(T \right)}\right)^{3}} - \frac{\left(
- \delta - 2 V{\left(T \right)}\right) \frac{d}{d T} \operatorname{
a\alpha}{\left(T \right)}}{\left(\delta V{\left(T \right)}
+ \epsilon + V^{2}{\left(T \right)}\right)^{2}} + \frac{2
\operatorname{a\alpha}{\left(T \right)} \frac{d}{d T} V{\left(T
\right)}}{\left(\delta V{\left(T \right)} + \epsilon + V^{2}{\left(
T \right)}\right)^{2}}
'''
T, b, delta, epsilon = self.T, self.b, self.delta, self.epsilon
x0 = self.V_g
x2 = 2.0*self.dV_dT_g
x1 = self.b - x0
x1_inv = 1.0/x1
x3 = delta*x0 + epsilon + x0*x0
x3_inv = 1.0/x3
x4 = x3_inv*x3_inv
x5 = self.a_alpha
x6 = x2*x5
x7 = delta + x0 + x0
return (-x1_inv*x1_inv*R*(T*x2*x1_inv + 1.0) + x4*x6
+ x4*x7*(self.da_alpha_dT - x6*x7*x3_inv))
@property
def d2P_dVdT_TP_l(self):
r'''Second derivative of pressure with respect to molar volume and
then temperature at constant temperature then pressure for the liquid
phase, [Pa*mol/m^3/K].
.. math::
\left(\frac{\partial^2 P}{\partial V \partial T}\right)_{T,P} =
\frac{2 R T \frac{d}{d T} V{\left(T \right)}}{\left(- b + V{\left(T
\right)}\right)^{3}} - \frac{R}{\left(- b + V{\left(T \right)}
\right)^{2}} - \frac{\left(- \delta - 2 V{\left(T \right)}\right)
\left(- 2 \delta \frac{d}{d T} V{\left(T \right)} - 4 V{\left(T
\right)} \frac{d}{d T} V{\left(T \right)}\right) \operatorname{
a\alpha}{\left(T \right)}}{\left(\delta V{\left(T \right)}
+ \epsilon + V^{2}{\left(T \right)}\right)^{3}} - \frac{\left(
- \delta - 2 V{\left(T \right)}\right) \frac{d}{d T} \operatorname{
a\alpha}{\left(T \right)}}{\left(\delta V{\left(T \right)}
+ \epsilon + V^{2}{\left(T \right)}\right)^{2}} + \frac{2
\operatorname{a\alpha}{\left(T \right)} \frac{d}{d T} V{\left(T
\right)}}{\left(\delta V{\left(T \right)} + \epsilon + V^{2}{\left(
T \right)}\right)^{2}}
'''
T, b, delta, epsilon = self.T, self.b, self.delta, self.epsilon
x0 = self.V_l
x2 = 2.0*self.dV_dT_l
x1 = self.b - x0
x1_inv = 1.0/x1
x3 = delta*x0 + epsilon + x0*x0
x3_inv = 1.0/x3
x4 = x3_inv*x3_inv
x5 = self.a_alpha
x6 = x2*x5
x7 = delta + x0 + x0
return (-x1_inv*x1_inv*R*(T*x2*x1_inv + 1.0) + x4*x6
+ x4*x7*(self.da_alpha_dT - x6*x7*x3_inv))
@property
def d2P_dT2_PV_g(self):
r'''Second derivative of pressure with respect to temperature twice,
but with pressure held constant the first time and volume held
constant the second time for the gas phase, [Pa/K^2].
.. math::
\left(\frac{\partial^2 P}{\partial T \partial T}\right)_{P,V} =
- \frac{R \frac{d}{d T} V{\left(T \right)}}{\left(- b + V{\left(T
\right)}\right)^{2}} - \frac{\left(- \delta \frac{d}{d T} V{\left(T
\right)} - 2 V{\left(T \right)} \frac{d}{d T} V{\left(T \right)}
\right) \frac{d}{d T} \operatorname{a\alpha}{\left(T \right)}}
{\left(\delta V{\left(T \right)} + \epsilon + V^{2}{\left(T
\right)}\right)^{2}} - \frac{\frac{d^{2}}{d T^{2}}
\operatorname{a\alpha}{\left(T \right)}}{\delta V{\left(T \right)}
+ \epsilon + V^{2}{\left(T \right)}}
'''
T, b, delta, epsilon = self.T, self.b, self.delta, self.epsilon
V = self.V_g
dV_dT = self.dV_dT_g
x2 = self.a_alpha
x0 = V
x1 = dV_dT
x3 = delta*x0 + epsilon + x0*x0
x3_inv = 1.0/x3
x50 = 1.0/(b - x0)
return (-R*x1*x50*x50 + x1*(delta + x0 + x0)*self.da_alpha_dT*x3_inv*x3_inv - self.d2a_alpha_dT2*x3_inv)
@property
def d2P_dT2_PV_l(self):
r'''Second derivative of pressure with respect to temperature twice,
but with pressure held constant the first time and volume held
constant the second time for the liquid phase, [Pa/K^2].
.. math::
\left(\frac{\partial^2 P}{\partial T \partial T}\right)_{P,V} =
- \frac{R \frac{d}{d T} V{\left(T \right)}}{\left(- b + V{\left(T
\right)}\right)^{2}} - \frac{\left(- \delta \frac{d}{d T} V{\left(T
\right)} - 2 V{\left(T \right)} \frac{d}{d T} V{\left(T \right)}
\right) \frac{d}{d T} \operatorname{a\alpha}{\left(T \right)}}
{\left(\delta V{\left(T \right)} + \epsilon + V^{2}{\left(T
\right)}\right)^{2}} - \frac{\frac{d^{2}}{d T^{2}}
\operatorname{a\alpha}{\left(T \right)}}{\delta V{\left(T \right)}
+ \epsilon + V^{2}{\left(T \right)}}
'''
T, b, delta, epsilon = self.T, self.b, self.delta, self.epsilon
V = self.V_l
dV_dT = self.dV_dT_l
x0 = V
x1 = dV_dT
x2 = self.a_alpha
x3 = delta*x0 + epsilon + x0*x0
x3_inv = 1.0/x3
x50 = 1.0/(b - x0)
return (-R*x1*x50*x50 + x1*(delta + x0 + x0)*self.da_alpha_dT*x3_inv*x3_inv - self.d2a_alpha_dT2*x3_inv)
@property
def d2P_dTdP_g(self):
r'''Second derivative of pressure with respect to temperature and then
pressure, with volume held constant the first time and temperature held
constant the second time, for the gas phase, [1/K].
.. math::
\left(\frac{\partial^2 P}{\partial T \partial P}\right)_{V, T} =
- \frac{R \frac{d}{d P} V{\left(P \right)}}{\left(- b + V{\left(P
\right)}\right)^{2}} - \frac{\left(- \delta \frac{d}{d P} V{\left(P
\right)} - 2 V{\left(P \right)} \frac{d}{d P} V{\left(P \right)}
\right) \frac{d}{d T} \operatorname{a\alpha}{\left(T \right)}}
{\left(\delta V{\left(P \right)} + \epsilon + V^{2}{\left(P
\right)}\right)^{2}}
'''
V = self.V_g
dV_dP = self.dV_dP_g
T, b, delta, epsilon = self.T, self.b, self.delta, self.epsilon
da_alpha_dT = self.da_alpha_dT
x0 = V - b
x1 = delta*V + epsilon + V*V
return (-R*dV_dP/(x0*x0) - (-delta*dV_dP - 2.0*V*dV_dP)*da_alpha_dT/(x1*x1))
@property
def d2P_dTdP_l(self):
r'''Second derivative of pressure with respect to temperature and then
pressure, with volume held constant the first time and temperature held
constant the second time, for the liquid phase, [1/K].
.. math::
\left(\frac{\partial^2 P}{\partial T \partial P}\right)_{V, T} =
- \frac{R \frac{d}{d P} V{\left(P \right)}}{\left(- b + V{\left(P
\right)}\right)^{2}} - \frac{\left(- \delta \frac{d}{d P} V{\left(P
\right)} - 2 V{\left(P \right)} \frac{d}{d P} V{\left(P \right)}
\right) \frac{d}{d T} \operatorname{a\alpha}{\left(T \right)}}
{\left(\delta V{\left(P \right)} + \epsilon + V^{2}{\left(P
\right)}\right)^{2}}
'''
V = self.V_l
dV_dP = self.dV_dP_l
T, b, delta, epsilon = self.T, self.b, self.delta, self.epsilon
da_alpha_dT = self.da_alpha_dT
x0 = V - b
x1 = delta*V + epsilon + V*V
return (-R*dV_dP/(x0*x0) - (-delta*dV_dP - 2.0*V*dV_dP)*da_alpha_dT/(x1*x1))
@property
def lnphi_l(self):
r'''The natural logarithm of the fugacity coefficient for
the liquid phase, [-].
'''
return self.G_dep_l*R_inv/self.T
@property
def lnphi_g(self):
r'''The natural logarithm of the fugacity coefficient for
the gas phase, [-].
'''
return log(self.phi_g)
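# The two lnphi properties above use equivalent routes, since G_dep = R*T*ln(phi):
# the liquid one evaluates G_dep_l/(R*T) directly while the gas one takes
# log(phi_g). A hedged equivalence sketch (assumes the PR class defined later
# in this module):
#   eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400.0, P=1E6)
#   assert abs(log(eos.phi_g) - eos.G_dep_g/(R*eos.T)) < 1e-9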
class IG(GCEOS):
r'''Class for solving the ideal gas equation in the `GCEOS` framework.
This provides access to a number of derivatives and properties easily.
It also keeps a common interface for all gas models. However, it is
somewhat slow.
Subclasses :obj:`GCEOS`, which
provides the methods for solving the EOS and calculating its assorted
relevant thermodynamic properties. Solves the EOS on initialization.
Two of `T`, `P`, and `V` are needed to solve the EOS; values for `Tc`,
`Pc`, and `omega`, which are not used in the calculations, are set to those of
methane by default to allow use without specifying them.
.. math::
P = \frac{RT}{V}
Parameters
----------
Tc : float, optional
Critical temperature, [K]
Pc : float, optional
Critical pressure, [Pa]
omega : float, optional
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
T-P initialization, and exploring each phase's properties:
>>> eos = IG(T=400., P=1E6)
>>> eos.V_g, eos.phase
(0.003325785047261296, 'g')
>>> eos.H_dep_g, eos.S_dep_g, eos.U_dep_g, eos.G_dep_g, eos.A_dep_g
(0.0, 0.0, 0.0, 0.0, 0.0)
>>> eos.beta_g, eos.kappa_g, eos.Cp_dep_g, eos.Cv_dep_g
(0.0025, 1e-06, 0.0, 0.0)
>>> eos.fugacity_g, eos.PIP_g, eos.Z_g, eos.dP_dT_g
(1000000.0, 0.9999999999999999, 1.0, 2500.0)
Notes
-----
References
----------
.. [1] Smith, J. M, H. C Van Ness, and Michael M Abbott. Introduction to
Chemical Engineering Thermodynamics. Boston: McGraw-Hill, 2005.
'''
Zc = 1.0
'''float: Critical compressibility for an ideal gas is 1'''
a = 0.0
'''float: `a` parameter for an ideal gas is 0'''
b = 0.0
'''float: `b` parameter for an ideal gas is 0'''
delta = 0.0
'''float: `delta` parameter for an ideal gas is 0'''
epsilon = 0.0
'''float: `epsilon` parameter for an ideal gas is 0'''
volume_solutions = staticmethod(volume_solutions_ideal)
# Handle the properties where numerical error can leave small residual values,
# even though they should be exactly zero for an ideal gas. Not all of them are
# nonzero all the time, but sometimes they are.
def _zero(self): return 0.0
def _set_nothing(self, thing): return
try:
try:
doc = GCEOS.d2T_dV2_g.__doc__
except:
doc = ''
d2T_dV2_g = property(_zero, fset=_set_nothing, doc=doc)
try:
doc = GCEOS.d2V_dT2_g.__doc__
except:
doc = ''
d2V_dT2_g = property(_zero, fset=_set_nothing, doc=doc)
try:
doc = GCEOS.U_dep_g.__doc__
except:
doc = ''
U_dep_g = property(_zero, fset=_set_nothing, doc=doc)
try:
doc = GCEOS.A_dep_g.__doc__
except:
doc = ''
A_dep_g = property(_zero, fset=_set_nothing, doc=doc)
try:
doc = GCEOS.V_dep_g.__doc__
except:
doc = ''
V_dep_g = property(_zero, fset=_set_nothing, doc=doc)
G_dep_g = property(_zero, fset=_set_nothing, doc='Departure Gibbs free energy of an ideal gas is zero, [J/(mol)]')
H_dep_g = property(_zero, fset=_set_nothing, doc='Departure enthalpy of an ideal gas is zero, [J/(mol)]')
S_dep_g = property(_zero, fset=_set_nothing, doc='Departure entropy of an ideal gas is zero, [J/(mol*K)]')
Cp_dep_g = property(_zero, fset=_set_nothing, doc='Departure heat capacity of an ideal gas is zero, [J/(mol*K)]')
# Replace methods
dH_dep_dP_g = property(_zero, doc=GCEOS.dH_dep_dP_g.__doc__)
dH_dep_dT_g = property(_zero, doc=GCEOS.dH_dep_dT_g.__doc__)
dS_dep_dP_g = property(_zero, doc=GCEOS.dS_dep_dP_g.__doc__)
dS_dep_dT_g = property(_zero, doc=GCEOS.dS_dep_dT_g.__doc__)
dfugacity_dT_g = property(_zero, doc=GCEOS.dfugacity_dT_g.__doc__)
dphi_dP_g = property(_zero, doc=GCEOS.dphi_dP_g.__doc__)
dphi_dT_g = property(_zero, doc=GCEOS.dphi_dT_g.__doc__)
except:
pass
def __init__(self, Tc=None, Pc=None, omega=None, T=None, P=None,
V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
self.solve()
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for this EOS. All values are zero.
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
'''
return (0.0, 0.0, 0.0)
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha` for the ideal gas law, which
is zero.
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
'''
return 0.0
def solve_T(self, P, V, solution=None):
r'''Method to calculate `T` from a specified `P` and `V` for the
ideal gas equation of state.
.. math::
T = \frac{PV}{R}
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
Not used, [-]
Returns
-------
T : float
Temperature, [K]
Notes
-----
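As a quick worked example (values approximate), P = 100000 Pa and
V = 0.02494 m^3/mol give T = P*V/R = 100000*0.02494/8.314, which is
about 300 K.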
'''
self.no_T_spec = True
return P*V*R_inv
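# The three saturation-related methods below are trivial for the ideal-gas
# model: the saturation fugacity coefficient is identically 1.0, so its first
# and second temperature derivatives are 0.0.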
def phi_sat(self, T, polish=True):
return 1.0
def dphi_sat_dT(self, T, polish=True):
return 0.0
def d2phi_sat_dT2(self, T, polish=True):
return 0.0
class PR(GCEOS):
r'''Class for solving the Peng-Robinson [1]_ [2]_ cubic
equation of state for a pure compound. Subclasses :obj:`GCEOS`, which
provides the methods for solving the EOS and calculating its assorted
relevant thermodynamic properties. Solves the EOS on initialization.
The main methods here are :obj:`PR.a_alpha_and_derivatives_pure`, which calculates
:math:`a \alpha` and its first and second derivatives, and :obj:`PR.solve_T`, which from a
specified `P` and `V` obtains `T`.
Two of (`T`, `P`, `V`) are needed to solve the EOS.
.. math::
P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha(T)=[1+\kappa(1-\sqrt{T_r})]^2
.. math::
\kappa=0.37464+1.54226\omega-0.26992\omega^2
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
T-P initialization, and exploring each phase's properties:
>>> eos = PR(Tc=507.6, Pc=3025000.0, omega=0.2975, T=400., P=1E6)
>>> eos.V_l, eos.V_g
(0.000156073184785, 0.0021418768167)
>>> eos.phase
'l/g'
>>> eos.H_dep_l, eos.H_dep_g
(-26111.8775716, -3549.30057795)
>>> eos.S_dep_l, eos.S_dep_g
(-58.098447843, -6.4394518931)
>>> eos.U_dep_l, eos.U_dep_g
(-22942.1657091, -2365.3923474)
>>> eos.G_dep_l, eos.G_dep_g
(-2872.49843435, -973.51982071)
>>> eos.A_dep_l, eos.A_dep_g
(297.21342811, 210.38840980)
>>> eos.beta_l, eos.beta_g
(0.00269337091778, 0.0101232239111)
>>> eos.kappa_l, eos.kappa_g
(9.3357215438e-09, 1.97106698097e-06)
>>> eos.Cp_minus_Cv_l, eos.Cp_minus_Cv_g
(48.510162249, 44.544161128)
>>> eos.Cv_dep_l, eos.Cp_dep_l
(18.8921126734, 59.0878123050)
P-T initialization, liquid phase, and round robin trip:
>>> eos = PR(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.000130222125139, -31134.75084, -72.47561931)
T-V initialization, liquid phase:
>>> eos2 = PR(Tc=507.6, Pc=3025000, omega=0.2975, T=299., V=eos.V_l)
>>> eos2.P, eos2.phase
(1000000.00, 'l')
P-V initialization at same state:
>>> eos3 = PR(Tc=507.6, Pc=3025000, omega=0.2975, V=eos.V_l, P=1E6)
>>> eos3.T, eos3.phase
(299.0000000000, 'l')
Notes
-----
The constants in the expressions for `a` and `b` are given to full precision
in the actual code, as derived in [3]_.
The full expression for critical compressibility is:
.. math::
Z_c = \frac{1}{32} \left(\sqrt[3]{16 \sqrt{2}-13}-\frac{7}{\sqrt[3]
{16 \sqrt{2}-13}}+11\right)
References
----------
.. [1] Peng, Ding-Yu, and Donald B. Robinson. "A New Two-Constant Equation
of State." Industrial & Engineering Chemistry Fundamentals 15, no. 1
(February 1, 1976): 59-64. doi:10.1021/i160057a011.
.. [2] Robinson, Donald B., Ding-Yu Peng, and Samuel Y-K Chung. "The
Development of the Peng - Robinson Equation and Its Application to Phase
Equilibrium in a System Containing Methanol." Fluid Phase Equilibria 24,
no. 1 (January 1, 1985): 25-41. doi:10.1016/0378-3812(85)87035-7.
.. [3] Privat, R., and J.-N. Jaubert. "PPR78, a Thermodynamic Model for the
Prediction of Petroleum Fluid-Phase Behaviour," 11. EDP Sciences, 2011.
doi:10.1051/jeep/201100011.
'''
# constant part of `a`,
# X = (-1 + (6*sqrt(2)+8)**Rational(1,3) - (6*sqrt(2)-8)**Rational(1,3))/3
# (8*(5*X+1)/(49-37*X)).evalf(40)
c1 = 0.4572355289213821893834601962251837888504
'''Full value of the constant in the `a` parameter'''
c1R2 = c1*R2
# Constant part of `b`, (X/(X+3)).evalf(40)
c2 = 0.0777960739038884559718447100373331839711
'''Full value of the constant in the `b` parameter'''
c2R = c2*R
c1R2_c2R = c1R2/c2R
# c1, c2 = 0.45724, 0.07780
# Zc is the mechanical compressibility for mixtures as well.
Zc = 0.3074013086987038480093850966542222720096
'''Critical compressibility of the Peng-Robinson EOS'''
Psat_coeffs_limiting = [-3.4758880164801873, 0.7675486448347723]
Psat_coeffs_critical = [13.906174756604267, -8.978515559640332,
6.191494729386664, -3.3553014047359286,
1.0000000000011509]
Psat_cheb_coeffs = [-7.693430141477579, -7.792157693145173, -0.12584439451814622, 0.0045868660863990305,
0.011902728116315585, -0.00809984848593371, 0.0035807374586641324, -0.001285457896498948,
0.0004379441379448949, -0.0001701325511665626, 7.889450459420399e-05, -3.842330780886875e-05,
1.7884847876342805e-05, -7.9432179091441e-06, 3.51726370898656e-06, -1.6108797741557683e-06,
7.625638345550717e-07, -3.6453554523813245e-07, 1.732454904858089e-07, -8.195124459058523e-08,
3.8929380082904216e-08, -1.8668536344161905e-08, 9.021955971552252e-09, -4.374277331168795e-09,
2.122697092724708e-09, -1.0315557015083254e-09, 5.027805333255708e-10, -2.4590905784642285e-10,
1.206301486380689e-10, -5.932583414867791e-11, 2.9274476912683964e-11, -1.4591650777202522e-11,
7.533835507484918e-12, -4.377200831613345e-12, 1.7413208326438542e-12]
# below - down to .14 Tr
# Psat_cheb_coeffs = [-69.78144560030312, -70.82020621910401, -0.5505993362058134, 0.262763240774557, -0.13586962327984622, 0.07091484524874882, -0.03531507189835045, 0.015348266653126313, -0.004290800414097142, -0.0015192254949775404, 0.004230003950690049, -0.005148646330256051, 0.005067979846360524, -0.004463618393006094, 0.0036338412594165456, -0.002781745442601943, 0.0020410583004693912, -0.0014675469823800154, 0.001041797382518202, -0.0007085008245359792, 0.0004341450533632967, -0.00023059133991796472, 0.00012404966848973944, -0.00010575986390189084, 0.00011927874294723816, -0.00010216011382070127, 4.142986825089964e-05, 1.6994654942134455e-05, -2.0393896226146606e-05, -3.05495184394464e-05, 7.840494892004187e-05, -6.715144915784917e-05, 1.9360256298218764e-06, 5.342823303794287e-05, -4.2445268102696054e-05, -2.258059184830652e-05, 7.156133295478447e-05, -5.0419963297068014e-05, -2.1185333936025785e-05, 6.945722167248469e-05, -4.3468774802286496e-05, -3.0211658906858938e-05, 7.396450066832002e-05, -4.0987041756199036e-05, -3.4507186813052766e-05, 3.6619358939125855e-05]
# down to .05 Tr
# Psat_cheb_coeffs = [-71.62442148475718, -72.67946752713178, -0.5550432977559888, 0.2662527679044299, -0.13858385912471755, 0.07300013042829502, -0.03688566755461173, 0.01648745160444604, -0.005061858504315144, -0.0010519595693067093, 0.0039868988560367085, -0.005045456840770146, 0.00504419254495023, -0.0044982000664379905, 0.003727506855649437, -0.002922838794275898, 0.0021888012528213734, -0.0015735578492615076, 0.0010897606359061226, -0.0007293553555925913, 0.0004738606767778966, -0.00030120118607927907, 0.00018992197213856394, -0.00012147385378832608, 8.113736696036817e-05, -5.806550163389163e-05, 4.4822397778703055e-05, -3.669084579413651e-05, 3.0945466319478186e-05, -2.62003968013127e-05, 2.1885122184587654e-05, -1.786717828032663e-05, 1.420082721312861e-05, -1.0981475209780111e-05, 8.276527284992199e-06, -6.100440122314813e-06, 4.420342273408809e-06, -3.171239452318529e-06, 2.2718591475182304e-06, -1.641149583754854e-06, 1.2061284404980935e-06, -9.067266070702959e-07, 6.985214276328142e-07, -5.490755862981909e-07, 4.372991567070929e-07, -3.504743494298746e-07, 2.8019662848682576e-07, -2.2266768846404626e-07, 1.7533403880408145e-07, -1.3630227589226426e-07, 1.0510214144142285e-07, -8.02098792008235e-08, 6.073935683412093e-08, -4.6105511380996746e-08, 3.478599121821662e-08, -2.648029023793574e-08, 2.041302301328165e-08, -1.5671212844805128e-08, 1.2440282394539782e-08, -9.871977759603047e-09, 7.912503992331811e-09, -6.6888910721434e-09, 5.534654087073205e-09, -4.92019981055108e-09, 4.589363968756223e-09, -2.151778718334702e-09]
# down to .05 Tr polishing
# Psat_cheb_coeffs = [-73.9119088855554, -74.98674794418481, -0.5603678572345178, 0.2704608002227193, -0.1418754021264281, 0.07553218818095526, -0.03878657980070652, 0.017866520164384912, -0.0060152224341743525, -0.0004382750653244775, 0.003635841462596336, -0.004888955750612924, 0.005023631814771542, -0.004564880757514128, 0.003842769402817585, -0.0030577040987875793, 0.0023231191552369407, -0.001694755295849508, 0.0011913577693282759, -0.0008093955530850967, 0.0005334402485338361, -0.0003431831424850387, 0.00021792836239828482, -0.00013916167527852, 9.174638441139245e-05, -6.419699908390207e-05, 4.838277855408256e-05, -3.895686370452493e-05, 3.267491660000825e-05, -2.7780478658642705e-05, 2.3455257030895833e-05, -1.943068869205973e-05, 1.5702249378726904e-05, -1.2352834841441616e-05, 9.468188716352547e-06, -7.086815965689662e-06, 5.202794456673999e-06, -3.7660662091643354e-06, 2.710802447723022e-06, -1.9547001517481854e-06, 1.4269579917305496e-06, -1.0627333211922062e-06, 8.086972219940435e-07, -6.313736088052035e-07, 5.002098614800398e-07, -4.014517222719182e-07, 3.222357369727768e-07, -2.591706410738203e-07, 2.0546606649125658e-07, -1.6215902481453263e-07, 1.2645321295092458e-07, -9.678506993483597e-08, 7.52490799383037e-08, -5.60685972986457e-08, 4.3358661542007224e-08, -3.2329350971261814e-08, 2.5091238603112617e-08, -1.8903964302567286e-08, 1.4892047699817043e-08, -1.1705624527623068e-08, 8.603302527636011e-09, -7.628847828412486e-09, 5.0543164590698825e-09, -5.102159698856454e-09, 3.0709992836479988e-09, -2.972533529000884e-09, 2.0494601230946347e-09, -1.626141536313283e-09, 1.6617716853181003e-09, -6.470653307871083e-10, 1.1333690091031717e-09, -1.2451614782651999e-10, 1.098942683163892e-09, 9.673645066411718e-11, 6.206934530152836e-10, -1.1913910201270805e-10, 3.559906774745769e-11, -5.419942764994107e-10, -2.372580701782284e-10, -5.785415972247437e-10, -1.789757696430208e-10]
# down to .05 with lots of failures C40 only
# Psat_cheb_coeffs = [-186.30264784196294, -188.01235085131194, -0.6975588305160902, 0.38422679790906106, -0.2358303051434559, 0.15258449381119304, -0.101338177792044, 0.0679573457611134, -0.045425247476661136, 0.029879338234709937, -0.019024330378443737, 0.011418999154577504, -0.006113230472632388, 0.00246054797767154, -4.3960533109688155e-06, -0.0015825897164979809, 0.002540504992834563, -0.003046881596822211, 0.0032353807402903272, -0.0032061955400497044, 0.0030337264005811464, -0.0027744314554593126, 0.002469806934918433, -0.002149376765619085, 0.001833408492489406, -0.00153552022142691, 0.0012645817528752557, -0.0010249792000921317, 0.0008181632585418055, -0.0006436998283177283, 0.0004995903113614604, -0.0003828408287994695, 0.0002896812774307662, -0.00021674416012176133, 0.00016131784370737042, -0.00012009195488808489, 8.966908457382076e-05, -6.764450681363164e-05, 5.209192773849304e-05, -4.1139971086693995e-05, 3.3476318185800505e-05, -2.8412997762476805e-05, 2.513421113263226e-05, -2.2567508719078435e-05, 2.0188809493379843e-05, -1.810962700274516e-05, 1.643508229137845e-05, -1.503569055933669e-05, 1.3622272823701577e-05, -1.2076671646564277e-05, 1.054271875585668e-05, -9.007273271254411e-06, 7.523720857264602e-06, -6.424404525130439e-06, 5.652203861001342e-06, -4.7755499168431625e-06, 3.7604252783225858e-06, -2.92395389072605e-06, 2.3520802660480336e-06, -1.9209673206999083e-06, 1.6125790706312328e-06, -1.4083468032508143e-06, 1.1777450938630518e-06, -8.636616122606049e-07, 5.749905340593687e-07, -4.644992178826096e-07, 5.109912172256424e-07, -5.285927442208997e-07, 4.4610491153173465e-07, -3.3435155715273366e-07, 2.2022096388817243e-07, -1.3138808837994352e-07, 1.5788807254228123e-07, -2.6570415873228444e-07, 2.820563887584985e-07, -1.6783703722562406e-07, 4.477559158897425e-08, -2.4698813388799755e-09, 5.082691394016857e-08, -1.364026020206371e-07, 1.6850593650100272e-07, -1.0443374638586546e-07, -6.029473813268628e-10, 5.105380858617091e-08, -1.5066843023282578e-08, -5.630921379297198e-08, 9.561766786891034e-08, -8.044216329068123e-08, 3.359993333902796e-08, 1.692366968619578e-08, -2.021364343358841e-08]
# down to .03, plenty of failures
# Psat_cheb_coeffs = [-188.50329975567104, -190.22994960376462, -0.6992886012204886, 0.3856961269737735, -0.23707446208582353, 0.15363415372584763, -0.10221883018831106, 0.06869084576669, -0.046030774233320346, 0.03037297246598552, -0.019421744608583133, 0.011732910491046633, -0.006355800820106353, 0.0026413894471214202, -0.0001333621829559692, -0.0014967435287118152, 0.002489721202961943, -0.00302447283347462, 0.0032350727289014642, -0.0032223921492743357, 0.0030622558268892, -0.0028113049747675455, 0.002511348612059362, -0.002192644454555338, 0.0018764599744331163, -0.0015770771123065552, 0.0013034116032509804, -0.0010603100672178776, 0.00084960767850329, -0.0006709816561447436, 0.0005226330473731801, -0.0004018349441941878, 0.0003053468509191052, -0.00022974201509485604, 0.00017163053097478257, -0.0001278303586505278, 9.545950876002835e-05, -7.200007894259846e-05, 5.5312909934416405e-05, -4.3632781581719854e-05, 3.554641644507928e-05, -2.99488097950353e-05, 2.6011962388807256e-05, -2.3127603908643427e-05, 2.0875472981740965e-05, -1.8975408339047864e-05, 1.7255291079923385e-05, -1.562250114123633e-05, 1.4033483268247027e-05, -1.2483202707948607e-05, 1.0981181475278024e-05, -9.547990214685254e-06, 8.20534723265339e-06, -6.970215811404035e-06, 5.857096216944197e-06, -4.8714713996210945e-06, 4.015088107327757e-06, -3.2837642912761844e-06, 2.6688332761922373e-06, -2.1605704853781956e-06, 1.745415965345872e-06, -1.4112782858614675e-06, 1.1450344603347899e-06, -9.34468189749192e-07, 7.693687927218034e-07, -6.395653830685742e-07, 5.378418354520407e-07, -4.570688107726579e-07, 3.922470141699613e-07, -3.396066879296283e-07, 2.9547505651179775e-07, -2.5824629138078686e-07, 2.259435099158857e-07, -1.9759059073588738e-07, 1.7245665023281603e-07, -1.499107122703144e-07, 1.2993920706246258e-07, -1.1188458371271578e-07, 9.59786582193289e-08, -8.193904465038978e-08, 6.951736088200208e-08, -5.883242593822998e-08, 4.953479013200448e-08, -4.159778119910192e-08, 3.4903544554923914e-08, -2.9199660726126307e-08, 2.4491065764276586e-08, -2.0543807377807442e-08, 1.716620639244989e-08, -1.4598093803545008e-08, 1.2247184453541803e-08, -1.0378062685590349e-08, 8.941636289359033e-09, -7.547512972569913e-09, 6.5406029883590885e-09, -5.55017639345453e-09, 4.857924129262302e-09, -4.170327848134446e-09, 3.5473818590708514e-09, -3.1820101162273115e-09, 2.634813506155291e-09, -2.3186710334946806e-09, 1.9854991410760484e-09, -1.698026932061246e-09, 1.4939355398374196e-09, -1.2257013267845049e-09, 1.1034926144506615e-09, -8.867213325365261e-10, 7.759313594207437e-10, -6.85530513757325e-10, 5.315937675947832e-10, -5.001264119638624e-10, 4.2230130059116994e-10, -3.259379961024697e-10, 2.8696408042785254e-10, -2.654348289559891e-10, 2.240260857681517e-10, -1.5881755448515084e-10, 1.7089871651079086e-10, -1.743032336304004e-10, 5.736029218880029e-11, -9.974594793790009e-11, 1.2854164813721342e-10, -5.569999528883679e-11, 5.432760350528726e-11, -5.900487596351839e-11, 7.348655484042815e-11, 1.9834070367000245e-12, 3.887800704201888e-11, -6.528210426664377e-11, 6.144420801150463e-12, -2.0697350409069892e-11, 9.512216860539657e-12, -4.439607915237426e-11, -1.6185927706642567e-11, -2.8071628138323645e-12, 6.158579755107668e-11, 2.148407244207534e-11, 5.277970985609337e-13, -9.859059640730805e-12, 4.1564767036192385e-12, -1.5577673049063656e-11, -1.2654069415571345e-12, -1.9761710714008562e-12, 9.40276686806768e-12, 4.583732482119074e-13, -1.8523582732792032e-11, -1.7428972653131536e-11, 2.334371921024897e-11, 
# 1.2661569384099514e-11, -2.4431492094169338e-11, -2.720598171659233e-11, 1.579179961710281e-11, 4.682966091729829e-11, 2.026395923889618e-11, -4.163510324266956e-11, -2.7091399111035808e-11, 3.978859743850732e-11, 3.993365393136633e-11, -2.4706365750991333e-11, -2.8201589338545247e-11]
# Psat_cheb_coeffs = [-188.81248459710693, -190.53226813843213, -0.6992718797266877, 0.3857083557782601, -0.23710917890714395, 0.15368561772753983, -0.10228211161653594, 0.06876166878498034, -0.046105558737181966, 0.030448740221432544, -0.019496099441454324, 0.01180400058944964, -0.006422229275450882, 0.002702227307086234, -0.00018800410519084597, -0.0014485238631714243, 0.0024479474900583895, -0.002988894024752606, 0.0032053382330997785, -0.003197984048551589, 0.0030426262430619812, -0.0027958384579597137, 0.0024994432437511482, -0.00218371114178375, 0.0018699437151919942, -0.0015724843629802854, 0.0013002928376298992, -0.0010582955457831876, 0.0008483768179051751, -0.0006702845742590901, 0.0005222702922150421, -0.0004016564112164708, 0.0003052504825598366, -0.00022965330503168022, 0.00017151209256412164, -0.00012765639237664444, 9.522751362437718e-05, -7.17145087909031e-05, 5.498576051758942e-05, -4.328024825801364e-05, 3.518008638334846e-05, -2.9585552080573432e-05, 2.5660899927246663e-05, -2.2801213593209296e-05, 2.0579135430209277e-05, -1.871227629774825e-05, 1.702697381072197e-05, -1.5427107330232484e-05, 1.3871955438611369e-05, -1.235063269577285e-05, 1.087503047126396e-05, -9.463372111120008e-06, 8.138409928400627e-06, -6.918751587310431e-06, 5.817036690746729e-06, -4.841268302762132e-06, 3.990762592248579e-06, -3.264055878954419e-06, 2.6526744772618845e-06, -2.146826614278467e-06, 1.7339220505229884e-06, -1.4002686597492801e-06, 1.1352817872143799e-06, -9.252727697582733e-07, 7.610055457905131e-07, -6.319237506120556e-07, 5.30160897737689e-07, -4.5034836164150563e-07, 3.8588236023116243e-07, -3.345288398991865e-07, 2.910099599025734e-07, -2.538502269447694e-07, 2.2221275929649412e-07, -1.9404386102611735e-07, 1.7012903413041972e-07, -1.4791267614537682e-07, 1.281131161442957e-07, -1.1035351009983888e-07, 9.412216917920838e-08, -8.103521480312085e-08, 6.889862034626618e-08, -5.823229805384481e-08, 4.888865274151847e-08, -4.0647361572055817e-08, 3.461181492625629e-08, -2.890818104595808e-08, 2.4189127295759093e-08, -2.036506388954876e-08, 1.6621054692260028e-08, -1.4376599744841544e-08, 1.2262293144383739e-08, -1.0166543599991339e-08, 8.776172074614484e-09, -7.244748882363349e-09, 6.552057774765062e-09, -5.655401910624057e-09, 4.4124427509814644e-09, -4.138406545361605e-09, 3.4155934985322144e-09, -3.1467981765942498e-09, 3.138041596064127e-09, -2.097881746535653e-09, 1.6538597491971884e-09, -1.4302796654967797e-09, 1.3958696624380472e-09, -1.6941697510614072e-09, 1.1559050790778446e-09, -8.424336557798272e-10, 7.445069759938515e-10, -3.8008350586066653e-10, 6.681447868524303e-10, -5.609484209193093e-10, 1.1709177677205352e-10, -5.781259004102078e-10, 5.45265361901197e-10, -1.3987335287680026e-10, 1.7128157135074418e-10, 1.0377866018526204e-10, 1.449451573983006e-10, -4.977625195297418e-10, 1.7368603686632612e-10, -3.571321706516851e-11, -1.6249813391308165e-10, 4.6148221569532015e-11, 3.9554757121876716e-10, -1.0268016727946628e-10, -7.436027752479989e-11, -1.6876374859490107e-10, -4.24547853876368e-11, 9.538626006134858e-12, 1.5150070863903953e-10, 2.7005277922459003e-10, -1.6342760518896042e-11, -4.572503911555491e-10, 4.922727672815753e-11, 9.160300994028991e-11, -7.120976338703244e-11, 2.164872706420613e-10, 1.1646536920908047e-10, -2.7132159904485077e-10, -9.18445653054099e-11, 1.1410414945528784e-10, 1.1967624164073171e-10, -5.5743966043066313e-11, 3.9042323803713426e-11, 4.316392256370049e-11, -1.8428367625021157e-10, -9.040283123061977e-11, 1.857434297108983e-10, 
# 1.592233467198178e-11, -1.173771592481677e-10, 1.1665496090537252e-10, 1.2886364193873557e-10, -2.1093389704449506e-10, -2.4675247129314452e-11, 1.515767676711589e-10, -1.2689980450730342e-10, -4.2776899169681866e-11, 1.6317818359826586e-10, -1.4821901477978135e-11, -5.8141610036405774e-11]
Psat_cheb_coeffs_der = chebder(Psat_cheb_coeffs)
Psat_coeffs_critical_der = polyder(Psat_coeffs_critical[::-1])[::-1]
Psat_cheb_constant_factor = (-2.355355160853182, 0.42489124941587103)
# Psat_cheb_constant_factor = (-19.744219083323905, 0.050649991923423815) # down to .14 Tr
# Psat_cheb_constant_factor = (-20.25334447874608, 0.049376705093756613) # down to .05
# Psat_cheb_constant_factor = (-20.88507690836272, 0.0478830941599295) # down to .05 repolishing
# Psat_cheb_constant_factor = (-51.789209241068214, 0.019310239068163836) # down to .05 with lots of failures C40 only
# Psat_cheb_constant_factor = (-52.392851049631986, 0.01908689378961204) # down to .03, plenty of failures
# Psat_cheb_constant_factor = (-52.47770345042524, 0.01905687810661655)
Psat_cheb_range = (0.003211332390446207, 104.95219556846003)
phi_sat_coeffs = [4.040440857039882e-09, -1.512382901024055e-07, 2.5363900091436416e-06,
-2.4959001060510725e-05, 0.00015714708105355206, -0.0006312347348814933,
0.0013488647482434379, 0.0008510254890166079, -0.017614759099592196,
0.06640627813169839, -0.13427456425899886, 0.1172205279608668,
0.13594473870160448, -0.5560225934266592, 0.7087599054079694,
0.6426353018023558]
_P_zero_l_cheb_coeffs = [0.13358936990391557, -0.20047353906149878, 0.15101308518135467, -0.11422662323168498, 0.08677799907222833, -0.06622719396774103, 0.05078577177767531, -0.03913992025038471, 0.030322206247168845, -0.023618484941949063, 0.018500212460075605, -0.014575143278285305, 0.011551352410948363, -0.00921093058565245, 0.007390713292456164, -0.005968132800177682, 0.00485080886172241, -0.003968872414987763, 0.003269291360484698, -0.002711665819666899, 0.0022651044970457743, -0.0019058978265104418, 0.0016157801830935644, -0.0013806283122768208, 0.0011894838915417153, -0.0010338173333182162, 0.0009069721482541163, -0.0008037443041438563, 0.0007200633946601682, -0.0006527508698173454, 0.0005993365082194993, -0.0005579199462298259, 0.0005270668422661141, -0.0005057321913053223, 0.0004932057251527365, -0.00024453764761005106]
P_zero_l_cheb_limits = (0.002068158270122966, 27.87515959722943)
def __init__(self, Tc, Pc, omega, T=None, P=None, V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
self.b = b = self.c2R*Tc/Pc
self.a = b*Tc*self.c1R2_c2R
self.kappa = omega*(-0.26992*omega + 1.54226) + 0.37464
self.delta, self.epsilon = 2.0*b, -b*b
self.solve()
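# A hedged worked example of the constructor algebra above, using the
# hexane-like constants from the class docstring (values rounded):
# with Tc=507.6 K, Pc=3.025e6 Pa and omega=0.2975,
#   b     = 0.0778*R*Tc/Pc                               ~ 1.09e-4 m^3/mol
#   a     = 0.4572*R*R*Tc*Tc/Pc                          ~ 2.69 Pa*m^6/mol^2
#   kappa = 0.37464 + 1.54226*0.2975 - 0.26992*0.2975**2 ~ 0.810
#   delta = 2*b, epsilon = -b*b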
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha` for this EOS. Uses the set values of
`Tc`, `kappa`, and `a`.
.. math::
a\alpha = a \left(\kappa \left(- \frac{T^{0.5}}{Tc^{0.5}}
+ 1\right) + 1\right)^{2}
Parameters
----------
T : float
Temperature at which to calculate the value, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
Notes
-----
This method does not alter the object's state and the temperature
provided can be different from that of the object.
Examples
--------
Dodecane at 250 K:
>>> eos = PR(Tc=658.0, Pc=1820000.0, omega=0.562, T=500., P=1e5)
>>> eos.a_alpha_pure(250.0)
15.66839156301
'''
x0 = (1.0 + self.kappa*(1.0 - sqrt(T/self.Tc)))
return self.a*x0*x0
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for this EOS. Uses the set values of `Tc`, `kappa`, and `a`.
.. math::
a\alpha = a \left(\kappa \left(- \frac{T^{0.5}}{Tc^{0.5}}
+ 1\right) + 1\right)^{2}
.. math::
\frac{d a\alpha}{dT} = - \frac{1.0 a \kappa}{T^{0.5} Tc^{0.5}}
\left(\kappa \left(- \frac{T^{0.5}}{Tc^{0.5}} + 1\right) + 1\right)
.. math::
\frac{d^2 a\alpha}{dT^2} = 0.5 a \kappa \left(- \frac{1}{T^{1.5}
Tc^{0.5}} \left(\kappa \left(\frac{T^{0.5}}{Tc^{0.5}} - 1\right)
- 1\right) + \frac{\kappa}{T^{1.0} Tc^{1.0}}\right)
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
Notes
-----
This method does not alter the object's state and the temperature
provided can be different from that of the object.
Examples
--------
Dodecane at 250 K:
>>> eos = PR(Tc=658.0, Pc=1820000.0, omega=0.562, T=500., P=1e5)
>>> eos.a_alpha_and_derivatives_pure(250.0)
(15.66839156301, -0.03094091246957, 9.243186769880e-05)
'''
# TODO custom water a_alpha?
# Peng, DY, and DB Robinson. "Two-and Three-Phase Equilibrium Calculations
# for Coal Gasification and Related Processes,", 1980
# Thermodynamics of aqueous systems with industrial applications 133 (1980): 393-414.
# Applies up to Tr .85.
# Suggested in Equations of State And PVT Analysis.
Tc, kappa, a = self.Tc, self.kappa, self.a
x0 = sqrt(T)
x1 = 1.0/sqrt(Tc)
x2 = kappa*(x0*x1 - 1.) - 1.
x3 = a*kappa
x4 = x1*x2/x0
a_alpha = a*x2*x2
da_alpha_dT = x4*x3
d2a_alpha_dT2 = 0.5*x3*(kappa*x1*x1 - x4)/T
return a_alpha, da_alpha_dT, d2a_alpha_dT2
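# A minimal consistency sketch, not part of the original code: the analytical
# da_alpha_dT returned above can be checked with a central difference of
# a_alpha_pure, reusing the dodecane-like example from the docstring:
#   eos = PR(Tc=658.0, Pc=1820000.0, omega=0.562, T=500., P=1e5)
#   dT = 1e-4
#   numerical = (eos.a_alpha_pure(250.0 + dT) - eos.a_alpha_pure(250.0 - dT))/(2.0*dT)
#   # compare with eos.a_alpha_and_derivatives_pure(250.0)[1]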
def d3a_alpha_dT3_pure(self, T):
r'''Method to calculate the third temperature derivative of `a_alpha`.
Uses the set values of `Tc`, `kappa`, and `a`. This property is not
normally needed.
.. math::
\frac{d^3 a\alpha}{dT^3} = \frac{3 a\kappa \left(- \frac{\kappa}
{T_{c}} + \frac{\sqrt{\frac{T}{T_{c}}} \left(\kappa \left(\sqrt{\frac{T}
{T_{c}}} - 1\right) - 1\right)}{T}\right)}{4 T^{2}}
Parameters
----------
T : float
Temperature at which to calculate the derivative, [K]
Returns
-------
d3a_alpha_dT3 : float
Third temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^3]
Notes
-----
This method does not alter the object's state and the temperature
provided can be different from that of the object.
Examples
--------
Dodecane at 500 K:
>>> eos = PR(Tc=658.0, Pc=1820000.0, omega=0.562, T=500., P=1e5)
>>> eos.d3a_alpha_dT3_pure(500.0)
-9.8038800671e-08
'''
kappa = self.kappa
x0 = 1.0/self.Tc
T_inv = 1.0/T
x1 = sqrt(T*x0)
return -self.a*0.75*kappa*(kappa*x0 - x1*(kappa*(x1 - 1.0) - 1.0)*T_inv)*T_inv*T_inv
def P_max_at_V(self, V):
r'''Method to calculate the maximum pressure the EOS can create at a
constant volume, if one exists; returns None otherwise.
Parameters
----------
V : float
Constant molar volume, [m^3/mol]
Returns
-------
P : float
Maximum possible isochoric pressure, [Pa]
Notes
-----
The analytical determination of this formula involved some part of the
discriminant, and much black magic.
Examples
--------
>>> e = PR(P=1e5, V=0.0001437, Tc=512.5, Pc=8084000.0, omega=0.559)
>>> e.P_max_at_V(e.V)
2247886208.7
'''
'''# Partial notes on how this was determined.
from sympy import *
P, T, V = symbols('P, T, V', positive=True)
Tc, Pc, omega = symbols('Tc, Pc, omega', positive=True)
R, a, b, kappa = symbols('R, a, b, kappa')
main = P*R*Tc*V**2 + 2*P*R*Tc*V*b - P*R*Tc*b**2 - P*V*a*kappa**2 + P*a*b*kappa**2 + R*Tc*a*kappa**2 + 2*R*Tc*a*kappa + R*Tc*a
to_subs = {b: thing.b,
kappa: thing.kappa,
a: thing.a, R: thermo.eos.R, Tc: thing.Tc, V: thing.V, Tc: thing.Tc, omega: thing.omega}
solve(Eq(main, 0), P)[0].subs(to_subs)
'''
try:
Tc, a, b, kappa = self.Tc, self.a, self.b, self.kappa
except:
Tc, a, b, kappa = self.Tcs[0], self.ais[0], self.bs[0], self.kappas[0]
P_max = (-R*Tc*a*(kappa**2 + 2*kappa + 1)/(R*Tc*V**2 + 2*R*Tc*V*b - R*Tc*b**2 - V*a*kappa**2 + a*b*kappa**2))
if P_max < 0.0:
# No positive pressure - it's negative
return None
return P_max
# (V - b)**3*(V**2 + 2*V*b - b**2)*(P*R*Tc*V**2 + 2*P*R*Tc*V*b - P*R*Tc*b**2 - P*V*a*kappa**2 + P*a*b*kappa**2 + R*Tc*a*kappa**2 + 2*R*Tc*a*kappa + R*Tc*a)
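# A brute-force check sketch for P_max_at_V, an illustration rather than the
# original validation code: scan temperature at fixed volume and confirm no
# isochoric pressure exceeds the analytical maximum.
#   e = PR(P=1e5, V=0.0001437, Tc=512.5, Pc=8084000.0, omega=0.559)
#   P_cap = e.P_max_at_V(e.V)
#   scanned = max(PR(T=Ti, V=e.V, Tc=512.5, Pc=8084000.0, omega=0.559).P
#                 for Ti in range(100, 100000, 100))
#   # scanned should stay at or below P_cap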
def solve_T(self, P, V, solution=None):
r'''Method to calculate `T` from a specified `P` and `V` for the PR
EOS. Uses `Tc`, `a`, `b`, and `kappa` as well, obtained from the
class's namespace.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
'l' or 'g' to specify a liquid or vapor solution (if one exists);
if None, will select a solution more likely to be real (closer to
STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
Returns
-------
T : float
Temperature, [K]
Notes
-----
The exact solution can be derived as follows, and is excluded for
brevity.
>>> from sympy import *
>>> P, T, V = symbols('P, T, V')
>>> Tc, Pc, omega = symbols('Tc, Pc, omega')
>>> R, a, b, kappa = symbols('R, a, b, kappa')
>>> a_alpha = a*(1 + kappa*(1-sqrt(T/Tc)))**2
>>> PR_formula = R*T/(V-b) - a_alpha/(V*(V+b)+b*(V-b)) - P
>>> #solve(PR_formula, T)
After careful evaluation of the results of the analytical formula,
it was discovered that numerical precision issues require several
Newton-Raphson refinement iterations; at times, when the analytical value is
extremely erroneous, a call to a full numerical solver not using the
analytical solution at all is required.
Examples
--------
>>> eos = PR(Tc=658.0, Pc=1820000.0, omega=0.562, T=500., P=1e5)
>>> eos.solve_T(P=eos.P, V=eos.V_g)
500.0000000
'''
self.no_T_spec = True
Tc, a, b, kappa = self.Tc, self.a, self.b, self.kappa
# Needs to be improved to do a NR or two at the end!
x0 = V*V
x1 = R*Tc
x2 = x0*x1
x3 = kappa*kappa
x4 = a*x3
x5 = b*x4
x6 = 2.*V*b
x7 = x1*x6
x8 = b*b
x9 = x1*x8
x10 = V*x4
thing = (x2 - x10 + x5 + x7 - x9)
x11 = thing*thing
x12 = x0*x0
x13 = R*R
x14 = Tc*Tc
x15 = x13*x14
x16 = x8*x8
x17 = a*a
x18 = x3*x3
x19 = x17*x18
x20 = x0*V
x21 = 2.*R*Tc*a*x3
x22 = x8*b
x23 = 4.*V*x22
x24 = 4.*b*x20
x25 = a*x1
x26 = x25*x8
x27 = x26*x3
x28 = x0*x25
x29 = x28*x3
x30 = 2.*x8
x31 = (6.*V*x27 - 2.*b*x29 + x0*x13*x14*x30 + x0*x19 + x12*x15
+ x15*x16 - x15*x23 + x15*x24 - x19*x6 + x19*x8 - x20*x21
- x21*x22)
V_m_b = V - b
x33 = 2.*(R*Tc*a*kappa)
x34 = P*x2
x35 = P*x5
x36 = x25*x3
x37 = P*x10
x38 = P*R*Tc
x39 = V*x17
x40 = 2.*kappa*x3
x41 = b*x17
x42 = P*a*x3
# 2.*a*kappa - add a negative sign to get the high temperature solution
# sometimes it is complex!
# try:
root_term = sqrt(V_m_b**3*(x0 + x6 - x8)*(P*x7 -
P*x9 + x25 + x33 + x34 + x35
+ x36 - x37))
# except ValueError:
# # negative number in sqrt
# return super(PR, self).solve_T(P, V)
x100 = 2.*a*kappa*x11*(root_term*(kappa + 1.))
x101 = (x31*V_m_b*((4.*V)*(R*Tc*a*b*kappa) + x0*x33 - x0*x35 + x12*x38
+ x16*x38 + x18*x39 - x18*x41 - x20*x42 - x22*x42
- x23*x38 + x24*x38 + x25*x6 - x26 - x27 + x28 + x29
+ x3*x39 - x3*x41 + x30*x34 - x33*x8 + x36*x6
+ 3*x37*x8 + x39*x40 - x40*x41))
x102 = -Tc/(x11*x31)
T_calc = (x102*(x100 - x101)) # Normally the correct root
if T_calc < 0.0:
# Ruined, call the numerical method; sometimes it happens
return super(PR, self).solve_T(P, V, solution=solution)
Tc_inv = 1.0/Tc
T_calc_high = (x102*(-x100 - x101))
if solution is not None and solution == 'g':
T_calc = T_calc_high
if True:
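# Refine the analytical root with up to two Newton steps on the PR pressure
# residual err = R*T/(V-b) - a*alpha(T)/(V*(V+b)+b*(V-b)) - P; this recovers
# the precision lost to cancellation in the closed-form expression. If the
# initial residual is already far off, the full numerical solver is used
# instead.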
c1, c2 = R/(V_m_b), a/(V*(V+b) + b*V_m_b)
rt = (T_calc*Tc_inv)**0.5
alpha_root = (1.0 + kappa*(1.0-rt))
err = c1*T_calc - alpha_root*alpha_root*c2 - P
if abs(err/P) > 1e-2:
# Numerical issue - such a bad solution we cannot converge
return super(PR, self).solve_T(P, V, solution=solution)
# Newton step - might as well compute it
derr = c1 + c2*kappa*rt*(kappa*(1.0 -rt) + 1.0)/T_calc
if derr == 0.0:
return T_calc
T_calc = T_calc - err/derr
# Step 2 - cannot find occasion to need more steps, most of the time
# this does nothing!
rt = (T_calc*Tc_inv)**0.5
alpha_root = (1.0 + kappa*(1.0-rt))
err = c1*T_calc - alpha_root*alpha_root*c2 - P
derr = c1 + c2*kappa*rt*(kappa*(1.0 -rt) + 1.0)/T_calc
T_calc = T_calc - err/derr
return T_calc
c1, c2 = R/(V_m_b), a/(V*(V+b) + b*V_m_b)
rt = (T_calc_high*Tc_inv)**0.5
alpha_root = (1.0 + kappa*(1.0-rt))
err = c1*T_calc_high - alpha_root*alpha_root*c2 - P
# Newton step - might as well compute it
derr = c1 + c2*kappa*rt*(kappa*(1.0 -rt) + 1.0)/T_calc_high
T_calc_high = T_calc_high - err/derr
# Step 2 - cannot find occasion to need more steps, most of the time
# this does nothing!
rt = (T_calc_high*Tc_inv)**0.5
alpha_root = (1.0 + kappa*(1.0-rt))
err = c1*T_calc_high - alpha_root*alpha_root*c2 - P
derr = c1 + c2*kappa*rt*(kappa*(1.0 -rt) + 1.0)/T_calc_high
T_calc_high = T_calc_high - err/derr
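# Root selection, described here as an interpretation of the logic below: both
# candidate temperatures are scored by their pressure residual and their
# departure Gibbs energy; the high-temperature root is preferred when the low
# one is complex, or when the high root has both the lower departure Gibbs
# energy and the smaller residual.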
delta, epsilon = self.delta, self.epsilon
w0 = 1.0*(delta*delta - 4.0*epsilon)**-0.5
w1 = delta*w0
w2 = 2.0*w0
# print(T_calc, T_calc_high)
a_alpha_low = a*(1.0 + kappa*(1.0-(T_calc/Tc)**0.5))**2.0
a_alpha_high = a*(1.0 + kappa*(1.0-(T_calc_high/Tc)**0.5))**2.0
err_low = abs((R*T_calc/(V-b) - a_alpha_low/(V*V + delta*V + epsilon) - P))
err_high = abs((R*T_calc_high/(V-b) - a_alpha_high/(V*V + delta*V + epsilon) - P))
# print(err_low, err_high, T_calc, T_calc_high, a_alpha_low, a_alpha_high)
RT_low = R*T_calc
G_dep_low = (P*V - RT_low - RT_low*clog(P/RT_low*(V-b)).real
- w2*a_alpha_low*catanh(2.0*V*w0 + w1).real)
RT_high = R*T_calc_high
G_dep_high = (P*V - RT_high - RT_high*clog(P/RT_high*(V-b)).real
- w2*a_alpha_high*catanh(2.0*V*w0 + w1).real)
# print(G_dep_low, G_dep_high)
# ((err_low > err_high*2)) and
if (T_calc.imag != 0.0 and T_calc_high.imag == 0.0) or (G_dep_high < G_dep_low and (err_high < err_low)):
T_calc = T_calc_high
return T_calc
# if err_high < err_low:
# T_calc = T_calc_high
# for Ti in (T_calc, T_calc_high):
# a_alpha = a*(1.0 + kappa*(1.0-(Ti/Tc)**0.5))**2.0
#
#
# # Compute P, and the difference?
# self.P = float(R*self.T/(V-self.b) - self.a_alpha/(V*V + self.delta*V + self.epsilon)
#
#
#
# RT = R*Ti
# print(RT, V-b, P/RT*(V-b))
# G_dep = (P*V - RT - RT*log(P/RT*(V-b))
# - w2*a_alpha*catanh(2.0*V*w0 + w1).real)
# print(G_dep)
# if G_dep < G_dep_base:
# T = Ti
# G_dep_base = G_dep
# T_calc = T
# print(T_calc, T_calc_high)
# T_calc = (-Tc*(2.*a*kappa*x11*sqrt(V_m_b**3*(x0 + x6 - x8)*(P*x7 -
# P*x9 + x25 + x33 + x34 + x35
# + x36 - x37))*(kappa + 1.) -
# x31*V_m_b*((4.*V)*(R*Tc*a*b*kappa) + x0*x33 - x0*x35 + x12*x38
# + x16*x38 + x18*x39 - x18*x41 - x20*x42 - x22*x42
# - x23*x38 + x24*x38 + x25*x6 - x26 - x27 + x28 + x29
# + x3*x39 - x3*x41 + x30*x34 - x33*x8 + x36*x6
# + 3*x37*x8 + x39*x40 - x40*x41))/(x11*x31))
# print(T_calc2/T_calc)
# Validation code - although the solution is analytical some issues
# with floating points can still occur
# Although 99.9 % of points anyone would likely want are plenty good,
# there are some edge cases as P approaches T or goes under it.
# c1, c2 = R/(V_m_b), a/(V*(V+b) + b*V_m_b)
#
# rt = (T_calc*Tc_inv)**0.5
# alpha_root = (1.0 + kappa*(1.0-rt))
# err = c1*T_calc - alpha_root*alpha_root*c2 - P
#
# # Newton step - might as well compute it
# derr = c1 + c2*kappa*rt*(kappa*(1.0 -rt) + 1.0)/T_calc
# T_calc = T_calc - err/derr
#
# # Step 2 - cannot find occasion to need more steps, most of the time
# # this does nothing!
# rt = (T_calc*Tc_inv)**0.5
# alpha_root = (1.0 + kappa*(1.0-rt))
# err = c1*T_calc - alpha_root*alpha_root*c2 - P
# derr = c1 + c2*kappa*rt*(kappa*(1.0 -rt) + 1.0)/T_calc
# T_calc = T_calc - err/derr
## print(T_calc)
# return T_calc
# P_inv = 1.0/P
# if abs(err/P) < 1e-6:
# return T_calc
## print(abs(err/P))
## return GCEOS.solve_T(self, P, V)
# for i in range(7):
# rt = (T_calc*Tc_inv)**0.5
# alpha_root = (1.0 + kappa*(1.0-rt))
# err = c1*T_calc - alpha_root*alpha_root*c2 - P
# derr = c1 + c2*kappa*rt*(kappa*(1.0 -rt) + 1.0)/T_calc
#
# T_calc = T_calc - err/derr
# print(err/P, T_calc, derr)
# if abs(err/P) < 1e-12:
# return T_calc
# return T_calc
# starts at 0.0008793111898930736
# Psat_ranges_low = (0.011527649224138653,
# 0.15177700441811506, 0.7883172905889053, 2.035659276638337,
# 4.53501754500169, 10.745446771738406, 22.67639480888016,
# 50.03388490796283, 104.02786866285064)
# 2019 Nov
# Psat_ranges_low = (0.15674244743681393, 0.8119861320343748, 2.094720219302703, 4.960845727141835, 11.067460617890934, 25.621853405705796, 43.198888850643804, 104.02786866285064)
# Psat_coeffs_low = [[-227953.8193412378, 222859.8202525231, -94946.0644714779, 22988.662866916213, -3436.218010266234, 314.10561626462993, -12.536721169650086, -2.392026378146748, 1.7425442228873158, -1.2062891595039678, 0.9256591091303878, -0.7876053099939332, 0.5624587154041579, -3.3553013976814365, 5.4012350148013866e-14], [0.017979999443171253, -0.1407329351142875, 0.5157655870958351, -1.1824391743389553, 1.9175463304080598, -2.370060249233812, 2.3671981077067543, -2.0211919069051754, 1.5662532616167582, -1.1752554496422438, 0.9211423805826566, -0.7870983088912286, 0.5624192663836626, -3.3552995268181935, -4.056076807756881e-08], [2.3465238783212443e-06, -5.1803023754491137e-05, 0.0005331498955415226, -0.0034021195248914006, 0.015107808977575897, -0.04968952806811015, 0.12578046832772882, -0.25143473221174495, 0.40552536074726614, -0.5443994966086247, 0.6434269285808626, -0.6923484892423339, 0.5390886452491613, -3.3516377955152628, -0.0002734868035272342], [-4.149916661961022e-10, 2.1845922714910234e-08, -5.293093383029167e-07, 7.799519138713084e-06, -7.769053551547911e-05, 0.0005486109959120195, -0.0027872878510967723, 0.010013711509364028, -0.023484350891214936, 0.024784713187904924, 0.04189568427991252, -0.2040017547275196, 0.25395831370937016, -3.2456178797446413, -0.01903130694686439], [5.244405747881219e-16, -1.5454390343008565e-14, -2.0604241377631507e-12, 1.8208689279561933e-10, -7.250743412052849e-09, 1.8247981842001254e-07, -3.226779942705286e-06, 4.21332816427672e-05, -0.00041707954900317614, 0.003173654759907457, -0.01868692125208627, 0.0855653889368932, -0.31035507126284995, -2.6634237299183328, -0.2800897855694018], [-2.1214680302656463e-19, 5.783021422459962e-17, -7.315923275334905e-15, 5.698692571821259e-13, -3.0576045765082714e-11, 1.1975824393534794e-09, -3.540115921441331e-08, 8.052781011110919e-07, -1.424237637885889e-05, 0.00019659116938228988, -0.0021156267397923314, 0.017700252965885416, -0.11593142002481696, -3.013661988282298, 0.01996154251720128], [-2.8970166603270677e-23, 1.694610551839978e-20, -4.467776279776866e-18, 7.096773522723984e-16, -7.632413053542317e-14, 5.906374821509563e-12, -3.4056397726361876e-10, 1.4928364875485495e-08, -5.025465019680778e-07, 1.3027126331371714e-05, -0.00025915855275578494, 0.003928557567224198, -0.04532442889219183, -3.235941699431832, 0.33934709098936366], [-1.0487638177712636e-27, 1.1588074100262264e-24, -5.933272229330526e-22, 1.8676144445612704e-19, -4.0425091708892395e-17, 6.37584823835825e-15, -7.573969719222655e-13, 6.907076002118451e-11, -4.883344880881757e-09, 2.6844313931168583e-07, -1.1443544240867529e-05, 0.0003760349651708502, -0.009520080664949915, -3.464433298845877, 1.0399494170785033]]
# 2019 Dec 08 #1
# Psat_ranges_low = ([0.1566663623710075, 0.8122712349481437, 2.0945197784666294, 4.961535043425216, 11.064718660459363, 25.62532893636351, 43.17405809523583, 85.5638421625653, 169.8222874125952)
# Psat_coeffs_low = [[-6.364470992262544e-23, 1.5661396802352383e-19, -1.788719435685493e-16, 1.2567790299823932e-13, -6.068855158259506e-11, 2.130642024043302e-08, -5.608337854780211e-06, 0.0011243910475529856, -0.17253439771817053, 20.164796917496496, -1766.983966143576, 112571.42973915562, -4928969.89775339, 132767165.35442507, -1659856970.7084315], [-6.755028337063007e-31, 1.2373135465776702e-27, -1.0534911582623026e-24, 5.532082037130418e-22, -2.0042818462405888e-19, 5.3092667094437664e-17, -1.0629813459498251e-14, 1.6396189295145161e-12, -1.9677160870915945e-10, 1.8425759971191095e-08, -1.3425348946576017e-06, 7.562661739651473e-05, -0.0032885862389808195, -3.5452990752336735, 1.5360178058346605], [-5.909795950371768e-27, 5.645060782013921e-24, -2.5062698828832408e-21, 6.861883492029141e-19, -1.2960098086863643e-16, 1.7893963536931406e-14, -1.8669999568680822e-12, 1.5005071785133313e-10, -9.381783948347974e-09, 4.576967837674971e-07, -1.7378660968493725e-05, 0.0005105597560223805, -0.011603105202254462, -3.4447117223858394, 0.9538198797898474], [-2.8780483706946006e-23, 1.4693097909367858e-20, -3.492711723365092e-18, 5.129438453755985e-16, -5.2066819983096923e-14, 3.87131295903126e-12, -2.1797843188384387e-10, 9.475510493050094e-09, -3.212229879279181e-07, 8.520129885652724e-06, -0.00017645941977890718, 0.0028397690069188186, -0.035584878748907235, -3.2889972189483, 0.47227047696507896], [-2.133647784270567e-19, 5.813855761166538e-17, -7.351939324704256e-15, 5.724415520048679e-13, -3.0701524683808055e-11, 1.2020043191332715e-09, -3.5517231986184477e-08, 8.075833591581873e-07, -1.4277180602174389e-05, 0.0001969886336996064, -0.0021190060629508248, 0.017720993486168023, -0.11601827744842373, -3.0134398433062954, 0.019699769017179847], [5.217055552725474e-16, -1.561972494582649e-14, -2.027739589933126e-12, 1.8030004183143271e-10, -7.1961213928967356e-09, 1.8138160781745565e-07, -3.2112101506231723e-06, 4.197218861582643e-05, -0.00041584453068251905, 0.0031666287443832307, -0.018657602063128432, 0.08547811393673718, -0.31017952035114504, -2.6636376461277504, -0.27997050354186115], [-4.1558987320232216e-10, 2.1874838982254277e-08, -5.299524926441045e-07, 7.808241563359814e-06, -7.777110034030892e-05, 0.0005491470176474339, -0.002789936581283384, 0.010023585334231266, -0.023512249664927133, 0.02484416646533969, 0.04180162903589153, -0.20389464760201653, 0.25387532037317434, -3.245578712101638, -0.01903980099778657], [2.3320945490434305e-06, -5.15194336734163e-05, 0.0005305911686609431, -0.003388078003236081, 0.015055473744080193, -0.049549442201717114, 0.12550289037335455, -0.251021291476035, 0.40506041321992375, -0.5440068047537978, 0.6431818377117259, -0.6922389245218481, 0.5390554975784367, -3.3516317236219626, -0.00027399457467680577], [0.017760683349597454, -0.1392342452029993, 0.5111179189769633, -1.1737814955588932, 1.9067391494716879, -2.3605113086814407, 2.361048334775187, -2.0182633656154794, 1.5652184041682835, -1.1749857171593956, 0.92109138142958, -0.7870915307971148, 0.5624186680171368, -3.3552994954150326, -4.130013597780646e-08], [1842638.012244339, -2064103.5077599594, 1029111.4284441478, -300839.92590603326, 57174.96949130112, -7405.305505076668, 668.4504791023379, -43.94219790319933, 3.4634979070792977, -1.2528527563309222, 0.9264289045482768, -0.787612207652486, 0.5624587411994793, -3.3553013976928456, 4.846123502488808e-14]]
# 2019 Dec 08 #2
# Psat_ranges_low = (0.15674244743681393, 0.8119861320343748, 2.094720219302703, 4.961535043425216, 11.064718660459363, 25.62532893636351, 43.17405809523583, 85.5638421625653, 169.8222874125952, 192.707581659434)
# Psat_coeffs_low = [[-393279.9328001248, 414920.88015712175, -194956.1186003408, 53799.692378381624, -9679.442200674115, 1189.1133946984114, -99.38789237175924, 3.7558250389696366, 1.4341105372610397, -1.195532646019414, 0.9254075742030472, -0.7876016031722438, 0.5624586846061402, -3.355301397567417, -2.475797344914099e-14], [0.018200741617324958, -0.14216111513088853, 0.5199706046777292, -1.1898993034816217, 1.9264460624802726, -2.377604380463091, 2.3718790446551283, -2.0233492715449346, 1.5669946704278936, -1.175444344921655, 0.9211774746760774, -0.787102916441927, 0.5624196703434721, -3.3552995479850125, -4.006059328709455e-08], [2.362594082154845e-06, -5.213477214805086e-05, 0.0005363047209564668, -0.0034204334370065157, 0.015180294585886198, -0.04989640532490752, 0.1262194343941631, -0.252138050376706, 0.4063802322466773, -0.5451837881722801, 0.643961448026334, -0.6926108644042617, 0.5391763183580807, -3.3516556444811516, -0.00027181665396192045], [-4.1566510211197074e-10, 2.1878563345656593e-08, -5.30037387599558e-07, 7.809422248533072e-06, -7.77822904769859e-05, 0.0005492234565335112, -0.002790324592151159, 0.010025071882175543, -0.023516568419967406, 0.024853633218471893, 0.04178621870041742, -0.20387658476895476, 0.2538609101701838, -3.2455717084245443, -0.019041365569938407], [5.952860605957254e-16, -2.3560872386568428e-14, -1.6328974906691505e-12, 1.6831386671561567e-10, -6.947967158882692e-09, 1.77675502929117e-07, -3.170039732850266e-06, 4.162662881336586e-05, -0.0004136425496617131, 0.0031560285189308705, -0.018619655683130842, 0.085380163769752, -0.3100071777702119, -2.6638226631426187, -0.279879068340815], [-2.1336825570293267e-19, 5.813946215182557e-17, -7.352047876443287e-15, 5.724495165386215e-13, -3.0701923762367554e-11, 1.2020187632285275e-09, -3.5517621350872006e-08, 8.075912994222895e-07, -1.4277303680626562e-05, 0.00019699007656794466, -0.00211901865445771, 0.01772107279538477, -0.1160186182468458, -3.0134389491023668, 0.019698688209032866], [-2.8780483706946006e-23, 1.4693097909367858e-20, -3.492711723365092e-18, 5.129438453755985e-16, -5.2066819983096923e-14, 3.87131295903126e-12, -2.1797843188384387e-10, 9.475510493050094e-09, -3.212229879279181e-07, 8.520129885652724e-06, -0.00017645941977890718, 0.0028397690069188186, -0.035584878748907235, -3.2889972189483, 0.47227047696507896], [-5.909795950371768e-27, 5.645060782013921e-24, -2.5062698828832408e-21, 6.861883492029141e-19, -1.2960098086863643e-16, 1.7893963536931406e-14, -1.8669999568680822e-12, 1.5005071785133313e-10, -9.381783948347974e-09, 4.576967837674971e-07, -1.7378660968493725e-05, 0.0005105597560223805, -0.011603105202254462, -3.4447117223858394, 0.9538198797898474], [-6.755028337063007e-31, 1.2373135465776702e-27, -1.0534911582623026e-24, 5.532082037130418e-22, -2.0042818462405888e-19, 5.3092667094437664e-17, -1.0629813459498251e-14, 1.6396189295145161e-12, -1.9677160870915945e-10, 1.8425759971191095e-08, -1.3425348946576017e-06, 7.562661739651473e-05, -0.0032885862389808195, -3.5452990752336735, 1.5360178058346605], [-6.364470992262544e-23, 1.5661396802352383e-19, -1.788719435685493e-16, 1.2567790299823932e-13, -6.068855158259506e-11, 2.130642024043302e-08, -5.608337854780211e-06, 0.0011243910475529856, -0.17253439771817053, 20.164796917496496, -1766.983966143576, 112571.42973915562, -4928969.89775339, 132767165.35442507, -1659856970.7084315]]
# 2019 Dec 08 #3
Psat_ranges_low = (0.038515189998761204, 0.6472853332269844, 2.0945197784666294, 4.961232873814024, 11.067553885784903, 25.624838497870584, 43.20169529076582, 85.5588271726612, 192.72834691988226)
Psat_coeffs_low = [[2338676895826482.5, -736415034973095.6, 105113277697825.1, -8995168780410.754, 514360029044.81494, -20734723655.83978, 605871516.8891307, -12994014.122638363, 204831.11357912835, -2351.9913154464143, 18.149657683324232, 0.8151930684866298, -0.7871881357728392, 0.5624577476810062, -3.35530139647672, -4.836964162535651e-13], [-0.13805715433070773, 0.8489231609102119, -2.450329797856018, 4.447856574793218, -5.767299107094559, 5.794674157897756, -4.825296555657044, 3.5520183799445926, -2.4600869594916634, 1.6909163275418595, -1.2021498414235525, 0.9254639369127162, -0.7875982246546266, 0.5624585116206676, -3.3553013938160787, -3.331224185387782e-11], [-2.3814071133383825e-06, 5.318261908739265e-05, -0.0005538990617858645, 0.0035761255785055936, -0.016054997425247523, 0.05333504500541739, -0.13636391080337568, 0.27593424749870343, -0.4517901507372948, 0.6114112167354924, -0.7059858408782421, 0.7385376731146207, -0.7329884294338728, 0.5509890744823249, -3.353773232516225, -9.646546737407391e-05], [2.6058661808460023e-11, -1.75914103924121e-09, 5.396299167286894e-08, -1.0007922530068192e-06, 1.2554484077194732e-05, -0.0001125821062183067, 0.0007410322067253991, -0.0035992993229111833, 0.012657105041028169, -0.030121969848977304, 0.03753504314148813, 0.02349666014556937, -0.18469580367455368, 0.24005237728233714, -3.239469690554324, -0.020289142467969867], [-1.082394018559102e-15, 1.2914854481231322e-13, -7.104839518580019e-12, 2.3832489222439473e-10, -5.425087002560749e-09, 8.804418548276272e-08, -1.0364065054630989e-06, 8.719985338278278e-06, -4.8325538208084174e-05, 0.00011200959608941485, 0.0008028675551716892, -0.010695106054891056, 0.06594801536296582, -0.27725262867260253, -2.6977571369079514, -0.2635895959694814], [1.1488824622125947e-20, -3.331154652317046e-18, 4.503372697637035e-16, -3.7684497582121125e-14, 2.1852058912840643e-12, -9.313780852814459e-11, 3.019939074381905e-09, -7.605074783395472e-08, 1.5052679183948458e-06, -2.354701523431422e-05, 0.00029127690705745875, -0.0028399757838276493, 0.02173245057169364, -0.13135011490812692, -2.9774476427885146, -0.01942256817236654], [1.0436558787976772e-24, -5.473723131383567e-22, 1.3452696879486453e-19, -2.0573736968717295e-17, 2.1924486360657888e-15, -1.7272619586846295e-13, 1.0413985148866247e-11, -4.906312890258065e-10, 1.8279149292524938e-08, -5.414588408693672e-07, 1.275367009914141e-05, -0.00023786604002741, 0.0034903075344121025, -0.04033658323380905, -3.2676007023496245, 0.42749816097639837], [9.060766533667912e-29, -9.196819760777788e-26, 4.3601925662975664e-23, -1.2818245897574232e-20, 2.615903295904718e-18, -3.930631843509798e-16, 4.500311702777485e-14, -4.007582103109645e-12, 2.808196479352211e-10, -1.5562164421777763e-08, 6.818206236433737e-07, -2.350273523243411e-05, 0.0006326097721162514, -0.013277937187152783, -3.4305615375066876, 0.8983326523220114], [1.1247677438654667e-33, -2.4697583969349065e-30, 2.5286510080356973e-27, -1.6024926981128421e-24, 7.03655740810716e-22, -2.2705238015446456e-19, 5.57121222696514e-17, -1.0609879702627998e-14, 1.5863699537553053e-12, -1.8713657213281574e-10, 1.7407548458856668e-08, -1.2702047168798462e-06, 7.210856106809965e-05, -0.0031754110755806966, -3.5474790036315795, 1.555110704923493]]
class PR78(PR):
r'''Class for solving the Peng-Robinson cubic
equation of state for a pure compound according to the 1978 variant [1]_ [2]_.
Subclasses :obj:`PR`, which provides everything except the variable `kappa`.
Solves the EOS on initialization. See :obj:`PR` for further documentation.
.. math::
P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha(T)=[1+\kappa(1-\sqrt{T_r})]^2
.. math::
\kappa_i = 0.37464+1.54226\omega_i-0.26992\omega_i^2 \text{ if } \omega_i
\le 0.491
.. math::
\kappa_i = 0.379642 + 1.48503 \omega_i - 0.164423\omega_i^2 + 0.016666
\omega_i^3 \text{ if } \omega_i > 0.491
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization (furfuryl alcohol), liquid phase:
>>> eos = PR78(Tc=632, Pc=5350000, omega=0.734, T=299., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 8.3519628969e-05, -63764.671093, -130.737153225)
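For a compound with `omega` above 0.491 the longer `kappa` correlation is
selected automatically; a sketch with purely illustrative constants (output
omitted):
>>> eos_heavy = PR78(Tc=700.0, Pc=1e6, omega=0.9, T=500., P=1E5) # doctest:+SKIP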
Notes
-----
This variant is recommended over the original.
References
----------
.. [1] Robinson, Donald B, and Ding-Yu Peng. The Characterization of the
Heptanes and Heavier Fractions for the GPA Peng-Robinson Programs.
Tulsa, Okla.: Gas Processors Association, 1978.
.. [2] Robinson, Donald B., Ding-Yu Peng, and Samuel Y-K Chung. "The
Development of the Peng - Robinson Equation and Its Application to Phase
Equilibrium in a System Containing Methanol." Fluid Phase Equilibria 24,
no. 1 (January 1, 1985): 25-41. doi:10.1016/0378-3812(85)87035-7.
'''
low_omega_constants = (0.37464, 1.54226, -0.26992)
'''Constants for the `kappa` formula for the low-omega region.'''
high_omega_constants = (0.379642, 1.48503, -0.164423, 0.016666)
'''Constants for the `kappa` formula for the high-omega region.'''
def __init__(self, Tc, Pc, omega, T=None, P=None, V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
self.a = self.c1R2*Tc*Tc/Pc
self.b = b = self.c2R*Tc/Pc
self.delta, self.epsilon = 2.0*b, -b*b
if omega <= 0.491:
self.kappa = 0.37464 + omega*(1.54226 - 0.26992*omega)
else:
self.kappa = omega*(omega*(0.016666*omega - 0.164423) + 1.48503) + 0.379642
self.solve()
class PRTranslated(PR):
r'''Class for solving the volume translated Peng-Robinson equation of state.
Subclasses :obj:`PR`. Solves the EOS on initialization.
This is intended as a base class for all translated variants of the
Peng-Robinson EOS.
.. math::
P = \frac{RT}{v + c - b} - \frac{a\alpha(T)}{(v+c)(v + c + b)+b(v
+ c - b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha(T)=[1+\kappa(1-\sqrt{T_r})]^2
.. math::
\kappa=0.37464+1.54226\omega-0.26992\omega^2
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
alpha_coeffs : tuple or None
Coefficients which may be specified by subclasses; set to None to use
the original Peng-Robinson alpha function, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization:
>>> eos = PRTranslated(T=305, P=1.1e5, Tc=512.5, Pc=8084000.0, omega=0.559, c=-1e-6)
>>> eos.phase, eos.V_l, eos.V_g
('l/g', 4.90798083711e-05, 0.0224350982488)
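Because the translation simply shifts the volume roots, comparing against the
untranslated :obj:`PR` at the same conditions should recover `c` to within
solver precision (a quick sketch of that check, output omitted):
>>> base = PR(T=305, P=1.1e5, Tc=512.5, Pc=8084000.0, omega=0.559) # doctest:+SKIP
>>> base.V_l - eos.V_l # doctest:+SKIP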
Notes
-----
References
----------
.. [1] Gmehling, Jürgen, Michael Kleiber, Bärbel Kolbe, and Jürgen Rarey.
Chemical Thermodynamics for Process Simulation. John Wiley & Sons, 2019.
'''
solve_T = GCEOS.solve_T
P_max_at_V = GCEOS.P_max_at_V
kwargs_keys = ('c', 'alpha_coeffs')
def __init__(self, Tc, Pc, omega, alpha_coeffs=None, c=0.0, T=None, P=None,
V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
Pc_inv = 1.0/Pc
self.a = self.c1R2*Tc*Tc*Pc_inv
self.c = c
if alpha_coeffs is None:
self.kappa = omega*(-0.26992*omega + 1.54226) + 0.37464
# Does not have an impact on phase equilibria
self.alpha_coeffs = alpha_coeffs
self.kwargs = {'c': c, 'alpha_coeffs': alpha_coeffs}
# self.C0, self.C1, self.C2 = Twu_coeffs
b0 = self.c2*R*Tc*Pc_inv
self.b = b = b0 - c
# Cannot reference b directly
self.delta = 2.0*(c + b0)
self.epsilon = -b0*b0 + c*c + 2.0*c*b0
# C**2 + 2*C*b + V**2 + V*(2*C + 2*b) - b**2
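# i.e. delta and epsilon are chosen so that V*V + delta*V + epsilon expands
# to the translated denominator (V + c)*(V + c + b0) + b0*(V + c - b0).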
self.solve()
class PRTranslatedPPJP(PRTranslated):
r'''Class for solving the volume translated Pina-Martinez, Privat, Jaubert,
and Peng revision of the Peng-Robinson equation of state
for a pure compound according to [1]_.
Subclasses :obj:`PRTranslated`, which provides everything except the variable `kappa`.
Solves the EOS on initialization. See :obj:`PRTranslated` for further documentation.
.. math::
P = \frac{RT}{v + c - b} - \frac{a\alpha(T)}{(v+c)(v + c + b)+b(v
+ c - b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha(T)=[1+\kappa(1-\sqrt{T_r})]^2
.. math::
\kappa = 0.3919 + 1.4996 \omega - 0.2721\omega^2 + 0.1063\omega^3
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization (hexane), liquid phase:
>>> eos = PRTranslatedPPJP(Tc=507.6, Pc=3025000, omega=0.2975, c=0.6390E-6, T=250., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.0001229231238092, -33466.2428296, -80.75610242427)
Notes
-----
This variant offers incremental improvements in accuracy, but those
can be fairly substantial for some substances.
References
----------
.. [1] Pina-Martinez, Andrés, Romain Privat, Jean-Noël Jaubert, and
Ding-Yu Peng. "Updated Versions of the Generalized Soave α-Function
Suitable for the Redlich-Kwong and Peng-Robinson Equations of State."
Fluid Phase Equilibria, December 7, 2018.
https://doi.org/10.1016/j.fluid.2018.12.007.
'''
# Direct solver for T could be implemented but cannot use the PR one
kwargs_keys = ('c',)
def __init__(self, Tc, Pc, omega, c=0.0, T=None, P=None, V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
Pc_inv = 1.0/Pc
self.a = self.c1*R2*Tc*Tc*Pc_inv
self.c = c
# 0.3919 + 1.4996*omega - 0.2721*omega**2+0.1063*omega**3
self.kappa = omega*(omega*(0.1063*omega - 0.2721) + 1.4996) + 0.3919
self.kwargs = {'c': c}
b0 = self.c2*R*Tc*Pc_inv
self.b = b = b0 - c
self.delta = 2.0*(c + b0)
self.epsilon = -b0*b0 + c*c + 2.0*c*b0
self.solve()
def P_max_at_V(self, V):
if self.c == 0.0:
return PR.P_max_at_V(self, V)
return None
class PRTranslatedPoly(Poly_a_alpha, PRTranslated):
r'''Class for solving the volume translated Peng-Robinson equation of state
with a polynomial alpha function. With enough well-chosen coefficients, this
model can reproduce a target property such as the vapor pressure to very
high accuracy.
Subclasses :obj:`PRTranslated`. Solves the EOS on initialization.
This is intended as a base class for all translated variants of the
Peng-Robinson EOS.
.. math::
P = \frac{RT}{v + c - b} - \frac{a\alpha(T)}{(v+c)(v + c + b)+b(v
+ c - b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha(T)=f(T)
.. math::
\kappa=0.37464+1.54226\omega-0.26992\omega^2
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
alpha_coeffs : tuple or None
Coefficients which may be specified by subclasses; set to None to use
the original Peng-Robinson alpha function, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
Methanol, with a polynomial alpha function fitted to reproduce CoolProp's
implementation of its vapor pressure (a degree-13 polynomial, 14 coefficients):
>>> alpha_coeffs_exact = [9.645280470011588e-32, -4.362226651748652e-28, 9.034194757823037e-25, -1.1343330204981244e-21, 9.632898335494218e-19, -5.841502902171077e-16, 2.601801729901228e-13, -8.615431349241052e-11, 2.1202999753932622e-08, -3.829144045293198e-06, 0.0004930777289075716, -0.04285337965522619, 2.2473964123842705, -51.13852710672087]
>>> kwargs = dict(Tc=512.5, Pc=8084000.0, omega=0.559, alpha_coeffs=alpha_coeffs_exact, c=1.557458e-05)
>>> eos = PRTranslatedPoly(T=300, P=1e5, **kwargs)
>>> eos.Psat(500)/PropsSI("P", 'T', 500.0, 'Q', 0, 'methanol') # doctest:+SKIP
1.0000112765
Notes
-----
'''
class PRTranslatedMathiasCopeman(Mathias_Copeman_poly_a_alpha, PRTranslated):
pass
class PRTranslatedCoqueletChapoyRichon(PRTranslatedMathiasCopeman):
kwargs_keys = ('c', 'alpha_coeffs')
def __init__(self, Tc, Pc, omega, c=0.0, alpha_coeffs=None, T=None, P=None, V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
Pc_inv = 1.0/Pc
self.a = self.c1*R2*Tc*Tc*Pc_inv
self.c = c
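# Estimate the Mathias-Copeman alpha coefficients from the acentric factor
# when none are provided; coefficients are ordered highest power first, with
# the constant term 1.0 last (also appended below for user-supplied lists).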
if alpha_coeffs is None:
c1 = omega*(0.1316*omega + 1.4031) + 0.3906
c2 = omega*(-1.3127*omega + 0.3015) - 0.1213
c3 = 0.7661*omega + 0.3041
alpha_coeffs = [c3, c2, c1, 1.0]
elif alpha_coeffs[-1] != 1.0:
alpha_coeffs = list(alpha_coeffs)
alpha_coeffs.append(1.0)
self.kwargs = {'c': c, 'alpha_coeffs': alpha_coeffs}
self.alpha_coeffs = alpha_coeffs
b0 = self.c2*R*Tc*Pc_inv
self.b = b = b0 - c
self.delta = 2.0*(c + b0)
self.epsilon = -b0*b0 + c*c + 2.0*c*b0
self.solve()
class PRTranslatedTwu(Twu91_a_alpha, PRTranslated):
r'''Class for solving the volume translated Peng-Robinson equation of state
with the Twu (1991) [1]_ alpha function.
Subclasses :obj:`thermo.eos_alpha_functions.Twu91_a_alpha` and :obj:`PRTranslated`.
Solves the EOS on initialization.
.. math::
P = \frac{RT}{v + c - b} - \frac{a\alpha(T)}{(v+c)(v + c + b)+b(v
+ c - b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha = \left(\frac{T}{T_{c}}\right)^{c_{3} \left(c_{2}
- 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c}}
\right)^{c_{2} c_{3}} + 1\right)}
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
alpha_coeffs : tuple(float[3])
Coefficients L, M, N (also called C1, C2, C3) of TWU 1991 form, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization:
>>> alpha_coeffs = (0.694911381318495, 0.919907783415812, 1.70412689631515)
>>> kwargs = dict(Tc=512.5, Pc=8084000.0, omega=0.559, alpha_coeffs=alpha_coeffs, c=-1e-6)
>>> eos = PRTranslatedTwu(T=300, P=1e5, **kwargs)
>>> eos.phase, eos.V_l, eos.V_g
('l/g', 4.8918748906e-05, 0.024314406330)
Notes
-----
This variant offers substantial improvements to the PR-type EOSs - likely
getting about as accurate as this form of cubic equation can get.
References
----------
.. [1] Twu, Chorng H., David Bluck, John R. Cunningham, and John E.
Coon. "A Cubic Equation of State with a New Alpha Function and a
New Mixing Rule." Fluid Phase Equilibria 69 (December 10, 1991):
33-50. doi:10.1016/0378-3812(91)90024-2.
'''
class PRTranslatedConsistent(PRTranslatedTwu):
r'''Class for solving the volume translated Le Guennec, Privat, and Jaubert
revision of the Peng-Robinson equation of state
for a pure compound according to [1]_.
Subclasses :obj:`PRTranslatedTwu`, which provides everything except the
estimation of `c` and the alpha coefficients. This model's `alpha` is based
on the TWU 1991 model; when estimating, `N` is set to 2.
Solves the EOS on initialization. See :obj:`PRTranslated` for further documentation.
.. math::
P = \frac{RT}{v + c - b} - \frac{a\alpha(T)}{(v+c)(v + c + b)+b(v
+ c - b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha = \left(\frac{T}{T_{c}}\right)^{c_{3} \left(c_{2}
- 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c}}
\right)^{c_{2} c_{3}} + 1\right)}
If `c` is not provided, it is estimated as:
.. math::
c =\frac{R T_c}{P_c}(0.0198\omega - 0.0065)
If `alpha_coeffs` is not provided, the parameters `L` and `M` are estimated
from the acentric factor as follows:
.. math::
L = 0.1290\omega^2 + 0.6039\omega + 0.0877
.. math::
M = 0.1760\omega^2 - 0.2600\omega + 0.8884
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
alpha_coeffs : tuple(float[3]), optional
Coefficients L, M, N (also called C1, C2, C3) of TWU 1991 form, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization (hexane), liquid phase:
>>> eos = PRTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=250., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.000124374813374486, -34155.16119794619, -83.34913258614345)
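The `c` and `alpha_coeffs` estimated from the acentric factor can be
inspected on the resulting object (output omitted; the values follow the
correlations above):
>>> eos.c, eos.alpha_coeffs # doctest:+SKIP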
Notes
-----
This variant offers substantial improvements to the PR-type EOSs - likely
getting about as accurate as this form of cubic equation can get.
References
----------
.. [1] Le Guennec, Yohann, Romain Privat, and Jean-Noël Jaubert.
"Development of the Translated-Consistent Tc-PR and Tc-RK Cubic
Equations of State for a Safe and Accurate Prediction of Volumetric,
Energetic and Saturation Properties of Pure Compounds in the Sub- and
Super-Critical Domains." Fluid Phase Equilibria 429 (December 15, 2016):
301-12. https://doi.org/10.1016/j.fluid.2016.09.003.
'''
kwargs_keys = ('c', 'alpha_coeffs')
def __init__(self, Tc, Pc, omega, alpha_coeffs=None, c=None, T=None,
P=None, V=None):
# estimates volume translation and alpha function parameters
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
Pc_inv = 1.0/Pc
# Limit the fitting omega to a little under the range reported
o = min(max(omega, -0.01), 1.48)
if c is None:
c = R*Tc*Pc_inv*(0.0198*o - 0.0065)
if alpha_coeffs is None:
L = o*(0.1290*o + 0.6039) + 0.0877
M = o*(0.1760*o - 0.2600) + 0.8884
N = 2.0
alpha_coeffs = (L, M, N)
self.c = c
self.alpha_coeffs = alpha_coeffs
self.kwargs = {'c': c, 'alpha_coeffs': alpha_coeffs}
self.a = self.c1*R2*Tc*Tc*Pc_inv
b0 = self.c2*R*Tc*Pc_inv
self.b = b = b0 - c
self.delta = 2.0*(c + b0)
self.epsilon = -b0*b0 + c*c + 2.0*c*b0
self.solve()
class PRSV(PR):
r'''Class for solving the Peng-Robinson-Stryjek-Vera equations of state for
a pure compound as given in [1]_. The same as the Peng-Robinson EOS,
except with a different `kappa` formula and with an optional fit parameter.
Subclasses :obj:`PR`, which provides only several constants. See :obj:`PR` for
further documentation and examples.
.. math::
P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha(T)=[1+\kappa(1-\sqrt{T_r})]^2
.. math::
\kappa = \kappa_0 + \kappa_1(1 + T_r^{0.5})(0.7 - T_r)
.. math::
\kappa_0 = 0.378893 + 1.4897153\omega - 0.17131848\omega^2
+ 0.0196554\omega^3
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
kappa1 : float, optional
Fit parameter; available in [1]_ for over 90 compounds, [-]
Examples
--------
P-T initialization (hexane, with fit parameter in [1]_), liquid phase:
>>> eos = PRSV(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6, kappa1=0.05104)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.000130126913554, -31698.926746, -74.16751538)
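A minimal sketch of enabling the `kappa1_Tr_limit` behavior described in the
Notes below; it is a class-level flag, so it affects instances created after
it is set (here Tr is about 0.79 > 0.7, so `kappa1` is zeroed; output omitted):
>>> PRSV.kappa1_Tr_limit = True # doctest:+SKIP
>>> eos_limited = PRSV(Tc=507.6, Pc=3025000, omega=0.2975, T=400., P=1E6, kappa1=0.05104) # doctest:+SKIP
>>> PRSV.kappa1_Tr_limit = False # doctest:+SKIP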
Notes
-----
[1]_ recommends that `kappa1` be set to 0 for Tr > 0.7. This is not done by
default; if that behavior is desired, the class boolean `kappa1_Tr_limit` may
be set to True and the problem re-solved. `kappa1_Tr_limit` is not supported
for P-V inputs.
Solutions for P-V inputs solve for `T` with SciPy's `newton` solver, as there
is no analytical solution for `T`.
[2]_ and [3]_ are two more resources documenting the PRSV EOS. [4]_ lists
`kappa` values for 69 additional compounds. See also `PRSV2`. Note that
tabulated `kappa` values should be used with the critical parameters used
in their fits. Both [1]_ and [4]_ only considered vapor pressure in fitting
the parameter.
References
----------
.. [1] Stryjek, R., and J. H. Vera. "PRSV: An Improved Peng-Robinson
Equation of State for Pure Compounds and Mixtures." The Canadian Journal
of Chemical Engineering 64, no. 2 (April 1, 1986): 323-33.
doi:10.1002/cjce.5450640224.
.. [2] Stryjek, R., and J. H. Vera. "PRSV - An Improved Peng-Robinson
Equation of State with New Mixing Rules for Strongly Nonideal Mixtures."
The Canadian Journal of Chemical Engineering 64, no. 2 (April 1, 1986):
334-40. doi:10.1002/cjce.5450640225.
.. [3] Stryjek, R., and J. H. Vera. "Vapor-liquid Equilibrium of
Hydrochloric Acid Solutions with the PRSV Equation of State." Fluid
Phase Equilibria 25, no. 3 (January 1, 1986): 279-90.
doi:10.1016/0378-3812(86)80004-8.
.. [4] Proust, P., and J. H. Vera. "PRSV: The Stryjek-Vera Modification of
the Peng-Robinson Equation of State. Parameters for Other Pure Compounds
of Industrial Interest." The Canadian Journal of Chemical Engineering
67, no. 1 (February 1, 1989): 170-73. doi:10.1002/cjce.5450670125.
'''
kappa1_Tr_limit = False
kwargs_keys = ('kappa1',)
def __init__(self, Tc, Pc, omega, T=None, P=None, V=None, kappa1=None):
self.T, self.P, self.V, self.omega, self.Tc, self.Pc = T, P, V, omega, Tc, Pc
if kappa1 is None:
kappa1 = 0.0
self.kwargs = {'kappa1': kappa1}
self.b = b = self.c2R*Tc/Pc
self.a = self.c1R2_c2R*Tc*b
self.delta = 2.0*b
self.epsilon = -b*b
self.kappa0 = omega*(omega*(0.0196554*omega - 0.17131848) + 1.4897153) + 0.378893
self.check_sufficient_inputs()
if V and P:
# Deal with T-solution here; does NOT support kappa1_Tr_limit.
self.kappa1 = kappa1
self.T = T = self.solve_T(P, V)
Tr = T/Tc
else:
Tr = T/Tc
if self.kappa1_Tr_limit and Tr > 0.7:
self.kappa1 = 0.0
else:
self.kappa1 = kappa1
self.kappa = self.kappa0 + self.kappa1*(1.0 + sqrt(Tr))*(0.7 - Tr)
self.solve()
def solve_T(self, P, V, solution=None):
r'''Method to calculate `T` from a specified `P` and `V` for the PRSV
EOS. Uses `Tc`, `a`, `b`, `kappa0` and `kappa` as well, obtained from
the class's namespace.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
'l' or 'g' to specify a liquid or vapor solution (if one exists);
if None, will select a solution more likely to be real (closer to
STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
Returns
-------
T : float
Temperature, [K]
Notes
-----
Not guaranteed to produce a solution. There are actually two solutions,
one much higher than normally desired; it is possible the solver could
converge on this.
'''
Tc, a, b, kappa0, kappa1 = self.Tc, self.a, self.b, self.kappa0, self.kappa1
self.no_T_spec = True
x0 = V - b
R_x0 = R/x0
x3_inv = 1.0/(100.*(V*(V + b) + b*x0))
x4 = 10.*kappa0
kappa110 = kappa1*10.
kappa17 = kappa1*7.
Tc_inv = 1.0/Tc
x51 = x3_inv*a
def to_solve(T):
x1 = T*Tc_inv
x2 = x1**0.5
x10 = ((x4 - (kappa110*x1 - kappa17)*(x2 + 1.))*(x2 - 1.) - 10.)
x11 = T*R_x0 - P
return x11 - x10*x10*x51
if solution is None:
try:
return newton(to_solve, Tc*0.5)
except:
pass
# The above method handles fewer cases, but the below is less optimized
return GCEOS.solve_T(self, P, V, solution=solution)
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for this EOS. Uses the set values of `Tc`, `kappa0`,
`kappa1`, and `a`.
The `a_alpha` function is shown below; the first and second derivatives
are not shown for brevity.
.. math::
a\alpha = a \left(\left(\kappa_{0} + \kappa_{1} \left(\sqrt{\frac{
T}{T_{c}}} + 1\right) \left(- \frac{T}{T_{c}} + \frac{7}{10}\right)
\right) \left(- \sqrt{\frac{T}{T_{c}}} + 1\right) + 1\right)^{2}
Parameters
----------
T : float
Temperature at which to calculate the values, [-]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
Notes
-----
This method does not alter the object's state, and the temperature
provided can be different from that of the object.
The expressions can be derived as follows:
>>> from sympy import *
>>> P, T, V = symbols('P, T, V')
>>> Tc, Pc, omega = symbols('Tc, Pc, omega')
>>> R, a, b, kappa0, kappa1 = symbols('R, a, b, kappa0, kappa1')
>>> kappa = kappa0 + kappa1*(1 + sqrt(T/Tc))*(Rational(7, 10)-T/Tc)
>>> a_alpha = a*(1 + kappa*(1-sqrt(T/Tc)))**2
>>> # diff(a_alpha, T)
>>> # diff(a_alpha, T, 2)
Examples
--------
>>> eos = PRSV(Tc=507.6, Pc=3025000, omega=0.2975, T=406.08, P=1E6, kappa1=0.05104)
>>> eos.a_alpha_and_derivatives_pure(185.0)
(4.76865472591, -0.0101408587212, 3.9138298092e-05)
'''
Tc, a, kappa0, kappa1 = self.Tc, self.a, self.kappa0, self.kappa1
x1 = T/Tc
T_inv = 1.0/T
x2 = sqrt(x1)
x3 = x2 - 1.
x4 = 10.*x1 - 7.
x5 = x2 + 1.
x6 = 10.*kappa0 - kappa1*x4*x5
x7 = x3*x6
x8 = x7*0.1 - 1.
x10 = x6*T_inv
x11 = kappa1*x3
x12 = x4*T_inv
x13 = 20./Tc*x5 + x12*x2
x14 = -x10*x2 + x11*x13
a_alpha = a*x8*x8
da_alpha_dT = -a*x14*x8*0.1
d2a_alpha_dT2 = a*(x14*x14 - x2*T_inv*(x7 - 10.)*(2.*kappa1*x13 + x10 + x11*(40.*x1*T_inv - x12)))*(1.0/200.)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha` for this EOS. Uses the set values of `Tc`, `kappa0`,
`kappa1`, and `a`.
.. math::
a\alpha = a \left(\left(\kappa_{0} + \kappa_{1} \left(\sqrt{\frac{
T}{T_{c}}} + 1\right) \left(- \frac{T}{T_{c}} + \frac{7}{10}\right)
\right) \left(- \sqrt{\frac{T}{T_{c}}} + 1\right) + 1\right)^{2}
Parameters
----------
T : float
Temperature at which to calculate the value, [-]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
Notes
-----
This method does not alter the object's state, and the temperature
provided can be different from that of the object.
Examples
--------
>>> eos = PRSV(Tc=507.6, Pc=3025000, omega=0.2975, T=406.08, P=1E6, kappa1=0.05104)
>>> eos.a_alpha_pure(185.0)
4.7686547259
'''
Tc, a, kappa0, kappa1 = self.Tc, self.a, self.kappa0, self.kappa1
Tr = T/Tc
sqrtTr = sqrt(Tr)
v = ((kappa0 + kappa1*(sqrtTr + 1.0)*(-Tr + 0.7))*(-sqrtTr + 1.0) + 1.0)
return a*v*v
class PRSV2(PR):
r'''Class for solving the Peng-Robinson-Stryjek-Vera 2 equations of state
for a pure compound as given in [1]_. The same as the Peng-Robinson EOS,
except with a different `kappa` formula and with three fit parameters.
Subclasses :obj:`PR`, which provides only several constants. See :obj:`PR` for
further documentation and examples.
.. math::
P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha(T)=[1+\kappa(1-\sqrt{T_r})]^2
.. math::
\kappa = \kappa_0 + [\kappa_1 + \kappa_2(\kappa_3 - T_r)(1-T_r^{0.5})]
(1 + T_r^{0.5})(0.7 - T_r)
.. math::
\kappa_0 = 0.378893 + 1.4897153\omega - 0.17131848\omega^2
+ 0.0196554\omega^3
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
kappa1 : float, optional
Fit parameter; available in [1]_ for over 90 compounds, [-]
kappa2 : float, optional
Fit parameter; available in [1]_ for over 90 compounds, [-]
kappa3 : float, optional
Fit parameter; available in [1]_ for over 90 compounds, [-]
Examples
--------
P-T initialization (hexane, with fit parameter in [1]_), liquid phase:
>>> eos = PRSV2(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6, kappa1=0.05104, kappa2=0.8634, kappa3=0.460)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.000130188257591, -31496.1841687, -73.615282963)
Notes
-----
Note that tabulated `kappa` values should be used with the critical
parameters used in their fits. [1]_ considered only vapor
pressure in fitting the parameter.
References
----------
.. [1] Stryjek, R., and J. H. Vera. "PRSV2: A Cubic Equation of State for
Accurate Vapor-liquid Equilibria Calculations." The Canadian Journal of
Chemical Engineering 64, no. 5 (October 1, 1986): 820-26.
doi:10.1002/cjce.5450640516.
'''
kwargs_keys = ('kappa1', 'kappa2', 'kappa3')
def __init__(self, Tc, Pc, omega, T=None, P=None, V=None, kappa1=0, kappa2=0, kappa3=0):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
self.check_sufficient_inputs()
self.kwargs = {'kappa1': kappa1, 'kappa2': kappa2, 'kappa3': kappa3}
self.a = self.c1*R*R*Tc*Tc/Pc
self.b = self.c2*R*Tc/Pc
self.delta = 2*self.b
self.epsilon = -self.b*self.b
self.kappa0 = kappa0 = omega*(omega*(0.0196554*omega - 0.17131848) + 1.4897153) + 0.378893
self.kappa1, self.kappa2, self.kappa3 = kappa1, kappa2, kappa3
if self.V and self.P:
# Deal with T-solution here
self.T = T = self.solve_T(self.P, self.V)
Tr = T/Tc
sqrtTr = sqrt(Tr)
self.kappa = kappa0 + ((kappa1 + kappa2*(kappa3 - Tr)*(1.0 -sqrtTr))*(1.0 + sqrtTr)*(0.7 - Tr))
self.solve()
def solve_T(self, P, V, solution=None):
r'''Method to calculate `T` from a specified `P` and `V` for the PRSV2
EOS. Uses `Tc`, `a`, `b`, `kappa0`, `kappa1`, `kappa2`, and `kappa3`
as well, obtained from the class's namespace.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
'l' or 'g' to specify a liquid or vapor solution (if one exists);
if None, will select a solution more likely to be real (closer to
STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
Returns
-------
T : float
Temperature, [K]
Notes
-----
Not guaranteed to produce a solution. There are actually 8 solutions,
six with an imaginary component at a tested point. The two real temperature
solutions are quite far apart, with one much higher than the other;
it is possible the solver could converge on the higher solution, so treat
the returned `T` with care. That higher solution is nevertheless a perfectly
valid root of the EOS. The secant method is used at present.
Examples
--------
>>> eos = PRSV2(Tc=507.6, Pc=3025000, omega=0.2975, T=400., P=1E6, kappa1=0.05104, kappa2=0.8634, kappa3=0.460)
>>> eos.solve_T(P=eos.P, V=eos.V_g)
400.0
'''
self.no_T_spec = True
if solution is None:
Tc, a, b, kappa0, kappa1, kappa2, kappa3 = self.Tc, self.a, self.b, self.kappa0, self.kappa1, self.kappa2, self.kappa3
x0 = V - b
R_x0 = R/x0
x5_inv = 1.0/(100.*(V*(V + b) + b*x0))
x4 = 10.*kappa0
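# to_solve is the residual of the PRSV2 pressure equation at fixed V;
# x100*x100*x5_inv evaluates alpha(T)/(V*(V+b) + b*(V - b)), the factors of
# 10 and 100 cancelling, so the full attraction term is a*x100*x100*x5_inv.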
def to_solve(T):
x1 = T/Tc
x2 = sqrt(x1)
x3 = x2 - 1.
x100 = (x3*(x4 - (kappa1 + kappa2*x3*(-kappa3 + x1))*(10.*x1 - 7.)*(x2 + 1.)) - 10.)
return (R_x0*T - a*x100*x100*x5_inv) - P
try:
return newton(to_solve, Tc*0.5)
except:
pass
# The above method handles fewer cases, but the below is less optimized
return GCEOS.solve_T(self, P, V, solution=solution)
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for this EOS. Uses the set values of `Tc`, `kappa0`, `kappa1`,
`kappa2`, `kappa3`, and `a`.
.. math::
\alpha(T)=[1+\kappa(1-\sqrt{T_r})]^2
.. math::
\kappa = \kappa_0 + [\kappa_1 + \kappa_2(\kappa_3 - T_r)(1-T_r^{0.5})]
(1 + T_r^{0.5})(0.7 - T_r)
Parameters
----------
T : float
Temperature at which to calculate the values, [-]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
Notes
-----
The first and second derivatives of `a_alpha` are available through the
following SymPy expression.
>>> from sympy import * # doctest:+SKIP
>>> P, T, V = symbols('P, T, V') # doctest:+SKIP
>>> Tc, Pc, omega = symbols('Tc, Pc, omega') # doctest:+SKIP
>>> R, a, b, kappa0, kappa1, kappa2, kappa3 = symbols('R, a, b, kappa0, kappa1, kappa2, kappa3') # doctest:+SKIP
>>> Tr = T/Tc # doctest:+SKIP
>>> kappa = kappa0 + (kappa1 + kappa2*(kappa3-Tr)*(1-sqrt(Tr)))*(1+sqrt(Tr))*(Rational('0.7')-Tr) # doctest:+SKIP
>>> a_alpha = a*(1 + kappa*(1-sqrt(T/Tc)))**2 # doctest:+SKIP
>>> diff(a_alpha, T) # doctest:+SKIP
>>> diff(a_alpha, T, 2) # doctest:+SKIP
Examples
--------
>>> eos = PRSV2(Tc=507.6, Pc=3025000, omega=0.2975, T=400., P=1E6, kappa1=0.05104, kappa2=0.8634, kappa3=0.460)
>>> eos.a_alpha_and_derivatives_pure(311.0)
(3.7245418495, -0.0066115440470, 2.05871011677e-05)
'''
Tc, a, kappa0, kappa1, kappa2, kappa3 = self.Tc, self.a, self.kappa0, self.kappa1, self.kappa2, self.kappa3
Tc_inv = 1.0/Tc
T_inv = 1.0/T
x1 = T*Tc_inv
x2 = sqrt(x1)
x3 = x2 - 1.
x4 = x2 + 1.
x5 = 10.*x1 - 7.
x6 = -kappa3 + x1
x7 = kappa1 + kappa2*x3*x6
x8 = x5*x7
x9 = 10.*kappa0 - x4*x8
x10 = x3*x9
x11 = x10*0.1 - 1.
x13 = x2*T_inv
x14 = x7*Tc_inv
x15 = kappa2*x4*x5
x16 = 2.*(-x2 + 1.)*Tc_inv + x13*(kappa3 - x1)
x17 = -x13*x8 - x14*(20.*x2 + 20.) + x15*x16
x18 = x13*x9 + x17*x3
x19 = x2*T_inv*T_inv
x20 = 2.*x2*T_inv
a_alpha = a*x11*x11
da_alpha_dT = a*x11*x18*0.1
d2a_alpha_dT2 = a*(x18*x18 + (x10 - 10.)*(x17*x20 - x19*x9
+ x3*(40.*kappa2*Tc_inv*x16*x4 + kappa2*x16*x20*x5
- 40.*T_inv*x14*x2 - x15*T_inv*x2*(4.*Tc_inv - x6*T_inv) + x19*x8)))*(1.0/200.)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha` for this EOS. Uses the set values of `Tc`, `kappa0`, `kappa1`,
`kappa2`, `kappa3`, and `a`.
.. math::
\alpha(T)=[1+\kappa(1-\sqrt{T_r})]^2
.. math::
\kappa = \kappa_0 + [\kappa_1 + \kappa_2(\kappa_3 - T_r)(1-T_r^{0.5})]
(1 + T_r^{0.5})(0.7 - T_r)
Parameters
----------
T : float
Temperature at which to calculate the values, [-]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
Examples
--------
>>> eos = PRSV2(Tc=507.6, Pc=3025000, omega=0.2975, T=400., P=1E6, kappa1=0.05104, kappa2=0.8634, kappa3=0.460)
>>> eos.a_alpha_pure(1276.0)
33.321674050
'''
Tc, a, kappa0, kappa1, kappa2, kappa3 = self.Tc, self.a, self.kappa0, self.kappa1, self.kappa2, self.kappa3
Tr = T/Tc
sqrtTr = sqrt(Tr)
kappa = kappa0 + ((kappa1 + kappa2*(kappa3 - Tr)*(1.0 -sqrtTr))*(1.0 + sqrtTr)*(0.7 - Tr))
x0 = (1.0 + kappa*(1.0 - sqrtTr))
return a*x0*x0
class VDW(GCEOS):
r'''Class for solving the Van der Waals [1]_ [2]_ cubic
equation of state for a pure compound. Subclasses :obj:`GCEOS`, which
provides the methods for solving the EOS and calculating its assorted
relevant thermodynamic properties. Solves the EOS on initialization.
Two of `T`, `P`, and `V` are needed to solve the EOS.
.. math::
P=\frac{RT}{V-b}-\frac{a}{V^2}
.. math::
a=\frac{27}{64}\frac{(RT_c)^2}{P_c}
.. math::
b=\frac{RT_c}{8P_c}
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
omega : float, optional
Acentric factor - not used in this equation of state, [-]
Examples
--------
>>> eos = VDW(Tc=507.6, Pc=3025000, T=299., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.000223329856081, -13385.7273746, -32.65923125)
Notes
-----
`omega` is allowed as an input for compatibility with the other EOS forms,
but is not used.
References
----------
.. [1] Poling, Bruce E. The Properties of Gases and Liquids. 5th
edition. New York: McGraw-Hill Professional, 2000.
.. [2] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
Butterworth-Heinemann, 1985.
'''
delta = 0.0
'''`delta` is always zero for the :obj:`VDW` EOS'''
epsilon = 0.0
'''`epsilon` is always zero for the :obj:`VDW` EOS'''
omega = None
'''`omega` has no impact on the :obj:`VDW` EOS'''
Zc = 3.0/8.
'''Mechanical compressibility of :obj:`VDW` EOS'''
c1 = 27.0/64.0
c2 = 1.0/8.0
c1R2 = c1*R2
c2R = c2*R
c1R2_c2R = c1R2/c2R
Psat_coeffs_limiting = [-3.0232164484175756, 0.20980668241160666]
Psat_coeffs_critical = [9.575399398167086, -5.742004486758378,
4.8000085098196745, -3.000000002903554,
1.0000000000002651]
Psat_cheb_coeffs = [-3.0938407448693392, -3.095844800654779, -0.01852425171597184, -0.009132810281704463,
0.0034478548769173167, -0.0007513250489879469, 0.0001425235859202672, -3.18455900032599e-05,
8.318773833859442e-06, -2.125810773856036e-06, 5.171012493290658e-07, -1.2777009201877978e-07,
3.285945705657834e-08, -8.532047244427343e-09, 2.196978792832582e-09, -5.667409821199761e-10,
1.4779624173003134e-10, -3.878590467732996e-11, 1.0181633097391951e-11, -2.67662653922595e-12,
7.053635426397184e-13, -1.872821965868618e-13, 4.9443291800198297e-14, -1.2936198878592264e-14,
2.9072203628840998e-15, -4.935694864968698e-16, 2.4160767787481663e-15, 8.615748088927622e-16,
-5.198342841253312e-16, -2.19739320055784e-15, -1.0876309618559898e-15, 7.727786509661994e-16,
7.958450521858285e-16, 2.088444434750203e-17, -1.3864912907016191e-16]
Psat_cheb_constant_factor = (-1.0630005005005003, 0.9416200294550813)
Psat_cheb_coeffs_der = chebder(Psat_cheb_coeffs)
Psat_coeffs_critical_der = polyder(Psat_coeffs_critical[::-1])[::-1]
# old
# Psat_ranges_low = (0.01718036869384043, 0.17410832232527684, 0.8905407354904298, 2.2829574334301284, 4.426814725758547, 8.56993985840095, 17.61166533755772, 34.56469477688733, 77.37121667459365, 153.79268928425782, 211.1818863599383)
# Psat_coeffs_low = [[7.337186734125958e+20, -1.0310276898768364e+20, 6.571811243740342e+18, -2.5142563688011328e+17, 6438369447367660.0, -116506416837877.06, 1533164370692.7314, -14873856417.118628, 106700642.42147608, -562656.3974485798, 2148.713955632803, -5.548751336002541, -0.32195156938958336, 0.29998758814658794, -2.9999999917461664, -2.349558048120315e-12], [-47716.736350665735, 77971.24589571664, -57734.17539993985, 25680.2304910714, -7665.471399138683, 1624.1156139380123, -251.90520898330854, 29.14383349387213, -2.6381384873389155, 0.32059956364530534, -0.19926642267720887, 0.24817720022049916, -0.33257749010194143, 0.3000000932130581, -3.000000000833664, 3.2607250233240848e-12], [-0.00016633444687699065, 0.0015485952911635057, -0.006863235318198338, 0.019451003485722398, -0.04015910528306605, 0.06568824742274948, -0.09110903840180812, 0.11396786842400031, -0.13566008631414134, 0.15955575257376992, -0.19156365559810004, 0.2479007648557237, -0.33256946631897144, 0.29999987151721086, -2.9999999954189773, -5.750377951585506e-11], [-7.428009121019926e-08, 1.9162063229590812e-06, -2.309905985537932e-05, 0.00017322131856139868, -0.0009091957309876683, 0.003571436963791831, -0.010987705972491674, 0.027369774602560334, -0.05652165868271822, 0.0989022663430445, -0.15341525362487263, 0.22884492561559433, -0.325345424979866, 0.2980576936663475, -2.999671413421381, -2.6222517302443293e-05], [-1.304829383752042e-10, 6.461311349903354e-09, -1.4697523398500654e-07, 2.0279063155086473e-06, -1.8858104539128372e-05, 0.0001240696514816671, -0.0005894769787540043, 0.0020347845952011574, -0.0051863170297969646, 0.010970032738000915, -0.02673039729181951, 0.07989719021025658, -0.18981116875502171, 0.20975049244314053, -2.9633925585336254, -0.007040461060364933], [-8.483418232487059e-14, 8.63974862373075e-12, -4.1017440987789216e-10, 1.2043221191702449e-08, -2.4458795830937423e-07, 3.6395727498191334e-06, -4.098703864306506e-05, 0.00035554065503799216, -0.0023925268788656615, 0.012458721418882563, -0.04951152909450557, 0.14533920657540916, -0.29136837208180943, 0.2957408216961629, -2.992226481317782, -0.010497873866540886], [1.2057846439922303e-18, -2.498037369038981e-16, 2.4169298089982375e-14, -1.4499679111714204e-12, 6.038719260446175e-11, -1.8521190625043091e-09, 4.3303351110208387e-08, -7.880798028324914e-07, 1.130024120583253e-05, -0.00012841965962337936, 0.0011579326786125476, -0.008265573217317893, 0.04660472417726327, -0.20986415552448967, -2.53897638918461, -0.1886467797596083], [5.456456120655508e-23, -2.2443095704330367e-20, 4.312575509674247e-18, -5.139829986632021e-16, 4.2535953008708246e-14, -2.592784941046291e-12, 1.2047971820045452e-10, -4.356922932676532e-09, 1.2408018207471443e-07, -2.797822690789934e-06, 4.99619390186859e-05, -0.0007038763395268029, 0.007780538926245783, -0.06771345498616513, -2.871932127187338, 0.18812475659373717], [7.867249251522955e-28, -6.9880843771805455e-25, 2.8943351684355176e-22, -7.420582747372289e-20, 1.3183352580324197e-17, -1.7213805273492628e-15, 1.7095155009610707e-13, -1.3180402544717915e-11, 7.98160114045682e-10, -3.815575213074619e-08, 1.4395507736418072e-06, -4.266216879490852e-05, 0.0009859688747049194, -0.01775904876058947, -3.107864564311215, 0.729035082405801], [1.8311839997067204e-31, -3.0736833050041296e-28, 2.3992665338781007e-25, -1.1556016072304783e-22, 3.842081607645237e-20, -9.344614680779128e-18, 1.7187598144123416e-15, -2.436914034605369e-13, 2.6895257030219836e-11, -2.3163949881188256e-09, 1.5507887324406105e-07, 
-7.988762893413175e-06, 0.0003116704944009503, -0.00906717310261831, -3.1702256862598945, 0.8792501521348868], [3.527506950185549e-29, -9.295645359865239e-26, 1.1416182235481363e-22, -8.667770574363085e-20, 4.550011954816112e-17, -1.7491901974552787e-14, 5.087496940001709e-12, -1.1399408706170077e-09, 1.9839477686722418e-07, -2.6819268483650753e-05, 0.0027930054114231415, -0.2200566323239571, 12.697254920421862, -506.5185701848038, 12488.177914134796, -143563.720494474]]
Psat_ranges_low = (0.01718036869384043, 0.17410832232527684, 0.8905407354904298, 2.2829574334301284, 4.588735762374165, 9.198213969343113, 19.164801360905702, 39.162202265367675, 89.80614296441635, 211.1818863599383)
Psat_coeffs_low = [[3.709170427241228e+20, -5.369876399773602e+19, 3.55437646625649e+18, -1.4237055014239443e+17, 3848808938963197.0, -74140204939074.31, 1047156764817.9473, -10990806592.937004, 85953150.78472495, -497625.69096407224, 2099.583641752129, -6.04136142777935, -0.31974206497045565, 0.29998332413453277, -2.9999999877560994, -3.795894154556834e-12], [202181.2230940579, -296048.9678516585, 197754.0893494042, -79779.91915062582, 21690.65516277411, -4199.021355925494, 596.0913050938211, -62.88287531914928, 4.838615877346316, -0.13231119871015717, -0.17907064524060426, 0.24752941358391634, -0.33256309490225905, 0.29999988493075114, -2.9999999990847974, -3.155253835984695e-12], [-0.00014029558609732686, 0.0013389844121899472, -0.0060888948581993155, 0.017710934821923482, -0.0375010580271008, 0.06276682660210708, -0.08872419862950512, 0.11249655170661062, -0.1349689265044028, 0.15930871464919516, -0.19149706848521642, 0.24788748254751106, -0.3325675697571886, 0.2999996886491002, -2.999999984780528, -3.387914393471192e-10], [-7.63500937520021e-08, 1.963108844693943e-06, -2.3590460584275485e-05, 0.0001763784708554673, -0.0009231029234653729, 0.0036159164438775227, -0.011094383271741722, 0.027565091035355725, -0.05679683707503139, 0.09920052620961826, -0.1536618676986315, 0.22899765112464673, -0.32541398139525823, 0.29807874689899555, -2.9996753674171845, -2.5880253545551568e-05], [-8.042041394684212e-11, 3.985029597822566e-09, -9.007790594461407e-08, 1.222381124984873e-06, -1.1000101355126748e-05, 6.812347222100813e-05, -0.00028917548838158866, 0.0007973445988826675, -0.0012398033018025893, 0.0012288503460726383, -0.008274005514143725, 0.05353750365067098, -0.16234102442586626, 0.19003067203938392, -2.9546730846617515, -0.008830685622417178], [-3.611616220031973e-14, 3.8767347748860204e-12, -1.9376800248066728e-10, 5.982204949605256e-09, -1.2756854237545315e-07, 1.9899440138492874e-06, -2.344690467724149e-05, 0.00021230626049088423, -0.0014868552467794268, 0.008024812789608007, -0.03284208875861816, 0.0980794205896591, -0.19356262181013717, 0.15625529853158554, -2.8696503839999488, -0.06053293499127932], [3.9101454054983654e-19, -8.77263958995087e-17, 9.190024742411764e-15, -5.968146499787873e-13, 2.6900134213207918e-11, -8.926852543915742e-10, 2.2576199533792054e-08, -4.44287432728095e-07, 6.886340930232061e-06, -8.455663245991894e-05, 0.0008233162588686339, -0.006341223591956234, 0.038529175147467405, -0.1865188282734164, -2.580546988555211, -0.15427433578447847], [1.0947478032468857e-23, -5.042777995894199e-21, 1.0846467451595606e-18, -1.4462299284667368e-16, 1.3382745001929549e-14, -9.116107354161303e-13, 4.730996512803351e-11, -1.9095977742126322e-09, 6.06591823816572e-08, -1.524502829215733e-06, 3.0318007813133075e-05, -0.00047520009323191216, 0.005836174792523882, -0.056313989030004126, -2.913137841802409, 0.2573512083085632], [1.0417003605891542e-28, -1.0617427986062235e-25, 5.045404400073696e-23, -1.4839108957248214e-20, 3.023754129891556e-18, -4.527590970541259e-16, 5.155153568793495e-14, -4.555877665139844e-12, 3.161476174754809e-10, -1.731323722560287e-08, 7.479908191797176e-07, -2.537158469008294e-05, 0.0006706500253454564, -0.013799582450476145, -3.1384761956985416, 0.8388793704120587], [3.619666517093681e-34, -8.603334584486169e-31, 9.530335406766774e-28, -6.531547415539122e-25, 3.100047792152191e-22, -1.0806960083892972e-19, 2.863332612760165e-17, -5.885004890375537e-15, 9.491165245604907e-13, -1.207013136689613e-10, 1.2097185039766346e-08, -9.505275874736017e-07, 
5.807225072909577e-05, -0.002750509744231165, -3.2675197703421506, 1.578136241005211]]
phi_sat_coeffs = [-4.703247660146169e-06, 7.276853488756492e-05, -0.0005008397610615123,
0.0019560274384829595, -0.004249875101260566, 0.001839985687730564,
0.02021191780955066, -0.07056928933569773, 0.09941120467466309,
0.021295687530901747, -0.32582447905247514, 0.521321793740683,
0.6950957738017804]
_P_zero_l_cheb_coeffs = [0.23949680596158576, -0.28552048884377407, 0.17223773827357045, -0.10535895068953466, 0.06539081523178862, -0.04127943642449526, 0.02647106353835149, -0.017260750015435533, 0.011558172064668568, -0.007830624115831804, 0.005422844032253547, -0.00383463423135285, 0.0027718803475398936, -0.0020570084561681613, 0.0015155074622906842, -0.0011495238177958583, 0.000904782154904249, -0.000683347677699564, 0.0005800187592994201, -0.0004529246894177611, 0.00032901743817593566, -0.0002990561659229427, 0.00023524411148843384, -0.00019464055011993858, 0.0001441665975916752, -0.00013106835607900116, 9.72812311007959e-05, -7.611327134024459e-05, 5.240433315348986e-05, -3.6415012576658176e-05, 3.89310794418167e-05, -2.2160354688301534e-05, 2.7908599229672926e-05, 1.6405692108915904e-05, -1.3931165551671343e-06, -4.80770003354232e-06]
P_zero_l_cheb_limits = (0.002354706203222534, 9.0)
main_derivatives_and_departures = staticmethod(main_derivatives_and_departures_VDW)
def __init__(self, Tc, Pc, T=None, P=None, V=None, omega=None):
self.Tc = Tc
self.Pc = Pc
self.T = T
self.P = P
self.V = V
Tc_Pc = Tc/Pc
self.a = (27.0/64.0)*R2*Tc*Tc_Pc
self.b = 0.125*R*Tc_Pc
self.solve()
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for this EOS. Uses the set values of `a`.
.. math::
a\alpha = a
.. math::
\frac{d a\alpha}{dT} = 0
.. math::
\frac{d^2 a\alpha}{dT^2} = 0
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
'''
return self.a, 0.0, 0.0
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha`. Uses the set values of `a`.
.. math::
a\alpha = a
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
'''
return self.a
def solve_T(self, P, V, solution=None):
r'''Method to calculate `T` from a specified `P` and `V` for the :obj:`VDW`
EOS. Uses `a`, and `b`, obtained from the class's namespace.
.. math::
T = \frac{1}{R V^{2}} \left(P V^{2} \left(V - b\right)
+ V a - a b\right)
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
'l' or 'g' to specify a liquid or vapor solution (if one exists);
if None, will select a solution more likely to be real (closer to
STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
Returns
-------
T : float
Temperature, [K]
'''
self.no_T_spec = True
return (P*V**2*(V - self.b) + V*self.a - self.a*self.b)/(R*V**2)
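# A minimal round-trip sketch (illustrative only; hexane-like constants are
# assumed, matching the RK example further below): solve_T should recover the
# temperature the object was constructed with.
# >>> eos = VDW(Tc=507.6, Pc=3025000.0, T=299.0, P=1E6)
# >>> V = eos.V_l if hasattr(eos, 'V_l') else eos.V_g
# >>> eos.solve_T(P=1E6, V=V)  # expected to be very close to 299 K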
def T_discriminant_zeros_analytical(self, valid=False):
r'''Method to calculate the temperatures which zero the discriminant
function of the :obj:`VDW` eos. This is a cubic function, solved
analytically.
Parameters
----------
valid : bool
Whether to filter the calculated temperatures so that they are all
real, and positive only, [-]
Returns
-------
T_discriminant_zeros : list[float]
Temperatures which make the discriminant zero, [K]
Notes
-----
Calculated analytically. Derived as follows. Has multiple solutions.
>>> from sympy import * # doctest:+SKIP
>>> P, T, V, R, b, a = symbols('P, T, V, R, b, a') # doctest:+SKIP
>>> delta, epsilon = 0, 0 # doctest:+SKIP
>>> eta = b # doctest:+SKIP
>>> B = b*P/(R*T) # doctest:+SKIP
>>> deltas = delta*P/(R*T) # doctest:+SKIP
>>> thetas = a*P/(R*T)**2 # doctest:+SKIP
>>> epsilons = epsilon*(P/(R*T))**2 # doctest:+SKIP
>>> etas = eta*P/(R*T) # doctest:+SKIP
>>> a_coeff = 1 # doctest:+SKIP
>>> b_coeff = (deltas - B - 1) # doctest:+SKIP
>>> c = (thetas + epsilons - deltas*(B+1)) # doctest:+SKIP
>>> d = -(epsilons*(B+1) + thetas*etas) # doctest:+SKIP
>>> disc = b_coeff*b_coeff*c*c - 4*a_coeff*c*c*c - 4*b_coeff*b_coeff*b_coeff*d - 27*a_coeff*a_coeff*d*d + 18*a_coeff*b_coeff*c*d # doctest:+SKIP
>>> base = -(expand(disc/P**2*R**3*T**3/a)) # doctest:+SKIP
>>> base_T = simplify(base*T**3) # doctest:+SKIP
>>> sln = collect(expand(base_T), T).args # doctest:+SKIP
'''
P, a, b = self.P, self.a_alpha, self.b
b2 = b*b
x1 = P*b2
x0 = 12.0*x1
d = 4.0*P*R_inv2*R_inv*(a*a + x1*(x1 + 2.0*a))
c = (x0 - 20.0*a)*R_inv2*b*P
b_coeff = (x0 - a)*R_inv
a_coeff = 4.0*b
roots = roots_cubic(a_coeff, b_coeff, c, d)
# roots = np.roots([a_coeff, b_coeff, c, d]).tolist()
if valid:
# TODO - only include ones when switching phases from l/g to either g/l
# Do not know how to handle
roots = [r.real for r in roots if (r.real >= 0.0 and (abs(r.imag) <= 1e-12))]
roots.sort()
return roots
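# A short usage sketch (illustrative only; constants assumed): list the real,
# positive temperatures at which the discriminant vanishes for the stored P.
# >>> eos = VDW(Tc=507.6, Pc=3025000.0, T=299.0, P=1E6)
# >>> eos.T_discriminant_zeros_analytical(valid=True)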
@staticmethod
def P_discriminant_zeros_analytical(T, b, delta, epsilon, a_alpha, valid=False):
r'''Method to calculate the pressures which zero the discriminant
function of the :obj:`VDW` eos. This is a cubic function solved
analytically.
Parameters
----------
T : float
Temperature, [K]
b : float
Coefficient calculated by EOS-specific method, [m^3/mol]
delta : float
Coefficient calculated by EOS-specific method, [m^3/mol]
epsilon : float
Coefficient calculated by EOS-specific method, [m^6/mol^2]
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
valid : bool
Whether to filter the calculated pressures so that they are all
real, and positive only, [-]
Returns
-------
P_discriminant_zeros : tuple[float]
Pressures which make the discriminant zero, [Pa]
Notes
-----
Calculated analytically. Derived as follows. Has multiple solutions.
>>> from sympy import * # doctest:+SKIP
>>> P, T, V, R, b, a = symbols('P, T, V, R, b, a') # doctest:+SKIP
>>> P_vdw = R*T/(V-b) - a/(V*V) # doctest:+SKIP
>>> delta, epsilon = 0, 0 # doctest:+SKIP
>>> eta = b # doctest:+SKIP
>>> B = b*P/(R*T) # doctest:+SKIP
>>> deltas = delta*P/(R*T) # doctest:+SKIP
>>> thetas = a*P/(R*T)**2 # doctest:+SKIP
>>> epsilons = epsilon*(P/(R*T))**2 # doctest:+SKIP
>>> etas = eta*P/(R*T) # doctest:+SKIP
>>> a_coeff = 1 # doctest:+SKIP
>>> b_coeff = (deltas - B - 1) # doctest:+SKIP
>>> c = (thetas + epsilons - deltas*(B+1)) # doctest:+SKIP
>>> d = -(epsilons*(B+1) + thetas*etas) # doctest:+SKIP
>>> disc = b_coeff*b_coeff*c*c - 4*a_coeff*c*c*c - 4*b_coeff*b_coeff*b_coeff*d - 27*a_coeff*a_coeff*d*d + 18*a_coeff*b_coeff*c*d # doctest:+SKIP
>>> base = -(expand(disc/P**2*R**3*T**3/a)) # doctest:+SKIP
>>> collect(base, P).args # doctest:+SKIP
'''
# T, a_alpha = self.T, self.a_alpha
# a = a_alpha
# b, epsilon, delta = self.b, self.epsilon, self.delta
a = a_alpha
d = 4*b - a/(R*T)
c = (12*b**2/(R*T) - 20*a*b/(R**2*T**2) + 4*a**2/(R**3*T**3))
b_coeff = (12*b**3/(R**2*T**2) + 8*a*b**2/(R**3*T**3))
a_coeff = 4*b**4/(R**3*T**3)
roots = roots_cubic(a_coeff, b_coeff, c, d)
# roots = np.roots([a_coeff, b_coeff, c, d]).tolist()
return roots
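# A short usage sketch of the static helper above (illustrative only; constants
# assumed). For the VDW EOS `delta` and `epsilon` are both zero and `a_alpha`
# is simply `a`.
# >>> eos = VDW(Tc=507.6, Pc=3025000.0, T=299.0, P=1E6)
# >>> VDW.P_discriminant_zeros_analytical(T=299.0, b=eos.b, delta=0.0,
# ...                                     epsilon=0.0, a_alpha=eos.a)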
class RK(GCEOS):
r'''Class for solving the Redlich-Kwong [1]_ [2]_ [3]_ cubic
equation of state for a pure compound. Subclasses :obj:`GCEOS`, which
provides the methods for solving the EOS and calculating its assorted
relevant thermodynamic properties. Solves the EOS on initialization.
Two of `T`, `P`, and `V` are needed to solve the EOS.
.. math::
P =\frac{RT}{V-b}-\frac{a}{V\sqrt{\frac{T}{T_{c}}}(V+b)}
.. math::
a=\left(\frac{R^2(T_c)^{2}}{9(\sqrt[3]{2}-1)P_c} \right)
=\frac{0.42748\cdot R^2(T_c)^{2}}{P_c}
.. math::
b=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_c}{P_c}
=\frac{0.08664\cdot R T_c}{P_c}
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
>>> eos = RK(Tc=507.6, Pc=3025000, T=299., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.000151893468781, -26160.8424877, -63.013137852)
Notes
-----
`omega` is allowed as an input for compatibility with the other EOS forms,
but is not used.
References
----------
.. [1] Redlich, Otto., and J. N. S. Kwong. "On the Thermodynamics of
Solutions. V. An Equation of State. Fugacities of Gaseous Solutions."
Chemical Reviews 44, no. 1 (February 1, 1949): 233-44.
doi:10.1021/cr60137a013.
.. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th
edition. New York: McGraw-Hill Professional, 2000.
.. [3] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
Butterworth-Heinemann, 1985.
'''
c1 = 0.4274802335403414043909906940611707345513 # 1/(9*(2**(1/3.)-1))
'''Full value of the constant in the `a` parameter'''
c2 = 0.08664034996495772158907020242607611685675 # (2**(1/3.)-1)/3
'''Full value of the constant in the `b` parameter'''
epsilon = 0.0
'''`epsilon` is always zero for the :obj:`RK` EOS'''
omega = None
'''`omega` has no impact on the :obj:`RK` EOS'''
Zc = 1.0/3.
'''Mechanical compressibility of :obj:`RK` EOS'''
c1R2 = c1*R2
c2R = c2*R
c1R2_c2R = c1R2/c2R
Psat_coeffs_limiting = [-72.700288369511583, -68.76714163049]
Psat_coeffs_critical = [1129250.3276866912, 4246321.053155941,
5988691.4873851035, 3754317.4112657467,
882716.2189281426]
Psat_cheb_coeffs = [-6.8488798834192215, -6.93992806360099, -0.11216113842675507, 0.0022494496508455135,
0.00995148012561513, -0.005789786392208277, 0.0021454644555051177, -0.0006192510387981658,
0.00016870584348326536, -5.828094356536212e-05, 2.5829410448955883e-05, -1.1312372380559225e-05,
4.374040785359406e-06, -1.5546789700246184e-06, 5.666723613325655e-07, -2.2701147218271074e-07,
9.561199996134724e-08, -3.934646467524511e-08, 1.55272396700466e-08, -6.061097474369418e-09,
2.4289648176102022e-09, -1.0031987621530753e-09, 4.168016003137324e-10, -1.7100917451312765e-10,
6.949731049432813e-11, -2.8377758503521713e-11, 1.1741734564892428e-11, -4.891469634936765e-12,
2.0373765879672795e-12, -8.507821454718095e-13, 3.4975627537410514e-13, -1.4468659018281038e-13,
6.536766028637786e-14, -2.7636123641275323e-14, 1.105377996166862e-14]
Psat_cheb_constant_factor = (0.8551757791729341, 9.962912449541513)
Psat_cheb_coeffs_der = chebder(Psat_cheb_coeffs)
Psat_coeffs_critical_der = polyder(Psat_coeffs_critical[::-1])[::-1]
phi_sat_coeffs = [156707085.9178746, 1313005585.0874271, 4947242291.244957,
11038959845.808495, 16153986262.1129, 16199294577.496677,
11273931409.81048, 5376831929.990161, 1681814895.2875218,
311544335.80653775, 25954329.68176187]
Psat_ranges_low = (0.033797068457719265, 0.06786604965443845, 0.1712297613585108, 0.34622987689428786, 0.7712336381743264, 1.745621379817678, 3.7256294306343207, 6.581228646986647, 12.781884795234102, 24.412307224840184, 48.39951213433041, 99.16043966361465, 206.52538850089107)
Psat_coeffs_low = [[-2.0473027805583304e+16, 5590407450630671.0, -691651500345804.2, 51280971870837.32, -2539543204646.707, 88630534161.11136, -2241691916.726171, 41616814.12629884, -568152.2176538995, 5661.177783316078, -40.73060128671707, 0.5116910477178825, -0.3837083115168163, 0.33323887045969375, -3.0536215774324824, 7.04644675941779e-13], [308999097192961.8, -225585388170583.75, 76474322668841.14, -15968075011367.047, 2296463426010.7324, -240935052543.90527, 19048380996.66752, -1155476440.462194, 54215636.4938359, -1967442.1357566533, 54762.443278119186, -1147.7177667787553, 17.16085684519316, 0.14875852741749432, -3.052428192332575, -3.5796888761263634e-06], [-19650136.181735344, 31781193.928728923, -23482166.19636667, 10469551.38856498, -3130203.8712914013, 658159.0021417511, -98797.59681465346, 10408.710407311624, -708.5507506253232, 20.511506773041855, 1.1894690300458857, 0.09491188992340026, -0.3699746434334233, 0.3327794679246402, -3.053612527869441, -7.854416195218761e-08], [27999.20768241787, -105403.37695226824, 184231.72791343703, -198316.18594155577, 147020.31646897402, -79504.54402254449, 32396.282402624805, -10128.009032305941, 2449.0150598681134, -457.86581824588563, 65.46517369332473, -6.79444639388651, 0.17688177142370798, 0.30286151881162965, -3.052607289294409, -1.571427065993891e-05], [0.03260876969558781, -0.2687907725537127, 1.0267017754905683, -2.4094567772527298, 3.8817887528347166, -4.539672024261187, 3.9650465848385767, -2.6039495096751812, 1.2459350220734762, -0.3527513106804435, -0.07989160047420704, 0.2668158546701413, -0.37571189865522364, 0.33238476075275014, -3.053560614808337, -2.0178433899342707e-06], [-7.375386804046186e-07, 1.5495867993995408e-05, -0.00015284972568903337, 0.0009416918743616606, -0.0040706288679345, 0.013170886849436726, -0.033323844522302436, 0.0682465756200053, -0.11659032663823216, 0.171245314502395, -0.22682939669727753, 0.29437853298508776, -0.3782483397093579, 0.33220274751099305, -3.053481229409229, -8.962708854642898e-06], [-9.64878085834743e-10, 4.3676512898669364e-08, -9.195173654927243e-07, 1.192726889996796e-05, -0.00010641091957706412, 0.0006901346742808183, -0.0033536294555478845, 0.012421045099127406, -0.03550565516993044, 0.07992816727224494, -0.14853460918439382, 0.24510479474467387, -0.35702531869477633, 0.32684341660780225, -3.053040405250035, 6.241135226403571e-05], [-3.5002452124187664e-13, 3.2833906636665076e-11, -1.430637752387875e-09, 3.84703168582193e-08, -7.149328354678022e-07, 9.737245185617094e-06, -0.0001004961143222046, 0.0008008011389293154, -0.004967550562579805, 0.023964447789498418, -0.08887464138262652, 0.24644247973322814, -0.4795516986170165, 0.5340043615968529, -3.218003440328873, 0.0551398944675352], [5.034255484339691e-17, -7.864397963490581e-15, 5.752225082446006e-13, -2.615885881066589e-11, 8.282570031686524e-10, -1.9374016535411608e-08, 3.4664702879779335e-07, -4.845857364906252e-06, 5.359278262994369e-05, -0.00047191509173119853, 0.003314508017194061, -0.018546171084714562, 0.08264133469800018, -0.2976511343203724, -2.45051660976546, -0.2780886842259136], [6.989268516115097e-21, -2.0569549793133175e-18, 2.829181210013469e-16, -2.4145085922204994e-14, 1.4314693471489465e-12, -6.253869824774858e-11, 2.0839676060467857e-09, -5.407898264555903e-08, 1.10600368057607e-06, -1.7926789523973723e-05, 0.00023042013161443632, -0.0023410716986771423, 0.018721566225139197, -0.11858986125950052, -2.7694948645429545, -0.005601354706335826], [4.123334769968695e-25, -2.3690486736737077e-22, 
6.357338194540137e-20, -1.0578385702299804e-17, 1.2218888207640753e-15, -1.0392124936479462e-13, 6.735204427884249e-12, -3.395649622455689e-10, 1.3474573409568848e-08, -4.230554509480506e-07, 1.0508993131776807e-05, -0.000205653116778391, 0.0031500689366463458, -0.037812816408928154, -3.0367716832005502, 0.42028748728880316], [1.2637873346500257e-29, -1.4691821304898006e-26, 7.97371063405237e-24, -2.6821456640173466e-21, 6.259659753789673e-19, -1.0750710034717391e-16, 1.4061365634529063e-14, -1.4296683760114274e-12, 1.1431380830397977e-10, -7.224377488638715e-09, 3.607342375851496e-07, -1.4162123664055414e-05, 0.0004338140901526731, -0.010352095429488306, -3.214333239715912, 0.9750918877217316], [2.4467625180409936e-34, -5.900060775579966e-31, 6.640727340409195e-28, -4.631434540342247e-25, 2.240558311318526e-22, -7.974393923385324e-20, 2.1607792871336745e-17, -4.549744513625842e-15, 7.53070203647074e-13, -9.846740932533425e-11, 1.0165520378636594e-08, -8.242924637302648e-07, 5.2066757251344576e-05, -0.002554238879420431, -3.3164175313132174, 1.6226038152294109]]
# Thought the below was better - is failing. Need better ranges
# Psat_ranges_low = (0.002864867449609054, 0.00841672469500749, 0.016876032463065772, 0.0338307436429719, 0.06789926110832244, 0.17126106018604287, 0.346311983890616, 0.7714158394657995, 1.7457236753368228, 3.726128003826199, 6.581228646986647, 12.781884795234102, 24.412307224840184, 48.39951213433041, 99.16043966361465, 206.52538850089107)
# Psat_coeffs_low = [[7.4050168086686e+36, -2.3656234050215595e+35, 3.5140849350729625e+33, -3.219960263668302e+31, 2.0353831678446376e+29, -9.401903615685883e+26, 3.27870193930672e+24, -8.790141973297096e+21, 1.8267653379529515e+19, -2.9430079135183156e+16, 36458127001690.625, -34108081817.37184, 23328876.278645795, -11013.595381204012, 0.15611893268569021, -0.0004353146166747694], [-9.948446571395775e+25, 5.19510926774992e+24, -7.189006307350884e+22, -1.5680848799655576e+21, 8.249811116671015e+19, -1.6950901342526528e+18, 2.1845526238020284e+16, -197095048940187.34, 1299650648245.581, -6369933293.417471, 23232428.90696315, -62270.805040856954, 118.78599182230242, 0.17914187929689024, -3.053500975368179, -4.3120679728281264e-08], [9.397084120968658e+23, -1.7568386783383563e+23, 1.5270752841173062e+22, -8.18635952561484e+20, 3.0268596563705795e+19, -8.176367091459369e+17, 1.6669079239736548e+16, -261159410245086.28, 3170226264128.2866, -29816091359.74791, 215481434.94837964, -1175107.961691127, 4680.2781732486055, -12.521703234052518, -3.031856323244789, -1.7125428611007576e-05], [-5.837633410530431e+19, 2.2354665725528674e+19, -3.9748648336736783e+18, 4.353010078685106e+17, -3.2833626753213436e+16, 1806702598986387.0, -74919223763900.97, 2383883719613.177, -58681349911.8085, 1117424654.4682977, -16325312.825120728, 179698.66647387162, -1442.9221906440623, 8.305809571517658, -3.0807467744908252, 4.282801743092646e-05], [371247743335500.9, -296343269154610.4, 109587709190787.98, -24907918727301.76, 3891709522603.097, -442798937995.7918, 37904104192.54725, -2485814525.910429, 125929932.89424846, -4928093.141384071, 147763.74249085123, -3333.473113107637, 54.40288611988696, -0.28587642030919863, -3.04931950649037, -1.3857488372237547e-05], [16208032.322457436, -28812339.698805887, 23769768.592785712, -12069159.698844183, 4216830.274106061, -1073538.4841311441, 205658.8661178185, -30177.62398799642, 3418.24878855674, -298.57690432008536, 19.71936439114255, -0.6937801749294776, -0.34639354927988253, 0.3323199955834364, -3.053607492200311, -9.988880111944098e-08], [-27604.735354758857, 105025.68737778495, -185570.40811520678, 201981.46688618566, -151443.86118322334, 82852.55333788031, -34164.546515476206, 10812.149811177835, -2647.7278495315263, 501.83786547752425, -73.19000295074032, 8.299579502519556, -1.0215340085992137, 0.3683751980772054, -3.0548122319685604, 1.8727318915723323e-05], [0.005501339854516574, -0.030871868660992247, 0.06262306950800559, -0.016054714694926787, -0.19049678051599286, 0.4916342706149181, -0.6988226755673006, 0.6995056451067202, -0.5569157797560214, 0.40541813786380926, -0.32359936610250595, 0.32562413248075583, -0.38602496065455755, 0.33362571128345786, -3.0536522381393123, 1.1116248239684268e-06], [-7.625288226006888e-07, 1.592958164613657e-05, -0.00015633640254263914, 0.0009589163584223709, -0.004129113991499528, 0.01331550052681755, -0.033592935849933975, 0.06863043023079776, -0.11701377319077316, 0.17160678539192847, -0.22706640260572322, 0.2944958500921784, -0.3782908182523437, 0.3322133797180262, -3.0534828760507775, -8.843634685451462e-06], [-9.602485478694793e-10, 4.349693466931302e-08, -9.162924468728032e-07, 1.1891710154307035e-05, -0.00010614176529656917, 0.0006886536353876484, -0.003347511076288892, 0.012401728251598975, -0.03545868060099854, 0.07984021871367283, -0.1484089324448234, 0.24497026761102456, -0.35692097886460605, 0.32678811005407216, -3.0530225111683147, 5.975140457969985e-05], [-3.271879015155258e-13, 3.106593030234596e-11, 
-1.3669954450963802e-09, 3.7057309629853086e-08, -6.932939676452128e-07, 9.49513835382941e-06, -9.845166709551053e-05, 0.000787533270198052, -0.0049008339165666475, 0.023704510936127153, -0.08809636022958753, 0.24468388333303692, -0.4766488164995153, 0.5306997327949121, -3.2156835292074972, 0.05438278235662608], [5.027674758216368e-17, -7.856316772563329e-15, 5.74771486417108e-13, -2.6143808092714456e-11, 8.279258549554745e-10, -1.9369063289334646e-08, 3.4659817641020265e-07, -4.8455984691411336e-06, 5.3593276238540865e-05, -0.0004719384663395697, 0.003314740905347161, -0.018547549367299895, 0.08264669293617401, -0.29766465209956755, -2.450496401425025, -0.2781023348227336], [7.050221439220754e-21, -2.0731943309577094e-18, 2.84928020400626e-16, -2.429837955412963e-14, 1.4395267045494148e-12, -6.284786016386046e-11, 2.092913781550848e-09, -5.427778893624791e-08, 1.1094245992741828e-06, -1.7972372175390485e-05, 0.0002308866520395049, -0.002344673517182507, 0.018741874333835694, -0.11866881228864186, -2.7693056026459097, -0.00581227798597439], [4.1192237682040195e-25, -2.366064116476803e-22, 6.347935162866163e-20, -1.0560893898667048e-17, 1.2197114527874372e-15, -1.037276215071595e-13, 6.722432950732127e-12, -3.389265643942945e-10, 1.3450132487487853e-08, -4.2233752380466274e-07, 1.0492923968957467e-05, -0.00020538369710127645, 0.003146790904310898, -0.0377854748223825, -3.036911550333188, 0.4206184380126672], [1.2604142367817428e-29, -1.4652181650108445e-26, 7.952241970757233e-24, -2.6750298800006056e-21, 6.243504642033308e-19, -1.0724081450429274e-16, 1.4028428417591862e-14, -1.4265539327144317e-12, 1.1408674781257439e-10, -7.211610181866389e-09, 3.6018494473345496e-07, -1.4144362093631258e-05, 0.0004333961723540267, -0.010345339188037806, -3.2144003543293014, 0.9754007550013739], [2.4330594298876834e-34, -5.870171131499116e-31, 6.610469013522427e-28, -4.6125772590980585e-25, 2.232467903153395e-22, -7.949083788375843e-20, 2.1548151171435654e-17, -4.538965432777591e-15, 7.515638568810398e-13, -9.83046463909207e-11, 1.0152034186437456e-08, -8.234510061656029e-07, 5.202848938898853e-05, -0.002553041403736697, -3.316440584285066, 1.6228096260313123]]
def __init__(self, Tc, Pc, T=None, P=None, V=None, omega=None):
self.Tc = Tc
self.Pc = Pc
self.T = T
self.P = P
self.V = V
self.omega = omega
self.a = self.c1R2*Tc*Tc/Pc
self.b = self.delta = self.c2R*Tc/Pc
self.solve()
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for this EOS. Uses the set values of `a`.
.. math::
a\alpha = \frac{a}{\sqrt{\frac{T}{T_{c}}}}
.. math::
\frac{d a\alpha}{dT} = - \frac{a}{2 T\sqrt{\frac{T}{T_{c}}}}
.. math::
\frac{d^2 a\alpha}{dT^2} = \frac{3 a}{4 T^{2}\sqrt{\frac{T}{T_{c}}}}
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
'''
Tc = self.Tc
sqrt_Tr_inv = (T/Tc)**-0.5
a_alpha = self.a*sqrt_Tr_inv
T_inv = 1.0/T
da_alpha_dT = -0.5*self.a*T_inv*sqrt_Tr_inv
d2a_alpha_dT2 = 0.75*self.a*T_inv*T_inv*sqrt_Tr_inv
return a_alpha, da_alpha_dT, d2a_alpha_dT2
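# A quick numerical sanity check (illustrative only; hexane-like constants
# assumed): the analytical da_alpha_dT above can be compared against a central
# finite difference of a_alpha_pure.
# >>> eos = RK(Tc=507.6, Pc=3025000, T=299., P=1E6)
# >>> a_alpha, da, d2a = eos.a_alpha_and_derivatives_pure(400.0)
# >>> h = 1e-4
# >>> (eos.a_alpha_pure(400.0 + h) - eos.a_alpha_pure(400.0 - h))/(2.0*h)  # ~= da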
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha` for this EOS. Uses the set
values of `a`.
.. math::
a\alpha = \frac{a}{\sqrt{\frac{T}{T_{c}}}}
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
'''
Tc = self.Tc
sqrt_Tr_inv = sqrt(Tc/T)
return self.a*sqrt_Tr_inv
def solve_T(self, P, V, solution=None):
r'''Method to calculate `T` from a specified `P` and `V` for the RK
EOS. Uses `a`, and `b`, obtained from the class's namespace.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
'l' or 'g' to specify a liquid or vapor solution (if one exists);
if None, will select a solution more likely to be real (closer to
STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
Returns
-------
T : float
Temperature, [K]
Notes
-----
The exact solution can be derived as follows; it is excluded for
brevity.
>>> from sympy import * # doctest:+SKIP
>>> P, T, V, R = symbols('P, T, V, R') # doctest:+SKIP
>>> Tc, Pc = symbols('Tc, Pc') # doctest:+SKIP
>>> a, b = symbols('a, b') # doctest:+SKIP
>>> RK = Eq(P, R*T/(V-b) - a/sqrt(T)/(V*V + b*V)) # doctest:+SKIP
>>> solve(RK, T) # doctest:+SKIP
'''
a, b = self.a, self.b
a = a*self.Tc**0.5
# print([R, V, b, P, a])
if solution is None:
# Confirmed with mpmath - has numerical issues
x0 = 3**0.5
x1 = 1j*x0
x2 = x1 + 1.0
x3 = V + b
x4 = V - b
x5 = x4/R
x6 = (x0*(x4**2*(-4*P**3*x5 + 27*a**2/(V**2*x3**2))/R**2+0.0j)**0.5 - 9*a*x5/(V*x3))**0.333333333333333
x7 = 0.190785707092222*x6
x8 = P*x5/x6
x9 = 1.7471609294726*x8
x10 = 1.0 - x1
slns = [(x2*x7 + x9/x2)**2,
(x10*x7 + x9/x10)**2,
(0.381571414184444*x6 + 0.873580464736299*x8)**2]
try:
self.no_T_spec = True
x1 = -1.j*1.7320508075688772 + 1.
x2 = V - b
x3 = x2/R
x4 = V + b
x5 = (1.7320508075688772*(x2*x2*(-4.*P*P*P*x3 + 27.*a*a/(V*V*x4*x4))/(R*R))**0.5 - 9.*a*x3/(V*x4) +0j)**(1./3.)
T_sln = (3.3019272488946263*(11.537996562459266*P*x3/(x1*x5) + 1.2599210498948732*x1*x5)**2/144.0).real
if T_sln > 1e-3:
return T_sln
except:
pass
# Turns out the above solution does not cover all cases
return super(RK, self).solve_T(P, V, solution=solution)
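# A minimal round-trip sketch (illustrative only; constants from the class
# docstring example): solving back for T from the computed liquid volume.
# >>> eos = RK(Tc=507.6, Pc=3025000, T=299., P=1E6)
# >>> eos.solve_T(P=1E6, V=eos.V_l)  # expected to be very close to 299 K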
def T_discriminant_zeros_analytical(self, valid=False):
r'''Method to calculate the temperatures which zero the discriminant
function of the `RK` eos. The discriminant reduces to a degree-11
polynomial in :math:`\sqrt{T}`, which is solved with `numpy`.
Parameters
----------
valid : bool
Whether to filter the calculated temperatures so that they are all
real, and positive only, [-]
Returns
-------
T_discriminant_zeros : list[float]
Temperatures which make the discriminant zero, [K]
Notes
-----
Calculated analytically. Derived as follows. Has multiple solutions.
>>> from sympy import * # doctest:+SKIP
>>> P, T, V, R, b, a, Troot = symbols('P, T, V, R, b, a, Troot') # doctest:+SKIP
>>> a_alpha = a/sqrt(T) # doctest:+SKIP
>>> delta, epsilon = b, 0 # doctest:+SKIP
>>> eta = b # doctest:+SKIP
>>> B = b*P/(R*T) # doctest:+SKIP
>>> deltas = delta*P/(R*T) # doctest:+SKIP
>>> thetas = a_alpha*P/(R*T)**2 # doctest:+SKIP
>>> epsilons = epsilon*(P/(R*T))**2 # doctest:+SKIP
>>> etas = eta*P/(R*T) # doctest:+SKIP
>>> a_coeff = 1 # doctest:+SKIP
>>> b_coeff = (deltas - B - 1) # doctest:+SKIP
>>> c = (thetas + epsilons - deltas*(B+1)) # doctest:+SKIP
>>> d = -(epsilons*(B+1) + thetas*etas) # doctest:+SKIP
>>> disc = b_coeff*b_coeff*c*c - 4*a_coeff*c*c*c - 4*b_coeff*b_coeff*b_coeff*d - 27*a_coeff*a_coeff*d*d + 18*a_coeff*b_coeff*c*d # doctest:+SKIP
>>> new_disc = disc.subs(sqrt(T), Troot) # doctest:+SKIP
>>> new_T_base = expand(expand(new_disc)*Troot**15) # doctest:+SKIP
>>> ans = collect(new_T_base, Troot).args # doctest:+SKIP
'''
P, a, b, epsilon, delta = self.P, self.a, self.b, self.epsilon, self.delta
a *= self.Tc**0.5 # pre-dates change in alpha definition
P2 = P*P
P3 = P2*P
P4 = P2*P2
R_inv4 = R_inv2*R_inv2
R_inv5 = R_inv4*R_inv
R_inv6 = R_inv4*R_inv2
b2 = b*b
b3 = b2*b
b4 = b2*b2
a2 = a*a
x5 = 15.0*a2
x8 = 2.0*P3
x9 = b4*b*R_inv
x13 = 6.0*R_inv*R_inv2
coeffs = [P2*b2*R_inv2,
0.0,
P3*b3*x13,
-a*b*P2*x13,
13.0*R_inv4*P4*b4,
-32.0*a*P3*R_inv4*b2,
P2*R_inv4*(12.0*P3*x9 + a2),
-42.0*a*b3*P4*R_inv5,
b*R_inv5*x8*(x5 + x8*x9),
-12.0*P2*P3*a*R_inv6*b4,
-R_inv6*b2*P4*x5,
-4.0*a2*a*P3*R_inv6]
roots = np.roots(coeffs).tolist()
roots = [i*i for i in roots]
if valid:
# TODO - only include ones when switching phases from l/g to either g/l
# Do not know how to handle
roots = [r.real for r in roots if (r.real >= 0.0 and (abs(r.imag) <= 1e-12))]
roots.sort()
return roots
class SRK(GCEOS):
r'''Class for solving the Soave-Redlich-Kwong [1]_ [2]_ [3]_ cubic
equation of state for a pure compound. Subclasses :obj:`GCEOS`, which
provides the methods for solving the EOS and calculating its assorted
relevant thermodynamic properties. Solves the EOS on initialization.
Two of `T`, `P`, and `V` are needed to solve the EOS.
.. math::
P = \frac{RT}{V-b} - \frac{a\alpha(T)}{V(V+b)}
.. math::
a=\left(\frac{R^2(T_c)^{2}}{9(\sqrt[3]{2}-1)P_c} \right)
=\frac{0.42748\cdot R^2(T_c)^{2}}{P_c}
.. math::
b=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_c}{P_c}
=\frac{0.08664\cdot R T_c}{P_c}
.. math::
\alpha(T) = \left[1 + m\left(1 - \sqrt{\frac{T}{T_c}}\right)\right]^2
.. math::
m = 0.480 + 1.574\omega - 0.176\omega^2
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
>>> eos = SRK(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.000146821077354, -31754.663859, -74.373272044)
References
----------
.. [1] Soave, Giorgio. "Equilibrium Constants from a Modified Redlich-Kwong
Equation of State." Chemical Engineering Science 27, no. 6 (June 1972):
1197-1203. doi:10.1016/0009-2509(72)80096-4.
.. [2] Poling, Bruce E. The Properties of Gases and Liquids. 5th
edition. New York: McGraw-Hill Professional, 2000.
.. [3] Walas, Stanley M. Phase Equilibria in Chemical Engineering.
Butterworth-Heinemann, 1985.
'''
c1 = 0.4274802335403414043909906940611707345513 # 1/(9*(2**(1/3.)-1))
'''Full value of the constant in the `a` parameter'''
c2 = 0.08664034996495772158907020242607611685675 # (2**(1/3.)-1)/3
'''Full value of the constant in the `b` parameter'''
c1R2 = c1*R2
c2R = c2*R
c1R2_c2R = c1R2/c2R
epsilon = 0.0
'''`epsilon` is always zero for the :obj:`SRK` EOS'''
Zc = 1/3.
'''Mechanical compressibility of :obj:`SRK` EOS'''
Psat_coeffs_limiting = [-3.2308843103522107, 0.7210534170705403]
Psat_coeffs_critical = [9.374273428735918, -6.15924292062784,
4.995561268009732, -3.0536215892966374,
1.0000000000023588]
Psat_cheb_coeffs = [-7.871741490227961, -7.989748461289071, -0.1356344797770207, 0.009506579247579184,
0.009624489219138763, -0.007066708482598217, 0.003075503887853841, -0.001012177935988426,
0.00028619693856193646, -8.960150789432905e-05, 3.8678642545223406e-05, -1.903594210476056e-05,
8.531492278109217e-06, -3.345456890803595e-06, 1.2311165149343946e-06, -4.784033464026011e-07,
2.0716513992539553e-07, -9.365210448247373e-08, 4.088078067054522e-08, -1.6950725229317957e-08,
6.9147476960875615e-09, -2.9036036947212296e-09, 1.2683728020787197e-09, -5.610046772833513e-10,
2.444858416194781e-10, -1.0465240317131946e-10, 4.472305869824417e-11, -1.9380782026977295e-11,
8.525075935982007e-12, -3.770209730351304e-12, 1.6512636527230007e-12, -7.22057288092548e-13,
3.2921267708457824e-13, -1.616661448808343e-13, 6.227456701354828e-14]
Psat_cheb_constant_factor = (-2.5857326352412238, 0.38702722494279784)
Psat_cheb_coeffs_der = chebder(Psat_cheb_coeffs)
Psat_coeffs_critical_der = polyder(Psat_coeffs_critical[::-1])[::-1]
phi_sat_coeffs = [4.883976406433718e-10, -2.00532968010467e-08, 3.647765457046907e-07,
-3.794073186960753e-06, 2.358762477641146e-05, -7.18419726211543e-05,
-0.00013493130050539593, 0.002716443506003684, -0.015404883730347763,
0.05251643616017714, -0.11346125895127993, 0.12885073074459652,
0.0403144920149403, -0.39801902918654086, 0.5962308106352003,
0.6656153310272716]
_P_zero_l_cheb_coeffs = [0.08380676900731782, -0.14019219743961803, 0.11742103327156811, -0.09849160801348428, 0.08273868596563422, -0.0696144897386927, 0.05866765693877264, -0.04952599518184439, 0.04188237509387957, -0.03548315864697149, 0.03011872010893725, -0.02561566850666065, 0.021830462208254395, -0.018644172238802145, 0.015958169671823057, -0.013690592984703707, 0.011773427351986342, -0.01015011684404267, 0.00877358083868034, -0.007604596012758029, 0.006610446573984768, -0.005763823407070205, 0.005041905306198975, -0.004425605918781876, 0.003898948480582476, -0.003448548272342346, 0.0030631866753461218, -0.002733454718851159, 0.0024514621141303247, -0.0022105921907339815, 0.002005302198095145, -0.0018309561248985515, 0.0016836870172771135, -0.0015602844635190134, 0.0014581002673540663, -0.0013749738886825284, 0.0013091699610779176, -0.001259330218826276, 0.001224435336044407, -0.0012037764696538264, 0.0005984681105455358]
P_zero_l_cheb_limits = (0.0009838646849082977, 77.36362033836788)
_P_zero_g_cheb_coeffs = [4074.379698522392, 4074.0787931079158, -0.011974050537509407, 0.011278738948946121, -0.010623695898806596, 0.010006612855718989, -0.00942531345107397, 0.008877745971729046, -0.008361976307962505, 0.007876181274528127, -0.007418642356788098, 0.006987739799033855, -0.006581946966943887, 0.006199825106351055, -0.00584001837117817, 0.005501249138059018, -0.0051823135959307666, 0.004882077534036101, -0.004599472449233056, 0.004333491845900562, -0.004083187738391304, 0.00384766734038441, -0.0036260899632846967, 0.00341766412351482, -0.003221644783037071, 0.003037330724301647, -0.0028640621256170408, 0.0027012182634600047, -0.0025482153614670667, 0.00240450452795168, -0.0022695698397005816, 0.002142926545674241, -0.0020241193744505405, 0.0019127209575542047, -0.0018083302956923518, 0.0017105713491642703, -0.0016190917803071369, 0.0015335616642137794, -0.0014536723452853405, 0.001379135339081262, -0.0013096813358352787, 0.001245059275896766, -0.0011850354079059, 0.001129392510023498, -0.0010779290997626433, 0.0010304587604847658, -0.0009868094600730913, 0.0009468229117978965, -0.0009103540735282088, 0.0008772706097128445, -0.0008474524304184726, 0.0008207912556403528, -0.0007971902270068286, 0.000776563594266667, -0.0007588363976502542, 0.0007439441894165576, -0.0007318328255327643, 0.0007224582401159317, -0.0007157863644244543, 0.0007117929301416425, -0.0003552316997513632]
P_zero_g_cheb_limits = (-0.9648141211597231, 34.80547339996925)
# Nov 2019
# Psat_ranges_low = (0.016623281310365744, 0.1712825877822172, 0.8775228637034642, 2.4185778704993384, 4.999300695376596, 10.621733701210832, 21.924686089216046, 46.23658652939059, 111.97303237634476)
# Psat_coeffs_low = [[3.3680989254730784e+17, -4.074818435768317e+16, 2209815483748018.2, -70915386325767.22, 1497265032843.7883, -21873765985.033226, 226390057.35154417, -1671116.2651395416, 8737.327630885395, -31.38783252903762, -0.3062576872041857, 0.33312130842499577, -3.053621478895965, -3.31001545617049e-11], [-1277.9939354449687, 1593.2536430725866, -891.7360022419283, 295.8342513857935, -64.78832619622327, 9.999684056700664, -1.2867875988497843, 0.32779561506053606, -0.2700702867816281, 0.3102313474312917, -0.38304293907646136, 0.3332375577689779, -3.0536215764869326, 2.360556194958008e-12], [-0.000830505972635258, 0.006865390327553869, -0.026817234829898506, 0.0665672622542815, -0.119964606281312, 0.17169598695361063, -0.2106764625423519, 0.23752105248153738, -0.2630589319705226, 0.3095504696125893, -0.3829670684832488, 0.3332307853291866, -3.053621198743048, -9.605050754757372e-09], [-3.9351433749518387e-07, 9.598894454703375e-06, -0.00010967371180484353, 0.0007796748366203162, -0.0038566383026144148, 0.014042499802344673, -0.03890674173205208, 0.08460429046640522, -0.15233989442440943, 0.24596202389042182, -0.3552486542779753, 0.32467374082763434, -3.0519642694194045, -0.00015063394168279842], [9.630665339915374e-10, -4.7371315246695036e-08, 1.058979499331494e-06, -1.4148499852908107e-05, 0.00012453404851616215, -0.000744874809594022, 0.00295128240256063, -0.006592268033281193, -0.00018461593083082123, 0.055334589912369295, -0.18248894355952128, 0.21859711935534465, -3.0129799097945456, -0.006486114455004355], [-2.710468940879406e-14, 2.361990546087112e-12, -8.567303244166706e-11, 1.4893663407003366e-09, -3.858548795875803e-09, -4.2380960424066427e-07, 1.1242926127857602e-05, -0.0001632710637823122, 0.001612992494042694, -0.011564861884906304, 0.06197160418123528, -0.25590435995049254, -2.502008242669764, -0.2488201810754127], [1.6083147737513614e-17, -3.625948333600919e-15, 3.779796571460543e-13, -2.4156008540207418e-11, 1.0579233657428334e-09, -3.361617809293566e-08, 8.003083689291334e-07, -1.4534087164009375e-05, 0.0002031152121569259, -0.002189767259610491, 0.018210271943770506, -0.11793369058835379, -2.7679151499088324, -0.010786625844602327], [1.4110844119182223e-21, -6.673466228406512e-19, 1.458414381897635e-16, -1.9526180721541405e-14, 1.790075947985849e-12, -1.1894965358505194e-10, 5.91471378248656e-09, -2.2398516390225003e-07, 6.512345505221323e-06, -0.0001455617494641103, 0.002494987494838556, -0.03292429235639192, -3.0591018122950038, 0.4673525314587721], [2.1123051961710074e-25, -2.1083091388936946e-22, 9.662063972407386e-20, -2.693978918168614e-17, 5.1041762501040065e-15, -6.950692277142413e-13, 7.0161397235176e-11, -5.335818543025505e-09, 3.076755677498343e-07, -1.3436008106355354e-05, 0.00044163897730386165, -0.010903751691783911, -3.2044489982966082, 0.9087101274749898]]
# Dec 2019
Psat_ranges_low = (0.016623281310365744, 0.1712825877822172, 0.8775228637034642, 2.4185778704993384, 5.001795116965993, 9.695206781787403, 21.057832476172877, 46.27931918489475, 106.60008206151481, 206.46094535380982)
Psat_coeffs_low = [[-3.021742864809473e+22, 4.214239383836392e+21, -2.6741136978422124e+20, 1.0217841941834105e+19, -2.622411357075726e+17, 4774407979621216.0, -63484876960438.69, 625375442527.9032, -4581025644.8943615, 24826982.029340215, -98159.10795313062, 276.50340512054163, -0.9143654631342611, 0.3338916360475657, -3.0536220324119654, 1.3475048507571863e-10], [1851732.9501797194, -2605860.7399044684, 1659613.6490526376, -632650.6574176748, 160893.98711246205, -28808.208934565508, 3736.231492867314, -355.6971459537775, 24.760714782747538, -1.0423063013028406, -0.21733901552867047, 0.3087713311546577, -0.3830146794101142, 0.3332371947330795, -3.05362157370627, -7.227607401461e-12], [-0.0004521121892877062, 0.004090442697504045, -0.0175578643282661, 0.04794251207356016, -0.09461038038864959, 0.14623339766010854, -0.18881543977182574, 0.216237584862916, -0.23240996967465383, 0.2455135904651031, -0.2652544858737314, 0.30999258403096164, -0.383030205401439, 0.33323681959713436, -3.053621543830392, -7.017832981404126e-10], [-7.808701919427604e-08, 2.077661560061249e-06, -2.5817479151146433e-05, 0.00019946831868046186, -0.0010777009706245766, 0.004349178573005843, -0.01369170969849733, 0.03466227625915358, -0.07207265601431376, 0.12553031872708703, -0.19076746372442283, 0.2729239080553058, -0.36893226041645655, 0.32941620336187777, -3.0529679799065943, -5.2835266906470224e-05], [1.4671478312986887e-11, -1.0110442264467632e-09, 3.1974970384785367e-08, -6.163209773850742e-07, 8.094505145684214e-06, -7.656745582686593e-05, 0.0005363374762358416, -0.002806916403123579, 0.010866043155195763, -0.029908987582571975, 0.052158093336682546, -0.032663621693629505, -0.0751716186255902, 0.12892284191276268, -2.967051324985992, -0.017359415309005755], [-1.091394193997453e-14, 1.2357554812857725e-12, -6.508815052182767e-11, 2.1148805902814486e-09, -4.738775276800668e-08, 7.750511363006775e-07, -9.547217015303917e-06, 9.00036272090859e-05, -0.0006521700537524791, 0.0036045130640996606, -0.014814913109019256, 0.04242862345426275, -0.06750190169757553, -0.04208608128453949, -2.7194330511309253, -0.14620471607422303], [1.2884100931052691e-19, -3.1352234465014476e-17, 3.5604267936540494e-15, -2.505139585733175e-13, 1.222651372599914e-11, -4.3907452596568276e-10, 1.200890247630985e-08, -2.5540606353043675e-07, 4.275108728223701e-06, -5.664187536164573e-05, 0.0005945229499256919, -0.004930179620565587, 0.03219833012293544, -0.16707167951374932, -2.661698607705941, -0.11728474036871717], [1.4718778168288712e-24, -7.827527144097033e-22, 1.9419598473231654e-19, -2.9838110847734e-17, 3.17855768668334e-15, -2.4899868244476545e-13, 1.4844855897901513e-11, -6.8756271353964e-10, 2.503217441471856e-08, -7.201291894218883e-07, 1.637034511468395e-05, -0.0002928284260408074, 0.004096142697714258, -0.0448856246444143, -3.0042011777749145, 0.3506385318223977], [8.493893312557766e-30, -1.0255827364680145e-26, 5.772959502607649e-24, -2.0110473459289428e-21, 4.853221287871829e-19, -8.60543209118676e-17, 1.1601546784661254e-14, -1.2138195783117383e-12, 9.970282232925484e-11, -6.461623477876083e-09, 3.3028403185783974e-07, -1.3249473054302956e-05, 0.00041394079374352697, -0.010055381746092074, -3.217048228957832, 0.986564340774919], [5.909724328094521e-34, -1.373533672302407e-30, 1.4873473750776103e-27, -9.960045776404399e-25, 4.616398303867023e-22, -1.5703667768168258e-19, 4.056122702342784e-17, -8.116898226364067e-15, 1.2725759075121173e-12, -1.5701193776679807e-10, 1.5228962066971988e-08, -1.1543563076442494e-06, 
6.776387262920499e-05, -0.0030684174426771718, -3.30604432857107, 1.5254388255202684]]
def __init__(self, Tc, Pc, omega, T=None, P=None, V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
self.a = self.c1*R*R*Tc*Tc/Pc
self.b = self.c2*R*Tc/Pc
self.m = 0.480 + 1.574*omega - 0.176*omega*omega
self.delta = self.b
self.solve()
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for this EOS. Uses the set values of `Tc`, `m`, and `a`.
.. math::
a\alpha = a \left(m \left(- \sqrt{\frac{T}{T_{c}}} + 1\right)
+ 1\right)^{2}
.. math::
\frac{d a\alpha}{dT} = \frac{a m}{T} \sqrt{\frac{T}{T_{c}}} \left(m
\left(\sqrt{\frac{T}{T_{c}}} - 1\right) - 1\right)
.. math::
\frac{d^2 a\alpha}{dT^2} = \frac{a m \sqrt{\frac{T}{T_{c}}}}{2 T^{2}}
\left(m + 1\right)
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
da_alpha_dT : float
Temperature derivative of coefficient calculated by EOS-specific
method, [J^2/mol^2/Pa/K]
d2a_alpha_dT2 : float
Second temperature derivative of coefficient calculated by
EOS-specific method, [J^2/mol^2/Pa/K^2]
'''
a, Tc, m = self.a, self.Tc, self.m
sqTr = (T/Tc)**0.5
a_alpha = a*(m*(1. - sqTr) + 1.)**2
da_alpha_dT = -a*m*sqTr*(m*(-sqTr + 1.) + 1.)/T
d2a_alpha_dT2 = a*m*sqTr*(m + 1.)/(2.*T*T)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
r'''Method to calculate :math:`a \alpha` for this EOS. Uses the set
values of `Tc`, `m`, and `a`.
.. math::
a\alpha = a \left(m \left(- \sqrt{\frac{T}{T_{c}}} + 1\right)
+ 1\right)^{2}
Parameters
----------
T : float
Temperature at which to calculate the values, [K]
Returns
-------
a_alpha : float
Coefficient calculated by EOS-specific method, [J^2/mol^2/Pa]
'''
a, Tc, m = self.a, self.Tc, self.m
sqTr = sqrt(T/Tc)
x0 = (m*(1. - sqTr) + 1.)
return a*x0*x0
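# A small worked sketch of the Soave alpha function above (illustrative only;
# hexane-like constants from the class docstring example):
# >>> eos = SRK(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
# >>> eos.m  # 0.480 + 1.574*0.2975 - 0.176*0.2975**2, about 0.933
# >>> eos.a_alpha_pure(299.0)/eos.a  # alpha(299 K); greater than 1 below Tc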
def P_max_at_V(self, V):
r'''Method to calculate the maximum pressure the EOS can create at a
constant volume, if one exists; returns None otherwise.
Parameters
----------
V : float
Constant molar volume, [m^3/mol]
Returns
-------
P : float
Maximum possible isochoric pressure, [Pa]
Notes
-----
The analytical determination of this formula involved some part of the
discriminant, and much black magic.
Examples
--------
>>> e = SRK(P=1e5, V=0.0001437, Tc=512.5, Pc=8084000.0, omega=0.559)
>>> e.P_max_at_V(e.V)
490523786.2
'''
'''
from sympy import *
# Solve for when T equal
P, T, V, R, a, b, m = symbols('P, T, V, R, a, b, m')
Tc, Pc, omega = symbols('Tc, Pc, omega')
# from the T solution, get the square root part, find when it hits zero
# to_zero = sqrt(Tc**2*V*a**2*m**2*(V - b)**3*(V + b)*(m + 1)**2*(P*R*Tc*V**2 + P*R*Tc*V*b - P*V*a*m**2 + P*a*b*m**2 + R*Tc*a*m**2 + 2*R*Tc*a*m + R*Tc*a))
lhs = P*R*Tc*V**2 + P*R*Tc*V*b - P*V*a*m**2 + P*a*b*m**2
rhs = R*Tc*a*m**2 + 2*R*Tc*a*m + R*Tc*a
hit = solve(Eq(lhs, rhs), P)
'''
# grows unbounded for all mixture EOS?
try:
Tc, a, m, b = self.Tc, self.a, self.m, self.b
except:
Tc, a, m, b = self.Tcs[0], self.ais[0], self.ms[0], self.bs[0]
P_max = -R*Tc*a*(m**2 + 2*m + 1)/(R*Tc*V**2 + R*Tc*V*b - V*a*m**2 + a*b*m**2)
if P_max < 0.0:
return None
return P_max
def solve_T(self, P, V, solution=None):
r'''Method to calculate `T` from a specified `P` and `V` for the SRK
EOS. Uses `a`, `b`, and `Tc` obtained from the class's namespace.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
'l' or 'g' to specify a liquid or vapor solution (if one exists);
if None, will select a solution more likely to be real (closer to
STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
Returns
-------
T : float
Temperature, [K]
Notes
-----
The exact solution can be derived as follows; it is excluded for
brevity.
>>> from sympy import * # doctest:+SKIP
>>> P, T, V, R, a, b, m = symbols('P, T, V, R, a, b, m') # doctest:+SKIP
>>> Tc, Pc, omega = symbols('Tc, Pc, omega') # doctest:+SKIP
>>> a_alpha = a*(1 + m*(1-sqrt(T/Tc)))**2 # doctest:+SKIP
>>> SRK = R*T/(V-b) - a_alpha/(V*(V+b)) - P # doctest:+SKIP
>>> solve(SRK, T) # doctest:+SKIP
'''
# Takes like half an hour to be derived, saved here for convenience
# ([(Tc*(V - b)*(R**2*Tc**2*V**4 + 2*R**2*Tc**2*V**3*b + R**2*Tc**2*V**2*b**2
# - 2*R*Tc*V**3*a*m**2 + 2*R*Tc*V*a*b**2*m**2 + V**2*a**2*m**4 - 2*V*a**2*b*m**4
# + a**2*b**2*m**4)*(P*R*Tc*V**4 + 2*P*R*Tc*V**3*b + P*R*Tc*V**2*b**2
# - P*V**3*a*m**2 + P*V*a*b**2*m**2 + R*Tc*V**2*a*m**2
# + 2*R*Tc*V**2*a*m + R*Tc*V**2*a + R*Tc*V*a*b*m**2
# + 2*R*Tc*V*a*b*m + R*Tc*V*a*b + V*a**2*m**4 + 2*V*a**2*m**3
# + V*a**2*m**2 - a**2*b*m**4 - 2*a**2*b*m**3 - a**2*b*m**2)
# - 2*sqrt(Tc**2*V*a**2*m**2*(V - b)**3*(V + b)*(m + 1)**2*(P*R*Tc*V**2
# + P*R*Tc*V*b - P*V*a*m**2 + P*a*b*m**2 + R*Tc*a*m**2 + 2*R*Tc*a*m + R*Tc*a))*(R*Tc*V**2 + R*Tc*V*b - V*a*m**2 + a*b*m**2)**2)/((R*Tc*V**2 + R*Tc*V*b - V*a*m**2 + a*b*m**2)**2*(R**2*Tc**2*V**4 + 2*R**2*Tc**2*V**3*b + R**2*Tc**2*V**2*b**2 - 2*R*Tc*V**3*a*m**2 + 2*R*Tc*V*a*b**2*m**2 + V**2*a**2*m**4 - 2*V*a**2*b*m**4 + a**2*b**2*m**4)),
# (Tc*(V - b)*(R**2*Tc**2*V**4 + 2*R**2*Tc**2*V**3*b + R**2*Tc**2*V**2*b**2
# - 2*R*Tc*V**3*a*m**2 + 2*R*Tc*V*a*b**2*m**2 + V**2*a**2*m**4 - 2*V*a**2*b*m**4
# + a**2*b**2*m**4)*(P*R*Tc*V**4 + 2*P*R*Tc*V**3*b + P*R*Tc*V**2*b**2
# - P*V**3*a*m**2 + P*V*a*b**2*m**2 + R*Tc*V**2*a*m**2
# + 2*R*Tc*V**2*a*m + R*Tc*V**2*a + R*Tc*V*a*b*m**2
# + 2*R*Tc*V*a*b*m + R*Tc*V*a*b + V*a**2*m**4 + 2*V*a**2*m**3
# + V*a**2*m**2 - a**2*b*m**4 - 2*a**2*b*m**3 - a**2*b*m**2)
# + 2*sqrt(Tc**2*V*a**2*m**2*(V - b)**3*(V + b)*(m + 1)**2*(P*R*Tc*V**2
# + P*R*Tc*V*b - P*V*a*m**2 + P*a*b*m**2 + R*Tc*a*m**2
# + 2*R*Tc*a*m + R*Tc*a))*(R*Tc*V**2 + R*Tc*V*b - V*a*m**2
# + a*b*m**2)**2)/((R*Tc*V**2 + R*Tc*V*b - V*a*m**2 + a*b*m**2
# )**2*(R**2*Tc**2*V**4 + 2*R**2*Tc**2*V**3*b
# + R**2*Tc**2*V**2*b**2 - 2*R*Tc*V**3*a*m**2
# + 2*R*Tc*V*a*b**2*m**2 + V**2*a**2*m**4
# - 2*V*a**2*b*m**4 + a**2*b**2*m**4))])
self.no_T_spec = True
a, b, Tc, m = self.a, self.b, self.Tc, self.m
if True:
x0 = R*Tc
x1 = V*b
x2 = x0*x1
x3 = V*V
x4 = x0*x3
x5 = m*m
x6 = a*x5
x7 = b*x6
x8 = V*x6
x9 = (x2 + x4 + x7 - x8)**2
x10 = x3*x3
x11 = R*R*Tc*Tc
x12 = a*a
x13 = x5*x5
x14 = x12*x13
x15 = b*b
x16 = x3*V
x17 = a*x0
x18 = x17*x5
x19 = 2.*b*x16
x20 = -2.*V*b*x14 + 2.*V*x15*x18 + x10*x11 + x11*x15*x3 + x11*x19 + x14*x15 + x14*x3 - 2*x16*x18
x21 = V - b
x22 = 2*m*x17
x23 = P*x4
x24 = P*x8
x25 = x1*x17
x26 = P*R*Tc
x27 = x17*x3
x28 = V*x12
x29 = 2.*m*m*m
x30 = b*x12
T_calc = -Tc*(2.*a*m*x9*(V*x21*x21*x21*(V + b)*(P*x2 + P*x7 + x17 + x18 + x22 + x23 - x24))**0.5*(m + 1.) - x20*x21*(-P*x16*x6 + x1*x22 + x10*x26 + x13*x28 - x13*x30 + x15*x23 + x15*x24 + x19*x26 + x22*x3 + x25*x5 + x25 + x27*x5 + x27 + x28*x29 + x28*x5 - x29*x30 - x30*x5))/(x20*x9)
if abs(T_calc.imag) > 1e-12:
raise ValueError("Calculated imaginary temperature %s" %(T_calc))
return T_calc
else:
return Tc*(-2*a*m*sqrt(V*(V - b)**3*(V + b)*(P*R*Tc*V**2 + P*R*Tc*V*b - P*V*a*m**2 + P*a*b*m**2 + R*Tc*a*m**2 + 2*R*Tc*a*m + R*Tc*a))*(m + 1)*(R*Tc*V**2 + R*Tc*V*b - V*a*m**2 + a*b*m**2)**2 + (V - b)*(R**2*Tc**2*V**4 + 2*R**2*Tc**2*V**3*b + R**2*Tc**2*V**2*b**2 - 2*R*Tc*V**3*a*m**2 + 2*R*Tc*V*a*b**2*m**2 + V**2*a**2*m**4 - 2*V*a**2*b*m**4 + a**2*b**2*m**4)*(P*R*Tc*V**4 + 2*P*R*Tc*V**3*b + P*R*Tc*V**2*b**2 - P*V**3*a*m**2 + P*V*a*b**2*m**2 + R*Tc*V**2*a*m**2 + 2*R*Tc*V**2*a*m + R*Tc*V**2*a + R*Tc*V*a*b*m**2 + 2*R*Tc*V*a*b*m + R*Tc*V*a*b + V*a**2*m**4 + 2*V*a**2*m**3 + V*a**2*m**2 - a**2*b*m**4 - 2*a**2*b*m**3 - a**2*b*m**2))/((R*Tc*V**2 + R*Tc*V*b - V*a*m**2 + a*b*m**2)**2*(R**2*Tc**2*V**4 + 2*R**2*Tc**2*V**3*b + R**2*Tc**2*V**2*b**2 - 2*R*Tc*V**3*a*m**2 + 2*R*Tc*V*a*b**2*m**2 + V**2*a**2*m**4 - 2*V*a**2*b*m**4 + a**2*b**2*m**4))
class SRKTranslated(SRK):
r'''Class for solving the volume translated Soave-Redlich-Kwong equation of state.
Subclasses :obj:`SRK`. Solves the EOS on initialization.
This is intended as a base class for all translated variants of the
SRK EOS.
.. math::
P = \frac{RT}{V + c - b} - \frac{a\alpha(T)}{(V + c)(V + c + b)}
.. math::
a=\left(\frac{R^2(T_c)^{2}}{9(\sqrt[3]{2}-1)P_c} \right)
=\frac{0.42748\cdot R^2(T_c)^{2}}{P_c}
.. math::
b=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_c}{P_c}
=\frac{0.08664\cdot R T_c}{P_c}
.. math::
\alpha(T) = \left[1 + m\left(1 - \sqrt{\frac{T}{T_c}}\right)\right]^2
.. math::
m = 0.480 + 1.574\omega - 0.176\omega^2
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
alpha_coeffs : tuple or None
Coefficients which may be specified by subclasses; set to None to use
the original Soave (SRK) alpha function, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization:
>>> eos = SRKTranslated(T=305, P=1.1e5, Tc=512.5, Pc=8084000.0, omega=0.559, c=-1e-6)
>>> eos.phase, eos.V_l, eos.V_g
('l/g', 5.5131657318e-05, 0.022447661363)
Notes
-----
References
----------
.. [1] Gmehling, Jürgen, Michael Kleiber, Bärbel Kolbe, and Jürgen Rarey.
Chemical Thermodynamics for Process Simulation. John Wiley & Sons, 2019.
'''
solve_T = GCEOS.solve_T
P_max_at_V = GCEOS.P_max_at_V
kwargs_keys = ('c', 'alpha_coeffs')
def __init__(self, Tc, Pc, omega, alpha_coeffs=None, c=0.0, T=None, P=None,
V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
Pc_inv = 1.0/Pc
self.a = self.c1*R*R*Tc*Tc*Pc_inv
self.c = c
if alpha_coeffs is None:
self.m = 0.480 + 1.574*omega - 0.176*omega*omega
self.alpha_coeffs = alpha_coeffs
self.kwargs = {'c': c, 'alpha_coeffs': alpha_coeffs}
b0 = self.c2*R*Tc*Pc_inv
self.b = b0 - c
### from sympy.abc import V, c, b, epsilon, delta
### expand((V+c)*((V+c)+b))
# delta = (b + 2*c)
self.delta = c + c + b0
# epsilon = b*c + c*c
self.epsilon = c*(b0 + c)
self.solve()
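# A consistency sketch (illustrative only; methanol-like constants from the
# class docstring example): with c=0 the translated EOS reduces to plain SRK,
# so the volume roots are expected to match.
# >>> srk_t = SRKTranslated(T=305, P=1.1e5, Tc=512.5, Pc=8084000.0, omega=0.559, c=0.0)
# >>> srk = SRK(Tc=512.5, Pc=8084000.0, omega=0.559, T=305, P=1.1e5)
# >>> (srk_t.V_l, srk_t.V_g)  # expected to match (srk.V_l, srk.V_g)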
class MSRKTranslated(Soave_1979_a_alpha, SRKTranslated):
r'''Class for solving the volume translated Soave (1980) alpha function,
revision of the Soave-Redlich-Kwong equation of state
for a pure compound according to [1]_. Uses two fitting parameters `N` and
`M` to more accurately fit the vapor pressure of pure species.
Subclasses `SRKTranslated`.
Solves the EOS on initialization. See `SRKTranslated` for further
documentation.
.. math::
P = \frac{RT}{V + c - b} - \frac{a\alpha(T)}{(V + c)(V + c + b)}
.. math::
a=\left(\frac{R^2(T_c)^{2}}{9(\sqrt[3]{2}-1)P_c} \right)
=\frac{0.42748\cdot R^2(T_c)^{2}}{P_c}
.. math::
b=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_c}{P_c}
=\frac{0.08664\cdot R T_c}{P_c}
.. math::
\alpha(T) = 1 + (1 - T_r)(M + \frac{N}{T_r})
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
alpha_coeffs : tuple(float[3]), optional
Coefficients M, N of this EOS's alpha function, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization (hexane), liquid phase:
>>> eos = MSRKTranslated(Tc=507.6, Pc=3025000, omega=0.2975, c=22.0561E-6, M=0.7446, N=0.2476, T=250., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.0001169276461322, -34571.6862673, -84.757900348)
Notes
-----
This is an older correlation; accuracy on many other properties was
sacrificed to improve the vapor pressure fit. The alpha
function of this EOS does not meet any of the consistency requirements for
alpha functions.
Coefficients can be found in [2]_, or estimated with the method in [3]_.
The estimation method in [3]_ works as follows, using the acentric factor
and true critical compressibility:
.. math::
M = 0.4745 + 2.7349(\omega Z_c) + 6.0984(\omega Z_c)^2
.. math::
N = 0.0674 + 2.1031(\omega Z_c) + 3.9512(\omega Z_c)^2
An alternate estimation scheme is provided in [1]_, which provides
analytical solutions to calculate the parameters `M` and `N` from two
points on the vapor pressure curve, suggested as 10 mmHg and 1 atm.
This is used as an estimation method here if the parameters are not
provided, and the two vapor pressure points are obtained from the original
SRK equation of state.
References
----------
.. [1] Soave, G. "Rigorous and Simplified Procedures for Determining
the Pure-Component Parameters in the Redlich—Kwong—Soave Equation of
State." Chemical Engineering Science 35, no. 8 (January 1, 1980):
1725-30. https://doi.org/10.1016/0009-2509(80)85007-X.
.. [2] Sandarusi, Jamal A., Arthur J. Kidnay, and Victor F. Yesavage.
"Compilation of Parameters for a Polar Fluid Soave-Redlich-Kwong
Equation of State." Industrial & Engineering Chemistry Process Design
and Development 25, no. 4 (October 1, 1986): 957-63.
https://doi.org/10.1021/i200035a020.
.. [3] Valderrama, Jose O., Héctor De la Puente, and Ahmed A. Ibrahim.
"Generalization of a Polar-Fluid Soave-Redlich-Kwong Equation of State."
Fluid Phase Equilibria 93 (February 11, 1994): 377-83.
https://doi.org/10.1016/0378-3812(94)87021-7.
'''
kwargs_keys = ('c', 'alpha_coeffs')
def __init__(self, Tc, Pc, omega, M=None, N=None, alpha_coeffs=None, c=0.0,
T=None, P=None, V=None):
# Ready for mixture class implementation
# Currently M and N may only be specified in place of alpha_coeffs
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
Pc_inv = 1.0/Pc
self.a = self.c1*R*R*Tc*Tc*Pc_inv
self.c = c
b0 = self.c2*R*Tc*Pc_inv
self.b = b0 - c
self.delta = c + c + b0
self.epsilon = c*(b0 + c)
if alpha_coeffs is None and (M is None or N is None):
alpha_coeffs = MSRKTranslated.estimate_MN(Tc, Pc, omega, c)
if M is not None and N is not None:
alpha_coeffs = (M, N)
self.alpha_coeffs = alpha_coeffs
self.kwargs = {'c': c, 'alpha_coeffs': alpha_coeffs}
self.solve()
@staticmethod
def estimate_MN(Tc, Pc, omega, c=0.0):
r'''Calculate the alpha values for the MSRK equation to match two pressure
points, and solve analytically for the M, N required to match exactly that.
Since no experimental data is available, the two vapor pressure points are
generated with the original SRK EOS.
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
Returns
-------
M : float
M parameter, [-]
N : float
N parameter, [-]
Examples
--------
>>> from sympy import * # doctest:+SKIP
>>> Tc, m, n = symbols('Tc, m, n') # doctest:+SKIP
>>> T0, T1 = symbols('T_10, T_760') # doctest:+SKIP
>>> alpha0, alpha1 = symbols('alpha_10, alpha_760') # doctest:+SKIP
>>> Eqs = [Eq(alpha0, 1 + (1 - T0/Tc)*(m + n/(T0/Tc))), Eq(alpha1, 1 + (1 - T1/Tc)*(m + n/(T1/Tc)))] # doctest:+SKIP
>>> solve(Eqs, [n, m]) # doctest:+SKIP
'''
SRK_base = SRKTranslated(T=Tc*0.5, P=Pc*0.5, c=c, Tc=Tc, Pc=Pc, omega=omega)
# Temperatures at 10 mmHg, 760 mmHg
P_10, P_760 = 10.0*mmHg, 760.0*mmHg
T_10 = SRK_base.Tsat(P_10)
T_760 = SRK_base.Tsat(P_760)
alpha_10 = SRK_base.a_alpha_and_derivatives(T=T_10, full=False)/SRK_base.a
alpha_760 = SRK_base.a_alpha_and_derivatives(T=T_760, full=False)/SRK_base.a
N = T_10*T_760*(-(T_10 - Tc)*(alpha_760 - 1) + (T_760 - Tc)*(alpha_10 - 1))/((T_10 - T_760)*(T_10 - Tc)*(T_760 - Tc))
M = Tc*(-T_10*(T_760 - Tc)*(alpha_10 - 1) + T_760*(T_10 - Tc)*(alpha_760 - 1))/((T_10 - T_760)*(T_10 - Tc)*(T_760 - Tc))
return (M, N)
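# A minimal usage sketch (illustrative only; hexane-like constants from the
# class docstring example): estimate M and N from the plain SRK vapor pressure
# curve, then feed them back into the EOS explicitly.
# >>> M, N = MSRKTranslated.estimate_MN(Tc=507.6, Pc=3025000, omega=0.2975, c=22.0561E-6)
# >>> eos = MSRKTranslated(Tc=507.6, Pc=3025000, omega=0.2975, c=22.0561E-6, M=M, N=N, T=250., P=1E6)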
class SRKTranslatedPPJP(SRK):
r'''Class for solving the volume translated Pina-Martinez, Privat, Jaubert,
and Peng revision of the Soave-Redlich-Kwong equation of state
for a pure compound according to [1]_.
Subclasses `SRK`, which provides everything except the variable `m`.
Solves the EOS on initialization. See `SRK` for further documentation.
.. math::
P = \frac{RT}{V + c - b} - \frac{a\alpha(T)}{(V + c)(V + c + b)}
.. math::
a=\left(\frac{R^2(T_c)^{2}}{9(\sqrt[3]{2}-1)P_c} \right)
=\frac{0.42748\cdot R^2(T_c)^{2}}{P_c}
.. math::
b=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_c}{P_c}
=\frac{0.08664\cdot R T_c}{P_c}
.. math::
\alpha(T) = \left[1 + m\left(1 - \sqrt{\frac{T}{T_c}}\right)\right]^2
.. math::
m = 0.4810 + 1.5963 \omega - 0.2963\omega^2 + 0.1223\omega^3
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization (hexane), liquid phase:
>>> eos = SRKTranslatedPPJP(Tc=507.6, Pc=3025000, omega=0.2975, c=22.3098E-6, T=250., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.00011666322408111662, -34158.934132722185, -83.06507748137201)
Notes
-----
This variant offers incremental improvements in accuracy only, but those
can be fairly substantial for some substances.
References
----------
.. [1] Pina-Martinez, Andrés, Romain Privat, Jean-Noël Jaubert, and
Ding-Yu Peng. "Updated Versions of the Generalized Soave α-Function
Suitable for the Redlich-Kwong and Peng-Robinson Equations of State."
Fluid Phase Equilibria, December 7, 2018.
https://doi.org/10.1016/j.fluid.2018.12.007.
'''
kwargs_keys = ('c',)
# No point in subclassing SRKTranslated - just disables direct solver for T
def __init__(self, Tc, Pc, omega, c=0.0, T=None, P=None, V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
Pc_inv = 1.0/Pc
self.a = self.c1*R2*Tc*Tc*Pc_inv
self.c = c
self.m = omega*(omega*(0.1223*omega - 0.2963) + 1.5963) + 0.4810
self.kwargs = {'c': c}
b0 = self.c2*R*Tc*Pc_inv
self.b = b0 - c
self.delta = c + c + b0
self.epsilon = c*(b0 + c)
self.solve()
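# Small sketch showing that the generalized `m` correlation quoted in the
# SRKTranslatedPPJP docstring matches the nested (Horner) form evaluated in
# `__init__`. The helper name is arbitrary and the default acentric factor is
# the hexane value from the example above.
def _ppjp_m_example(omega=0.2975):
    m_poly = 0.4810 + 1.5963*omega - 0.2963*omega**2 + 0.1223*omega**3
    m_horner = omega*(omega*(0.1223*omega - 0.2963) + 1.5963) + 0.4810
    assert abs(m_poly - m_horner) < 1e-12
    return m_horner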
class SRKTranslatedTwu(Twu91_a_alpha, SRKTranslated): pass
class SRKTranslatedConsistent(Twu91_a_alpha, SRKTranslated):
r'''Class for solving the volume translated Le Guennec, Privat, and Jaubert
revision of the SRK equation of state
for a pure compound according to [1]_.
This model's `alpha` is based
on the TWU 1991 model; when estimating, `N` is set to 2.
Solves the EOS on initialization. See `SRK` for further documentation.
.. math::
P = \frac{RT}{V + c - b} - \frac{a\alpha(T)}{(V + c)(V + c + b)}
.. math::
a=\left(\frac{R^2(T_c)^{2}}{9(\sqrt[3]{2}-1)P_c} \right)
=\frac{0.42748\cdot R^2(T_c)^{2}}{P_c}
.. math::
b=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_c}{P_c}
=\frac{0.08664\cdot R T_c}{P_c}
.. math::
\alpha = \left(\frac{T}{T_{c}}\right)^{c_{3} \left(c_{2}
- 1\right)} e^{c_{1} \left(- \left(\frac{T}{T_{c}}
\right)^{c_{2} c_{3}} + 1\right)}
If `c` is not provided, it is estimated as:
.. math::
c =\frac{R T_c}{P_c}(0.0172\omega - 0.0096)
If `alpha_coeffs` is not provided, the parameters `L` and `M` are estimated
from the acentric factor as follows:
.. math::
L = 0.0947\omega^2 + 0.6871\omega + 0.1508
.. math::
M = 0.1615\omega^2 - 0.2349\omega + 0.8876
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
alpha_coeffs : tuple(float[3]), optional
Coefficients L, M, N (also called C1, C2, C3) of TWU 1991 form, [-]
c : float, optional
Volume translation parameter, [m^3/mol]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
P-T initialization (hexane), liquid phase:
>>> eos = SRKTranslatedConsistent(Tc=507.6, Pc=3025000, omega=0.2975, T=250., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.00011846802568940222, -34324.05211005662, -83.83861726864234)
Notes
-----
This variant offers substantial improvements to the SRK-type EOSs - likely
getting about as accurate as this form of cubic equation can get.
References
----------
.. [1] Le Guennec, Yohann, Romain Privat, and Jean-Noël Jaubert.
"Development of the Translated-Consistent Tc-PR and Tc-RK Cubic
Equations of State for a Safe and Accurate Prediction of Volumetric,
Energetic and Saturation Properties of Pure Compounds in the Sub- and
Super-Critical Domains." Fluid Phase Equilibria 429 (December 15, 2016):
301-12. https://doi.org/10.1016/j.fluid.2016.09.003.
'''
kwargs_keys = ('c', 'alpha_coeffs')
def __init__(self, Tc, Pc, omega, alpha_coeffs=None, c=None, T=None,
P=None, V=None):
# estimates volume translation and alpha function parameters
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
Pc_inv = 1.0/Pc
# limit omega to 0.01 under the EOS limit of 1.47 for the estimation
o = min(max(omega, -0.01), 1.46)
if c is None:
c = R*Tc*Pc_inv*(0.0172*o + 0.0096)
if alpha_coeffs is None:
L = o*(0.0947*o + 0.6871) + 0.1508
M = o*(0.1615*o - 0.2349) + 0.8876
N = 2.0
alpha_coeffs = (L, M, N)
self.c = c
self.alpha_coeffs = alpha_coeffs
self.kwargs = {'c': c, 'alpha_coeffs': alpha_coeffs}
self.a = self.c1*R2*Tc*Tc*Pc_inv
b0 = self.c2*R*Tc*Pc_inv
self.b = b = b0 - c
self.delta = c + c + b0
self.epsilon = c*(b0 + c)
self.solve()
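# Standalone sketch of the estimation performed by SRKTranslatedConsistent
# when `c` and `alpha_coeffs` are not given, mirroring the correlations in the
# class docstring and the clamping of omega done in `__init__`. The helper
# name is arbitrary; `N` is fixed at 2.0 as in the class.
def _srk_consistent_estimates_example(Tc, Pc, omega):
    o = min(max(omega, -0.01), 1.46)
    c = R*Tc/Pc*(0.0172*o + 0.0096)
    L = o*(0.0947*o + 0.6871) + 0.1508
    M = o*(0.1615*o - 0.2349) + 0.8876
    return c, (L, M, 2.0)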
class APISRK(SRK):
r'''Class for solving the Refinery Soave-Redlich-Kwong cubic
equation of state for a pure compound shown in the API Databook [1]_.
Subclasses :obj:`GCEOS`, which
provides the methods for solving the EOS and calculating its assorted
relevant thermodynamic properties. Solves the EOS on initialization.
Implemented methods here are `a_alpha_and_derivatives`, which sets
:math:`a \alpha` and its first and second derivatives, and `solve_T`, which from a
specified `P` and `V` obtains `T`. Two fit constants are used in this
expresion, with an estimation scheme for the first if unavailable and the
second may be set to zero.
Two of `T`, `P`, and `V` are needed to solve the EOS.
.. math::
P = \frac{RT}{V-b} - \frac{a\alpha(T)}{V(V+b)}
.. math::
a=\left(\frac{R^2(T_c)^{2}}{9(\sqrt[3]{2}-1)P_c} \right)
=\frac{0.42748\cdot R^2(T_c)^{2}}{P_c}
.. math::
b=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_c}{P_c}
=\frac{0.08664\cdot R T_c}{P_c}
.. math::
\alpha(T) = \left[1 + S_1\left(1-\sqrt{T_r}\right) + S_2\frac{1
- \sqrt{T_r}}{\sqrt{T_r}}\right]^2
.. math::
S_1 = 0.48508 + 1.55171\omega - 0.15613\omega^2 \text{ if S1 is not tabulated }
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float, optional
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
S1 : float, optional
Fit constant or estimated from acentric factor if not provided [-]
S2 : float, optional
Fit constant or 0 if not provided [-]
Examples
--------
>>> eos = APISRK(Tc=514.0, Pc=6137000.0, S1=1.678665, S2=-0.216396, P=1E6, T=299)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 7.0456950702e-05, -42826.286146, -103.626979037)
References
----------
.. [1] API Technical Data Book: General Properties & Characterization.
American Petroleum Institute, 7E, 2005.
'''
kwargs_keys = ('S1', 'S2')
def __init__(self, Tc, Pc, omega=None, T=None, P=None, V=None, S1=None,
S2=0):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
self.check_sufficient_inputs()
if S1 is None and omega is None:
raise Exception('Either acentric factor or S1 is required')
if S1 is None:
self.S1 = S1 = 0.48508 + 1.55171*omega - 0.15613*omega*omega
else:
self.S1 = S1
self.S2 = S2
self.kwargs = {'S1': S1, 'S2': S2}
self.a = self.c1*R*R*Tc*Tc/Pc
self.b = self.c2*R*Tc/Pc
self.delta = self.b
self.solve()
def a_alpha_and_derivatives_pure(self, T):
r'''Method to calculate :math:`a \alpha` and its first and second
derivatives for this EOS. Returns `a_alpha`, `da_alpha_dT`, and
`d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives` for more
documentation. Uses the set values of `Tc`, `a`, `S1`, and `S2`.
.. math::
a\alpha(T) = a\left[1 + S_1\left(1-\sqrt{T_r}\right) + S_2\frac{1
- \sqrt{T_r}}{\sqrt{T_r}}\right]^2
.. math::
\frac{d a\alpha}{dT} = a\frac{T_{c}}{T^{2}} \left(- S_{2} \left(\sqrt{
\frac{T}{T_{c}}} - 1\right) + \sqrt{\frac{T}{T_{c}}} \left(S_{1} \sqrt{
\frac{T}{T_{c}}} + S_{2}\right)\right) \left(S_{2} \left(\sqrt{\frac{
T}{T_{c}}} - 1\right) + \sqrt{\frac{T}{T_{c}}} \left(S_{1} \left(\sqrt{
\frac{T}{T_{c}}} - 1\right) - 1\right)\right)
.. math::
\frac{d^2 a\alpha}{dT^2} = a\frac{1}{2 T^{3}} \left(S_{1}^{2} T
\sqrt{\frac{T}{T_{c}}} - S_{1} S_{2} T \sqrt{\frac{T}{T_{c}}} + 3 S_{1}
S_{2} Tc \sqrt{\frac{T}{T_{c}}} + S_{1} T \sqrt{\frac{T}{T_{c}}}
- 3 S_{2}^{2} Tc \sqrt{\frac{T}{T_{c}}} + 4 S_{2}^{2} Tc + 3 S_{2}
Tc \sqrt{\frac{T}{T_{c}}}\right)
'''
# possible TODO: custom hydrogen a_alpha from
# Graboski, Michael S., and Thomas E. Daubert. "A Modified Soave Equation
# of State for Phase Equilibrium Calculations. 3. Systems Containing
# Hydrogen." Industrial & Engineering Chemistry Process Design and
# Development 18, no. 2 (April 1, 1979): 300-306. https://doi.org/10.1021/i260070a022.
# 1.202*exp(-.30228Tr)
# Will require CAss in kwargs, is_hydrogen array (or skip vectorized approach)
a, Tc, S1, S2 = self.a, self.Tc, self.S1, self.S2
x0 = (T/Tc)**0.5
x1 = x0 - 1.
x2 = x1/x0
x3 = S2*x2
x4 = S1*x1 + x3 - 1.
x5 = S1*x0
x6 = S2 - x3 + x5
x7 = 3.*S2
a_alpha = a*x4*x4
da_alpha_dT = a*x4*x6/T
d2a_alpha_dT2 = a*(-x4*(-x2*x7 + x5 + x7) + x6*x6)/(2.*T*T)
return a_alpha, da_alpha_dT, d2a_alpha_dT2
def a_alpha_pure(self, T):
a, Tc, S1, S2 = self.a, self.Tc, self.S1, self.S2
return a*(S1*(-(T/Tc)**0.5 + 1.) + S2*(-(T/Tc)**0.5 + 1)*(T/Tc)**-0.5 + 1)**2
def solve_T(self, P, V, solution=None):
r'''Method to calculate `T` from a specified `P` and `V` for the API
SRK EOS. Uses `a`, `b`, and `Tc` obtained from the class's namespace.
Parameters
----------
P : float
Pressure, [Pa]
V : float
Molar volume, [m^3/mol]
solution : str or None, optional
'l' or 'g' to specify a liquid of vapor solution (if one exists);
if None, will select a solution more likely to be real (closer to
STP, attempting to avoid temperatures like 60000 K or 0.0001 K).
Returns
-------
T : float
Temperature, [K]
Notes
-----
If S2 is set to 0, the solution is the same as in the SRK EOS, and that
is used. Otherwise, Newton's method must be used to solve for `T`.
There are 8 roots of T in that case, six of them real. No guarantee can
be made regarding which root will be obtained.
'''
self.no_T_spec = True
if self.S2 == 0:
self.m = self.S1
return SRK.solve_T(self, P, V, solution=solution)
else:
# Previously coded method is 63 microseconds vs 47 here
# return super(SRK, self).solve_T(P, V)
Tc, a, b, S1, S2 = self.Tc, self.a, self.b, self.S1, self.S2
x2 = R/(V-b)
x3 = (V*(V + b))
def to_solve(T):
x0 = (T/Tc)**0.5
x1 = x0 - 1.
return (x2*T - a*(S1*x1 + S2*x1/x0 - 1.)**2/x3) - P
if solution is None:
try:
return newton(to_solve, Tc*0.5)
except:
pass
return GCEOS.solve_T(self, P, V, solution=solution)
def P_max_at_V(self, V):
if self.S2 == 0:
self.m = self.S1
return SRK.P_max_at_V(self, V)
return GCEOS.P_max_at_V(self, V)
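# A rough consistency sketch: the analytical temperature derivative returned
# by `APISRK.a_alpha_and_derivatives_pure` can be compared against a central
# finite difference of `a_alpha_pure`. The constants are taken from the APISRK
# docstring example above; the step size and tolerance are assumed, loose
# choices and the helper name is arbitrary.
def _apisrk_alpha_derivative_check(T=299.0):
    eos = APISRK(Tc=514.0, Pc=6137000.0, S1=1.678665, S2=-0.216396, T=T, P=1E6)
    a_alpha, da_dT, _ = eos.a_alpha_and_derivatives_pure(T)
    h = 1e-4*T
    da_dT_fd = (eos.a_alpha_pure(T + h) - eos.a_alpha_pure(T - h))/(2.0*h)
    assert abs(da_dT - da_dT_fd) <= 1e-5*abs(da_dT)
    return a_alpha, da_dT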
class TWUPR(TwuPR95_a_alpha, PR):
r'''Class for solving the Twu (1995) [1]_ variant of the Peng-Robinson cubic
equation of state for a pure compound. Subclasses :obj:`PR`, which
provides the methods for solving the EOS and calculating its assorted
relevant thermodynamic properties. Solves the EOS on initialization.
The main implemented method here is :obj:`a_alpha_and_derivatives_pure`,
which sets :math:`a \alpha` and its first and second derivatives.
Two of `T`, `P`, and `V` are needed to solve the EOS.
.. math::
P = \frac{RT}{v-b}-\frac{a\alpha(T)}{v(v+b)+b(v-b)}
.. math::
a=0.45724\frac{R^2T_c^2}{P_c}
.. math::
b=0.07780\frac{RT_c}{P_c}
.. math::
\alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)})
.. math::
\alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})]
For sub-critical conditions:
L0, M0, N0 = 0.125283, 0.911807, 1.948150;
L1, M1, N1 = 0.511614, 0.784054, 2.812520
For supercritical conditions:
L0, M0, N0 = 0.401219, 4.963070, -0.2;
L1, M1, N1 = 0.024955, 1.248089, -8.
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
>>> eos = TWUPR(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> eos.V_l, eos.H_dep_l, eos.S_dep_l
(0.00013017554170, -31652.73712, -74.112850429)
Notes
-----
Claimed to be more accurate than the PR, PR78 and PRSV equations.
There is no analytical solution for `T`. There are multiple possible
solutions for `T` under certain conditions; no guarantees are provided
regarding which solution is obtained.
References
----------
.. [1] Twu, Chorng H., John E. Coon, and John R. Cunningham. "A New
Generalized Alpha Function for a Cubic Equation of State Part 1.
Peng-Robinson Equation." Fluid Phase Equilibria 105, no. 1 (March 15,
1995): 49-59. doi:10.1016/0378-3812(94)02601-V.
'''
P_max_at_V = GCEOS.P_max_at_V
solve_T = GCEOS.solve_T
def __init__(self, Tc, Pc, omega, T=None, P=None, V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
self.a = self.c1*R*R*Tc*Tc/Pc
self.b = self.c2*R*Tc/Pc
self.delta = 2.*self.b
self.epsilon = -self.b*self.b
self.check_sufficient_inputs()
self.solve()
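# Standalone sketch making the Twu alpha interpolation concrete: each
# alpha^(i) follows Tr**(N*(M - 1))*exp(L*(1 - Tr**(N*M))) and the result is
# alpha0 + omega*(alpha1 - alpha0). The L/M/N constants are the subcritical PR
# values quoted in the TWUPR docstring; the helper name is arbitrary, and the
# class itself obtains alpha through the TwuPR95_a_alpha mixin instead.
def _twu_pr_alpha_subcritical_example(Tr, omega):
    from math import exp
    def alpha_i(L, M, N):
        return Tr**(N*(M - 1.0))*exp(L*(1.0 - Tr**(N*M)))
    alpha0 = alpha_i(0.125283, 0.911807, 1.948150)
    alpha1 = alpha_i(0.511614, 0.784054, 2.812520)
    return alpha0 + omega*(alpha1 - alpha0)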
class TWUSRK(TwuSRK95_a_alpha, SRK):
r'''Class for solving the Twu (1995) [1]_ variant of the Soave-Redlich-Kwong
cubic equation of state for a pure compound. Subclasses :obj:`SRK`, which
provides the methods for solving the EOS and calculating its assorted
relevant thermodynamic properties. Solves the EOS on initialization.
The main implemented method here is :obj:`a_alpha_and_derivatives_pure`,
which sets :math:`a \alpha` and its first and second derivatives.
Two of `T`, `P`, and `V` are needed to solve the EOS.
.. math::
P = \frac{RT}{V-b} - \frac{a\alpha(T)}{V(V+b)}
.. math::
a=\left(\frac{R^2(T_c)^{2}}{9(\sqrt[3]{2}-1)P_c} \right)
=\frac{0.42748\cdot R^2(T_c)^{2}}{P_c}
.. math::
b=\left( \frac{(\sqrt[3]{2}-1)}{3}\right)\frac{RT_c}{P_c}
=\frac{0.08664\cdot R T_c}{P_c}
.. math::
\alpha = \alpha^{(0)} + \omega(\alpha^{(1)}-\alpha^{(0)})
.. math::
\alpha^{(i)} = T_r^{N(M-1)}\exp[L(1-T_r^{NM})]
For sub-critical conditions:
L0, M0, N0 = 0.141599, 0.919422, 2.496441
L1, M1, N1 = 0.500315, 0.799457, 3.291790
For supercritical conditions:
L0, M0, N0 = 0.441411, 6.500018, -0.20
L1, M1, N1 = 0.032580, 1.289098, -8.0
Parameters
----------
Tc : float
Critical temperature, [K]
Pc : float
Critical pressure, [Pa]
omega : float
Acentric factor, [-]
T : float, optional
Temperature, [K]
P : float, optional
Pressure, [Pa]
V : float, optional
Molar volume, [m^3/mol]
Examples
--------
>>> eos = TWUSRK(Tc=507.6, Pc=3025000, omega=0.2975, T=299., P=1E6)
>>> eos.phase, eos.V_l, eos.H_dep_l, eos.S_dep_l
('l', 0.000146892222966, -31612.6025870, -74.022966093)
Notes
-----
There is no analytical solution for `T`. There are multiple possible
solutions for `T` under certain conditions; no guarantees are provided
regarding which solution is obtained.
References
----------
.. [1] Twu, Chorng H., John E. Coon, and John R. Cunningham. "A New
Generalized Alpha Function for a Cubic Equation of State Part 2.
Redlich-Kwong Equation." Fluid Phase Equilibria 105, no. 1 (March 15,
1995): 61-69. doi:10.1016/0378-3812(94)02602-W.
'''
P_max_at_V = GCEOS.P_max_at_V
solve_T = GCEOS.solve_T
def __init__(self, Tc, Pc, omega, T=None, P=None, V=None):
self.Tc = Tc
self.Pc = Pc
self.omega = omega
self.T = T
self.P = P
self.V = V
self.a = self.c1*R*R*Tc*Tc/Pc
self.b = self.c2*R*Tc/Pc
self.delta = self.b
self.check_sufficient_inputs()
self.solve()
eos_list = [IG, PR, PR78, PRSV, PRSV2, VDW, RK, SRK, APISRK, TWUPR, TWUSRK,
PRTranslatedPPJP, SRKTranslatedPPJP, MSRKTranslated,
PRTranslatedConsistent, SRKTranslatedConsistent]
'''list : List of all cubic equation of state classes.
'''
eos_2P_list = list(eos_list)
'''list : List of all cubic equation of state classes that can represent
multiple phases.
'''
eos_2P_list.remove(IG)
eos_dict = {c.__name__: c for c in eos_list}
'''dict : Dict of all cubic equation of state classes, indexed by their class name.
'''
eos_full_path_dict = {c.__full_path__: c for c in eos_list}
'''dict : Dict of all cubic equation of state classes, indexed by their module path and class name.
'''
| mit |
zorroblue/scikit-learn | sklearn/tests/test_base.py | 1 | 14463 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
import sklearn
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_dict_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
from sklearn.utils import deprecated
from sklearn.base import TransformerMixin
from sklearn.utils.mocking import MockDataFrame
import pickle
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class ModifyInitParams(BaseEstimator):
"""Deprecated behavior.
Equal parameters but with a type cast.
Does not fulfill the ``a is a`` identity requirement.
"""
def __init__(self, a=np.array([0])):
self.a = a.copy()
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""scikit-learn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_clone_copy_init_params():
# test for deprecation warning when copying or casting an init parameter
est = ModifyInitParams()
message = ("Estimator ModifyInitParams modifies parameters in __init__. "
"This behavior is deprecated as of 0.18 and support "
"for this behavior will be removed in 0.20.")
assert_warns_message(DeprecationWarning, message, clone, est)
def test_clone_sparse_matrices():
sparse_matrix_classes = [
getattr(sp, name)
for name in dir(sp) if name.endswith('_matrix')]
for cls in sparse_matrix_classes:
sparse_matrix = cls(np.eye(5))
clf = MyEstimator(empty=sparse_matrix)
clf_cloned = clone(clf)
assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline(
[('svc_cv', GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_set_params_passes_all_parameters():
# Make sure all parameters are passed together to set_params
# of nested estimator. Regression test for #9944
class TestDecisionTree(DecisionTreeClassifier):
def set_params(self, **kwargs):
super(TestDecisionTree, self).set_params(**kwargs)
# expected_kwargs is in test scope
assert kwargs == expected_kwargs
return self
expected_kwargs = {'max_depth': 5, 'min_samples_leaf': 2}
for est in [Pipeline([('estimator', TestDecisionTree())]),
GridSearchCV(TestDecisionTree(), {})]:
est.set_params(estimator__max_depth=5,
estimator__min_samples_leaf=2)
def test_score_sample_weight():
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
def test_clone_pandas_dataframe():
class DummyEstimator(BaseEstimator, TransformerMixin):
"""This is a dummy class for generating numerical features
This feature extractor extracts numerical features from pandas data
frame.
Parameters
----------
df: pandas data frame
The pandas data frame parameter.
Notes
-----
"""
def __init__(self, df=None, scalar_param=1):
self.df = df
self.scalar_param = scalar_param
def fit(self, X, y=None):
pass
def transform(self, X):
pass
# build and clone estimator
d = np.arange(10)
df = MockDataFrame(d)
e = DummyEstimator(df, scalar_param=1)
cloned_e = clone(e)
# the test
assert_true((e.df == cloned_e.df).values.all())
assert_equal(e.scalar_param, cloned_e.scalar_param)
def test_pickle_version_warning_is_not_raised_with_matching_version():
iris = datasets.load_iris()
tree = DecisionTreeClassifier().fit(iris.data, iris.target)
tree_pickle = pickle.dumps(tree)
assert_true(b"version" in tree_pickle)
tree_restored = assert_no_warnings(pickle.loads, tree_pickle)
# test that we can predict with the restored decision tree classifier
score_of_original = tree.score(iris.data, iris.target)
score_of_restored = tree_restored.score(iris.data, iris.target)
assert_equal(score_of_original, score_of_restored)
class TreeBadVersion(DecisionTreeClassifier):
def __getstate__(self):
return dict(self.__dict__.items(), _sklearn_version="something")
pickle_error_message = (
"Trying to unpickle estimator {estimator} from "
"version {old_version} when using version "
"{current_version}. This might "
"lead to breaking code or invalid results. "
"Use at your own risk.")
def test_pickle_version_warning_is_issued_upon_different_version():
iris = datasets.load_iris()
tree = TreeBadVersion().fit(iris.data, iris.target)
tree_pickle_other = pickle.dumps(tree)
message = pickle_error_message.format(estimator="TreeBadVersion",
old_version="something",
current_version=sklearn.__version__)
assert_warns_message(UserWarning, message, pickle.loads, tree_pickle_other)
class TreeNoVersion(DecisionTreeClassifier):
def __getstate__(self):
return self.__dict__
def test_pickle_version_warning_is_issued_when_no_version_info_in_pickle():
iris = datasets.load_iris()
# TreeNoVersion has no getstate, like pre-0.18
tree = TreeNoVersion().fit(iris.data, iris.target)
tree_pickle_noversion = pickle.dumps(tree)
assert_false(b"version" in tree_pickle_noversion)
message = pickle_error_message.format(estimator="TreeNoVersion",
old_version="pre-0.18",
current_version=sklearn.__version__)
# check we got the warning about using pre-0.18 pickle
assert_warns_message(UserWarning, message, pickle.loads,
tree_pickle_noversion)
def test_pickle_version_no_warning_is_issued_with_non_sklearn_estimator():
iris = datasets.load_iris()
tree = TreeNoVersion().fit(iris.data, iris.target)
tree_pickle_noversion = pickle.dumps(tree)
try:
module_backup = TreeNoVersion.__module__
TreeNoVersion.__module__ = "notsklearn"
assert_no_warnings(pickle.loads, tree_pickle_noversion)
finally:
TreeNoVersion.__module__ = module_backup
class DontPickleAttributeMixin(object):
def __getstate__(self):
data = self.__dict__.copy()
data["_attribute_not_pickled"] = None
return data
def __setstate__(self, state):
state["_restored"] = True
self.__dict__.update(state)
class MultiInheritanceEstimator(BaseEstimator, DontPickleAttributeMixin):
def __init__(self, attribute_pickled=5):
self.attribute_pickled = attribute_pickled
self._attribute_not_pickled = None
def test_pickling_when_getstate_is_overwritten_by_mixin():
estimator = MultiInheritanceEstimator()
estimator._attribute_not_pickled = "this attribute should not be pickled"
serialized = pickle.dumps(estimator)
estimator_restored = pickle.loads(serialized)
assert_equal(estimator_restored.attribute_pickled, 5)
assert_equal(estimator_restored._attribute_not_pickled, None)
assert_true(estimator_restored._restored)
def test_pickling_when_getstate_is_overwritten_by_mixin_outside_of_sklearn():
try:
estimator = MultiInheritanceEstimator()
text = "this attribute should not be pickled"
estimator._attribute_not_pickled = text
old_mod = type(estimator).__module__
type(estimator).__module__ = "notsklearn"
serialized = estimator.__getstate__()
assert_dict_equal(serialized, {'_attribute_not_pickled': None,
'attribute_pickled': 5})
serialized['attribute_pickled'] = 4
estimator.__setstate__(serialized)
assert_equal(estimator.attribute_pickled, 4)
assert_true(estimator._restored)
finally:
type(estimator).__module__ = old_mod
class SingleInheritanceEstimator(BaseEstimator):
def __init__(self, attribute_pickled=5):
self.attribute_pickled = attribute_pickled
self._attribute_not_pickled = None
def __getstate__(self):
data = self.__dict__.copy()
data["_attribute_not_pickled"] = None
return data
@ignore_warnings(category=(UserWarning))
def test_pickling_works_when_getstate_is_overwritten_in_the_child_class():
estimator = SingleInheritanceEstimator()
estimator._attribute_not_pickled = "this attribute should not be pickled"
serialized = pickle.dumps(estimator)
estimator_restored = pickle.loads(serialized)
assert_equal(estimator_restored.attribute_pickled, 5)
assert_equal(estimator_restored._attribute_not_pickled, None)
| bsd-3-clause |
rosswhitfield/mantid | qt/applications/workbench/workbench/plotting/figuremanager.py | 3 | 22835 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
"""Provides our custom figure manager to wrap the canvas, window and our custom toolbar"""
import copy
from distutils.version import LooseVersion
import io
import sys
import re
from functools import wraps
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.axes import Axes
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.collections import LineCollection
from mpl_toolkits.mplot3d.axes3d import Axes3D
from qtpy.QtCore import QObject, Qt
from qtpy.QtGui import QImage
from qtpy.QtWidgets import QApplication, QLabel, QFileDialog
from qtpy import QT_VERSION
from mantid.api import AnalysisDataService, AnalysisDataServiceObserver, ITableWorkspace, MatrixWorkspace
from mantid.kernel import logger
from mantid.plots import datafunctions, MantidAxes, axesfunctions
from mantidqt.io import open_a_file_dialog
from mantidqt.utils.qt.qappthreadcall import QAppThreadCall, force_method_calls_to_qapp_thread
from mantidqt.widgets.fitpropertybrowser import FitPropertyBrowser
from mantidqt.widgets.plotconfigdialog.presenter import PlotConfigDialogPresenter
from mantidqt.widgets.superplot import Superplot
from mantidqt.widgets.waterfallplotfillareadialog.presenter import WaterfallPlotFillAreaDialogPresenter
from mantidqt.widgets.waterfallplotoffsetdialog.presenter import WaterfallPlotOffsetDialogPresenter
from workbench.config import get_window_config
from workbench.plotting.mantidfigurecanvas import ( # noqa: F401
MantidFigureCanvas, draw_if_interactive as draw_if_interactive_impl, show as show_impl)
from workbench.plotting.figureinteraction import FigureInteraction
from workbench.plotting.figurewindow import FigureWindow
from workbench.plotting.plotscriptgenerator import generate_script
from workbench.plotting.toolbar import WorkbenchNavigationToolbar, ToolbarStateManager
from workbench.plotting.plothelppages import PlotHelpPages
def _replace_workspace_name_in_string(old_name, new_name, string):
return re.sub(rf'\b{old_name}\b', new_name, string)
def _catch_exceptions(func):
"""Catch all exceptions in method and print a traceback to stderr"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
sys.stderr.write("Error occurred in handler:\n")
import traceback
traceback.print_exc()
return wrapper
class FigureManagerADSObserver(AnalysisDataServiceObserver):
def __init__(self, manager):
super(FigureManagerADSObserver, self).__init__()
self.window = manager.window
self.canvas = manager.canvas
self.observeClear(True)
self.observeDelete(True)
self.observeReplace(True)
self.observeRename(True)
@_catch_exceptions
def clearHandle(self):
"""Called when the ADS is deleted all of its workspaces"""
self.window.emit_close()
@_catch_exceptions
def deleteHandle(self, _, workspace):
"""
Called when the ADS has deleted a workspace. Checks the
attached axes for any that hold a plot from this workspace. If removing
this leaves empty axes then the parent window is triggered to
close.
:param _: The name of the workspace. Unused
:param workspace: A pointer to the workspace
"""
# Find the axes with this workspace reference
all_axes = self.canvas.figure.axes
if not all_axes:
return
# Here we wish to delete any curves linked to the workspace being
# deleted and if a figure is now empty, close it. We must avoid closing
# any figures that were created via the script window that are not
# managed via a workspace.
# See https://github.com/mantidproject/mantid/issues/25135.
empty_axes = []
redraw = False
for ax in all_axes:
if isinstance(ax, MantidAxes):
to_redraw = ax.remove_workspace_artists(workspace)
else:
to_redraw = False
# Solution for filtering out colorbar axes. Works most of the time.
if type(ax) is not Axes:
empty_axes.append(MantidAxes.is_empty(ax))
redraw = redraw | to_redraw
if all(empty_axes):
self.window.emit_close()
elif redraw:
self.canvas.draw()
@_catch_exceptions
def replaceHandle(self, _, workspace):
"""
Called when the ADS has replaced a workspace with one of the same name.
If this workspace is attached to this figure then its data is updated
:param _: The name of the workspace. Unused
:param workspace: A reference to the new workspace
"""
redraw = False
for ax in self.canvas.figure.axes:
if isinstance(ax, MantidAxes):
redraw_this = ax.replace_workspace_artists(workspace)
else:
continue
redraw = redraw | redraw_this
if redraw:
self.canvas.draw()
@_catch_exceptions
def renameHandle(self, oldName, newName):
"""
Called when the ADS has renamed a workspace.
If this workspace is attached to this figure then the figure name is updated, as are the artists' names and
axis creation arguments.
:param oldName: The old name of the workspace.
:param newName: The new name of the workspace
"""
for ax in self.canvas.figure.axes:
if isinstance(ax, MantidAxes):
ws = AnalysisDataService.retrieve(newName)
if isinstance(ws, MatrixWorkspace):
ax.rename_workspace(newName, oldName)
elif isinstance(ws, ITableWorkspace):
ax.wsName = newName
ax.make_legend()
ax.set_title(_replace_workspace_name_in_string(oldName, newName, ax.get_title()))
self.canvas.set_window_title(
_replace_workspace_name_in_string(oldName, newName, self.canvas.get_window_title()))
self.canvas.draw()
class FigureManagerWorkbench(FigureManagerBase, QObject):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : qt.QToolBar
The qt.QToolBar
window : qt.QMainWindow
The qt.QMainWindow
"""
def __init__(self, canvas, num):
assert QAppThreadCall.is_qapp_thread(
), "FigureManagerWorkbench cannot be created outside of the QApplication thread"
QObject.__init__(self)
FigureManagerBase.__init__(self, canvas, num)
parent, flags = get_window_config()
self.window = FigureWindow(canvas, parent=parent, window_flags=flags)
self.window.activated.connect(self._window_activated)
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self.destroy)
self.window.visibility_changed.connect(self.fig_visibility_changed)
self.window.setWindowTitle("Figure %d" % num)
canvas.figure.set_label("Figure %d" % num)
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
# will enable the canvas to process events w/o clicking.
# ClickFocus only takes the focus if the window has been
# clicked
# on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
canvas.setFocusPolicy(Qt.StrongFocus)
canvas.setFocus()
self.window._destroying = False
# add text label to status bar
self.statusbar_label = QLabel()
self.window.statusBar().addWidget(self.statusbar_label)
self.plot_options_dialog = None
self.toolbar = self._get_toolbar(canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
self.toolbar.message.connect(self.statusbar_label.setText)
self.toolbar.sig_grid_toggle_triggered.connect(self.grid_toggle)
self.toolbar.sig_toggle_fit_triggered.connect(self.fit_toggle)
self.toolbar.sig_toggle_superplot_triggered.connect(self.superplot_toggle)
self.toolbar.sig_copy_to_clipboard_triggered.connect(self.copy_to_clipboard)
self.toolbar.sig_plot_options_triggered.connect(self.launch_plot_options)
self.toolbar.sig_plot_help_triggered.connect(self.launch_plot_help)
self.toolbar.sig_generate_plot_script_clipboard_triggered.connect(
self.generate_plot_script_clipboard)
self.toolbar.sig_generate_plot_script_file_triggered.connect(
self.generate_plot_script_file)
self.toolbar.sig_home_clicked.connect(self.set_figure_zoom_to_display_all)
self.toolbar.sig_waterfall_reverse_order_triggered.connect(
self.waterfall_reverse_line_order)
self.toolbar.sig_waterfall_offset_amount_triggered.connect(
self.launch_waterfall_offset_options)
self.toolbar.sig_waterfall_fill_area_triggered.connect(
self.launch_waterfall_fill_area_options)
self.toolbar.sig_waterfall_conversion.connect(self.update_toolbar_waterfall_plot)
self.toolbar.sig_change_line_collection_colour_triggered.connect(
self.change_line_collection_colour)
self.toolbar.setFloatable(False)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.fit_browser = FitPropertyBrowser(canvas, ToolbarStateManager(self.toolbar))
self.fit_browser.closing.connect(self.handle_fit_browser_close)
self.window.setCentralWidget(canvas)
self.window.addDockWidget(Qt.LeftDockWidgetArea, self.fit_browser)
self.superplot = None
# Need this line to stop the bug where the dock window snaps back to its original size after resizing.
# The size argument is arbitrary and has no effect on the fit widget size
# This is a qt bug reported at (https://bugreports.qt.io/browse/QTBUG-65592)
if QT_VERSION >= LooseVersion("5.6"):
self.window.resizeDocks([self.fit_browser], [1], Qt.Horizontal)
self.fit_browser.hide()
if matplotlib.is_interactive():
self.window.show()
canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
canvas.figure.add_axobserver(notify_axes_change)
# Register canvas observers
self._fig_interaction = FigureInteraction(self)
self._ads_observer = FigureManagerADSObserver(self)
self.window.raise_()
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _window_activated(self):
Gcf.set_active(self)
def _get_toolbar(self, canvas, parent):
return WorkbenchNavigationToolbar(canvas, parent, False)
def resize(self, width, height):
"""Set the canvas size in pixels"""
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
self.window.activateWindow()
self.window.raise_()
if self.window.windowState() & Qt.WindowMinimized:
# windowState() stores a combination of window state enums
# and multiple window states can be valid. On Windows
# a window can be both minimized and maximized at the
# same time, so we make a check here. For more info see:
# http://doc.qt.io/qt-5/qt.html#WindowState-enum
if self.window.windowState() & Qt.WindowMaximized:
self.window.setWindowState(Qt.WindowMaximized)
else:
self.window.setWindowState(Qt.WindowNoState)
# Hack to ensure the canvas is up to date
self.canvas.draw_idle()
if self.toolbar:
self.toolbar.set_buttons_visibility(self.canvas.figure)
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
if self.toolbar:
self.toolbar.destroy()
self._ads_observer.observeAll(False)
self._ads_observer = None
# disconnect window events before calling Gcf.destroy. window.close is not guaranteed to
# delete the object and do this for us. On macOS it was observed that closing the figure window
# would produce an extraneous activated event that would add a new figure to the plots list
# right after deleted the old one.
self.window.disconnect()
self._fig_interaction.disconnect()
self.window.close()
if self.superplot:
self.superplot.close()
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def launch_plot_options(self):
self.plot_options_dialog = PlotConfigDialogPresenter(self.canvas.figure, parent=self.window)
def launch_plot_options_on_curves_tab(self, axes, curve):
self.plot_options_dialog = PlotConfigDialogPresenter(self.canvas.figure, parent=self.window)
self.plot_options_dialog.configure_curves_tab(axes, curve)
def launch_plot_help(self):
PlotHelpPages.show_help_page_for_figure(self.canvas.figure)
def copy_to_clipboard(self):
"""Copy the current figure image to clipboard"""
# store the image in a buffer using savefig(), this has the
# advantage of applying all the default savefig parameters
# such as background color; those would be ignored if you simply
# grab the canvas using Qt
buf = io.BytesIO()
self.canvas.figure.savefig(buf)
QApplication.clipboard().setImage(QImage.fromData(buf.getvalue()))
buf.close()
def grid_toggle(self, on):
"""Toggle grid lines on/off"""
canvas = self.canvas
axes = canvas.figure.get_axes()
for ax in axes:
if type(ax) == Axes:
# Colorbar
continue
elif isinstance(ax, Axes3D):
# The grid toggle function for 3D plots doesn't let you choose between major and minor lines.
ax.grid(on)
else:
which = 'both' if hasattr(
ax, 'show_minor_gridlines') and ax.show_minor_gridlines else 'major'
ax.grid(on, which=which)
canvas.draw_idle()
def fit_toggle(self):
"""Toggle fit browser and tool on/off"""
if self.fit_browser.isVisible():
self.fit_browser.hide()
self.toolbar._actions["toggle_superplot"].setEnabled(True)
else:
self.fit_browser.show()
self.toolbar._actions["toggle_superplot"].setEnabled(False)
def superplot_toggle(self):
"""Toggle superplot dockwidgets on/off"""
if self.superplot:
self.window.removeDockWidget(self.superplot.get_side_view())
self.window.removeDockWidget(self.superplot.get_bottom_view())
self.superplot.close()
self.superplot = None
self.toolbar._actions["toggle_fit"].setEnabled(True)
self.toolbar._actions["toggle_superplot"].setChecked(False)
else:
self.superplot = Superplot(self.canvas, self.window)
self.window.addDockWidget(Qt.LeftDockWidgetArea,
self.superplot.get_side_view())
self.window.addDockWidget(Qt.BottomDockWidgetArea,
self.superplot.get_bottom_view())
self.toolbar._actions["toggle_fit"].setEnabled(False)
self.toolbar._actions["toggle_superplot"].setChecked(True)
self.superplot.get_bottom_view().setFocus()
def handle_fit_browser_close(self):
"""
Respond to a signal that user closed self.fit_browser by
clicking the [x] button.
"""
self.toolbar.trigger_fit_toggle_action()
def hold(self):
""" Mark this figure as held"""
self.toolbar.hold()
def get_window_title(self):
return self.window.windowTitle()
def set_window_title(self, title):
self.window.setWindowTitle(title)
# We need to add a call to the figure manager here to call
# notify methods when a figure is renamed, to update our
# plot list.
Gcf.figure_title_changed(self.num)
# For the workbench we also keep the label in sync, this is
# to allow getting a handle as plt.figure('Figure Name')
self.canvas.figure.set_label(title)
def fig_visibility_changed(self):
"""
Make a notification in the global figure manager that
plot visibility was changed. This method is added to this
class so that it can be wrapped in a QAppThreadCall.
"""
Gcf.figure_visibility_changed(self.num)
def generate_plot_script_clipboard(self):
script = generate_script(self.canvas.figure, exclude_headers=True)
QApplication.clipboard().setText(script)
logger.notice("Plotting script copied to clipboard.")
def generate_plot_script_file(self):
script = generate_script(self.canvas.figure)
filepath = open_a_file_dialog(parent=self.canvas,
default_suffix=".py",
file_filter="Python Files (*.py)",
accept_mode=QFileDialog.AcceptSave,
file_mode=QFileDialog.AnyFile)
if filepath:
try:
with open(filepath, 'w') as f:
f.write(script)
except IOError as io_error:
logger.error("Could not write file: {}\n{}" "".format(filepath, io_error))
def set_figure_zoom_to_display_all(self):
axes = self.canvas.figure.get_axes()
if axes:
for ax in axes:
# We check for axes type below as a pseudo check for an axes being
# a colorbar. This is based on the same check in
# FigureManagerADSObserver.deleteHandle.
if type(ax) is not Axes:
if ax.lines: # Relim causes issues with colour plots, which have no lines.
ax.relim()
elif isinstance(ax, Axes3D):
if hasattr(ax, 'original_data_surface'):
ax.collections[0]._vec = copy.deepcopy(ax.original_data_surface)
elif hasattr(ax, 'original_data_wireframe'):
ax.collections[0].set_segments(copy.deepcopy(ax.original_data_wireframe))
else:
ax.view_init()
elif ax.images:
axesfunctions.update_colorplot_datalimits(ax, ax.images)
continue
ax.autoscale()
self.canvas.draw()
def waterfall_reverse_line_order(self):
ax = self.canvas.figure.get_axes()[0]
x, y = ax.waterfall_x_offset, ax.waterfall_y_offset
fills = datafunctions.get_waterfall_fills(ax)
ax.update_waterfall(0, 0)
errorbar_cap_lines = datafunctions.remove_and_return_errorbar_cap_lines(ax)
ax.lines.reverse()
ax.lines += errorbar_cap_lines
ax.collections += fills
ax.collections.reverse()
ax.update_waterfall(x, y)
if ax.get_legend():
ax.make_legend()
self.canvas.draw()
def launch_waterfall_offset_options(self):
WaterfallPlotOffsetDialogPresenter(self.canvas.figure, parent=self.window)
def launch_waterfall_fill_area_options(self):
WaterfallPlotFillAreaDialogPresenter(self.canvas.figure, parent=self.window)
def update_toolbar_waterfall_plot(self, is_waterfall):
self.toolbar.set_waterfall_options_enabled(is_waterfall)
self.toolbar.set_fit_enabled(not is_waterfall)
self.toolbar.set_generate_plot_script_enabled(not is_waterfall)
def change_line_collection_colour(self, colour):
for col in self.canvas.figure.get_axes()[0].collections:
if isinstance(col, LineCollection):
col.set_color(colour.name())
self.canvas.draw()
# -----------------------------------------------------------------------------
# Figure control
# -----------------------------------------------------------------------------
def new_figure_manager(num, *args, **kwargs):
"""Create a new figure manager instance"""
from matplotlib.figure import Figure # noqa
figure_class = kwargs.pop('FigureClass', Figure)
this_fig = figure_class(*args, **kwargs)
return new_figure_manager_given_figure(num, this_fig)
def new_figure_manager_given_figure(num, figure):
"""Create a new manager from a num & figure """
def _new_figure_manager_given_figure_impl(num: int, figure):
"""Create a new figure manager instance for the given figure.
Forces all public and non-dunder method calls onto the QApplication thread.
"""
canvas = MantidFigureCanvas(figure)
return force_method_calls_to_qapp_thread(FigureManagerWorkbench(canvas, num))
# figure manager & canvas must be created on the QApplication thread
return QAppThreadCall(_new_figure_manager_given_figure_impl)(int(num), figure)
| gpl-3.0 |
sunny94/temp | sympy/utilities/runtests.py | 12 | 80104 | """
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import (exec_, PY3, get_function_code,
string_types, xrange)
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
IS_WINDOWS = (os.name == 'nt')
class Skipped(Exception):
pass
import __future__
# add more flags ??
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning of
every non-blank line in ``s``, and return the result.
If the string ``s`` is Unicode, it is encoded using the stdout
encoding and the ``backslashreplace`` error handler.
"""
# After a 2to3 run the below code is bogus, so wrap it with a version check
if not PY3:
if isinstance(s, unicode):
s = s.encode(pdoctest._encoding, 'backslashreplace')
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
pdoctest._indent = _indent
# override reporter to maintain Windows and Python 3 compatibility
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
if PY3 and IS_WINDOWS:
DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
"""
Converts a list of '/' separated paths into a list of
native (os.sep separated) paths and converts to lowercase
if the system is case insensitive.
"""
newlst = []
for i, rv in enumerate(lst):
rv = os.path.join(*rv.split("/"))
# on windows the slash after the colon is dropped
if sys.platform == "win32":
pos = rv.find(':')
if pos != -1:
if rv[pos + 1] != '\\':
rv = rv[:pos + 1] + '\\' + rv[pos + 1:]
newlst.append(sys_normcase(rv))
return newlst
def get_sympy_dir():
"""
Returns the root sympy directory and set the global value
indicating whether the system is case sensitive or not.
"""
global sys_case_insensitive
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
sys_case_insensitive = (os.path.isdir(sympy_dir) and
os.path.isdir(sympy_dir.lower()) and
os.path.isdir(sympy_dir.upper()))
return sys_normcase(sympy_dir)
def sys_normcase(f):
if sys_case_insensitive: # global defined after call to get_sympy_dir()
return f.lower()
return f
def setup_pprint():
from sympy import pprint_use_unicode, init_printing
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
function_kwargs={}, command=sys.executable,
module='sympy.utilities.runtests', force=False):
"""
Run a function in a Python subprocess with hash randomization enabled.
If hash randomization is not supported by the version of Python given, it
returns False. Otherwise, it returns the exit value of the command. The
function is passed to sys.exit(), so the return value of the function will
be the return value.
The environment variable PYTHONHASHSEED is used to seed Python's hash
randomization. If it is set, this function will return False, because
starting a new subprocess is unnecessary in that case. If it is not set,
one is set at random, and the tests are run. Note that if this
environment variable is set when Python starts, hash randomization is
automatically enabled. To force a subprocess to be created even if
PYTHONHASHSEED is set, pass ``force=True``. This flag will not force a
subprocess in Python versions that do not support hash randomization (see
below), because those versions of Python do not support the ``-R`` flag.
``function`` should be a string name of a function that is importable from
the module ``module``, like "_test". The default for ``module`` is
"sympy.utilities.runtests". ``function_args`` and ``function_kwargs``
should be a repr-able tuple and dict, respectively. The default Python
command is sys.executable, which is the currently running Python command.
This function is necessary because the seed for hash randomization must be
set by the environment variable before Python starts. Hence, in order to
use a predetermined seed for tests, we must start Python in a separate
subprocess.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
Examples
========
>>> from sympy.utilities.runtests import (
... run_in_subprocess_with_hash_randomization)
>>> # run the core tests in verbose mode
>>> run_in_subprocess_with_hash_randomization("_test",
... function_args=("core",),
... function_kwargs={'verbose': True}) # doctest: +SKIP
# Will return 0 if sys.executable supports hash randomization and tests
# pass, 1 if they fail, and False if it does not support hash
# randomization.
"""
# Note, we must return False everywhere, not None, as subprocess.call will
# sometimes return None.
# First check if the Python version supports hash randomization
# If it doesn't have this support, it won't recognize the -R flag
p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.communicate()
if p.returncode != 0:
return False
hash_seed = os.getenv("PYTHONHASHSEED")
if not hash_seed:
os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
else:
if not force:
return False
# Now run the command
commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
(module, function, function, repr(function_args),
repr(function_kwargs)))
try:
p = subprocess.Popen([command, "-R", "-c", commandstring])
p.communicate()
except KeyboardInterrupt:
p.wait()
finally:
# Put the environment variable back, so that it reads correctly for
# the current Python process.
if hash_seed is None:
del os.environ["PYTHONHASHSEED"]
else:
os.environ["PYTHONHASHSEED"] = hash_seed
return p.returncode
def run_all_tests(test_args=(), test_kwargs={}, doctest_args=(),
doctest_kwargs={}, examples_args=(), examples_kwargs={'quiet': True}):
"""
Run all tests.
Right now, this runs the regular tests (bin/test), the doctests
(bin/doctest), the examples (examples/all.py), and the sage tests (see
sympy/external/tests/test_sage.py).
This is what ``setup.py test`` uses.
You can pass arguments and keyword arguments to the test functions that
support them (for now, test, doctest, and the examples). See the
docstrings of those functions for a description of the available options.
For example, to run the solvers tests with colors turned off:
>>> from sympy.utilities.runtests import run_all_tests
>>> run_all_tests(test_args=("solvers",),
... test_kwargs={"colors:False"}) # doctest: +SKIP
"""
tests_successful = True
try:
# Regular tests
if not test(*test_args, **test_kwargs):
# some regular test fails, so set the tests_successful
# flag to false and continue running the doctests
tests_successful = False
# Doctests
print()
if not doctest(*doctest_args, **doctest_kwargs):
tests_successful = False
# Examples
print()
sys.path.append("examples")
from all import run_examples # examples/all.py
if not run_examples(*examples_args, **examples_kwargs):
tests_successful = False
# Sage tests
if not (sys.platform == "win32" or PY3):
# run Sage tests; Sage currently doesn't support Windows or Python 3
dev_null = open(os.devnull, 'w')
if subprocess.call("sage -v", shell=True, stdout=dev_null,
stderr=dev_null) == 0:
if subprocess.call("sage -python bin/test "
"sympy/external/tests/test_sage.py", shell=True) != 0:
tests_successful = False
if tests_successful:
return
else:
# Return nonzero exit code
sys.exit(1)
except KeyboardInterrupt:
print()
print("DO *NOT* COMMIT!")
sys.exit(1)
def test(*paths, **kwargs):
"""
Run tests in the specified test_*.py files.
Tests in a particular test_*.py file are run if any of the given strings
in ``paths`` matches a part of the test file's path. If ``paths=[]``,
tests in all test_*.py files are run.
Notes:
- If sort=False, tests are run in random order (not default).
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
**Explanation of test results**
====== ===============================================================
Output Meaning
====== ===============================================================
. passed
F failed
X XPassed (expected to fail but passed)
f XFAILed (expected to fail and indeed failed)
s skipped
w slow
T timeout (e.g., when ``--timeout`` is used)
K KeyboardInterrupt (when running the slow tests with ``--slow``,
you can interrupt one of them without killing the test runner)
====== ===============================================================
Colors have no additional meaning and are used just to facilitate
interpreting the output.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.test() # doctest: +SKIP
Run one file:
>>> sympy.test("sympy/core/tests/test_basic.py") # doctest: +SKIP
>>> sympy.test("_basic") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... "sympy/functions") # doctest: +SKIP
Run all tests in sympy/core and sympy/utilities:
>>> sympy.test("/core", "/util") # doctest: +SKIP
Run specific test from a file:
>>> sympy.test("sympy/core/tests/test_basic.py",
... kw="test_equality") # doctest: +SKIP
Run specific test from any file:
>>> sympy.test(kw="subs") # doctest: +SKIP
Run the tests with verbose mode on:
>>> sympy.test(verbose=True) # doctest: +SKIP
Don't sort the test output:
>>> sympy.test(sort=False) # doctest: +SKIP
Turn on post-mortem pdb:
>>> sympy.test(pdb=True) # doctest: +SKIP
Turn off colors:
>>> sympy.test(colors=False) # doctest: +SKIP
Force colors, even when the output is not to a terminal (this is useful,
e.g., if you are piping to ``less -r`` and you still want colors)
>>> sympy.test(force_colors=True) # doctest: +SKIP
The traceback verboseness can be set to "short" or "no" (default is
"short")
>>> sympy.test(tb='no') # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. For instance, to run the first half of the test suite:
>>> sympy.test(split='1/2') # doctest: +SKIP
You can disable running the tests in a separate subprocess using
``subprocess=False``. This is done to support seeding hash randomization,
which is enabled by default in the Python versions where it is supported.
If subprocess=False, hash randomization is enabled/disabled according to
whether it has been enabled or not in the calling Python process.
However, even if it is enabled, the seed cannot be printed unless it is
called from a new Python process.
Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
3.1.5, and 3.2.3, and is enabled by default in all Python versions after
and including 3.3.0.
If hash randomization is not supported ``subprocess=False`` is used
automatically.
>>> sympy.test(subprocess=False) # doctest: +SKIP
To set the hash randomization seed, set the environment variable
``PYTHONHASHSEED`` before running the tests. This can be done from within
Python using
>>> import os
>>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP
Or from the command line using
$ PYTHONHASHSEED=42 ./bin/test
If the seed is not set, a random seed will be chosen.
Note that to reproduce the same hash values, you must use both the same seed
as well as the same architecture (32-bit vs. 64-bit).
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in xrange(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_test",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in xrange(rerun, -1, -1):
print_counter(i)
val = not bool(_test(*paths, **kwargs))
if not val or i == 0:
return val
def _test(*paths, **kwargs):
"""
Internal function that actually runs the tests.
All keyword arguments from ``test()`` are passed to this function except for
``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstring of
``test()`` for more information.
"""
verbose = kwargs.get("verbose", False)
tb = kwargs.get("tb", "short")
kw = kwargs.get("kw", None) or ()
# ensure that kw is a tuple
if isinstance(kw, str):
kw = (kw, )
post_mortem = kwargs.get("pdb", False)
colors = kwargs.get("colors", True)
force_colors = kwargs.get("force_colors", False)
sort = kwargs.get("sort", True)
seed = kwargs.get("seed", None)
if seed is None:
seed = random.randrange(100000000)
timeout = kwargs.get("timeout", False)
slow = kwargs.get("slow", False)
enhance_asserts = kwargs.get("enhance_asserts", False)
split = kwargs.get('split', None)
blacklist = kwargs.get('blacklist', [])
blacklist.extend([
"sympy/mpmath", # needs to be fixed upstream
])
blacklist = convert_to_native_paths(blacklist)
r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
force_colors=force_colors, split=split)
t = SymPyTests(r, kw, post_mortem, seed)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
test_files = t.get_test_files('sympy')
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
return int(not t.test(sort=sort, timeout=timeout,
slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
"""
Runs doctests in all \*.py files in the sympy directory which match
any of the given strings in ``paths`` or all tests if paths=[].
Notes:
- Paths can be entered in native system format or in unix,
forward-slash format.
- Files that are on the blacklist can be tested by providing
their path; they are only excluded if no paths are given.
Examples
========
>>> import sympy
Run all tests:
>>> sympy.doctest() # doctest: +SKIP
Run one file:
>>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
>>> sympy.doctest("polynomial.rst") # doctest: +SKIP
Run all tests in sympy/functions/ and some particular file:
>>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP
Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:
>>> sympy.doctest("polynomial") # doctest: +SKIP
The ``split`` option can be passed to split the test run into parts. The
split currently only splits the test files, though this may change in the
future. ``split`` should be a string of the form 'a/b', which will run
part ``a`` of ``b``. Note that the regular doctests and the Sphinx
doctests are split independently. For instance, to run the first half of
the test suite:
>>> sympy.doctest(split='1/2') # doctest: +SKIP
The ``subprocess`` and ``verbose`` options are the same as with the function
``test()``. See the docstring of that function for more information.
"""
subprocess = kwargs.pop("subprocess", True)
rerun = kwargs.pop("rerun", 0)
# count up from 0, do not print 0
print_counter = lambda i : (print("rerun %d" % (rerun-i))
if rerun-i else None)
if subprocess:
# loop backwards so last i is 0
for i in xrange(rerun, -1, -1):
print_counter(i)
ret = run_in_subprocess_with_hash_randomization("_doctest",
function_args=paths, function_kwargs=kwargs)
if ret is False:
break
val = not bool(ret)
# exit on the first failure or if done
if not val or i == 0:
return val
# rerun even if hash randomization is not supported
for i in xrange(rerun, -1, -1):
print_counter(i)
val = not bool(_doctest(*paths, **kwargs))
if not val or i == 0:
return val
def _doctest(*paths, **kwargs):
"""
Internal function that actually runs the doctests.
All keyword arguments from ``doctest()`` are passed to this function
except for ``subprocess``.
Returns 0 if tests passed and 1 if they failed. See the docstrings of
``doctest()`` and ``test()`` for more information.
"""
normal = kwargs.get("normal", False)
verbose = kwargs.get("verbose", False)
blacklist = kwargs.get("blacklist", [])
split = kwargs.get('split', None)
blacklist.extend([
"doc/src/modules/mpmath", # needs to be fixed upstream
"sympy/mpmath", # needs to be fixed upstream
"doc/src/modules/plotting.rst", # generates live plots
"sympy/utilities/compilef.py", # needs tcc
"sympy/physics/gaussopt.py", # raises deprecation warning
])
if import_module('numpy') is None:
blacklist.extend([
"sympy/plotting/experimental_lambdify.py",
"sympy/plotting/plot_implicit.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py",
"examples/intermediate/sample.py",
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py",
"doc/src/modules/numeric-computation.rst"
])
else:
if import_module('matplotlib') is None:
blacklist.extend([
"examples/intermediate/mplot2d.py",
"examples/intermediate/mplot3d.py"
])
else:
# don't display matplotlib windows
from sympy.plotting.plot import unset_show
unset_show()
if import_module('pyglet') is None:
blacklist.extend(["sympy/plotting/pygletplot"])
if import_module('theano') is None:
blacklist.extend(["doc/src/modules/numeric-computation.rst"])
# disabled because of doctest failures in asmeurer's bot
blacklist.extend([
"sympy/utilities/autowrap.py",
"examples/advanced/autowrap_integrators.py",
"examples/advanced/autowrap_ufuncify.py"
])
# pytest = import_module('pytest')
# py = import_module('py')
# if py is None or pytest is None:
# blacklist.extend([
# "sympy/conftest.py",
# "sympy/utilities/benchmarking.py"
# ])
# blacklist these modules until issue 4840 is resolved
blacklist.extend([
"sympy/conftest.py",
"sympy/utilities/benchmarking.py"
])
blacklist = convert_to_native_paths(blacklist)
# Disable warnings for external modules
import sympy.external
sympy.external.importtools.WARN_OLD_VERSION = False
sympy.external.importtools.WARN_NOT_INSTALLED = False
# Show deprecation warnings
import warnings
warnings.simplefilter("error", SymPyDeprecationWarning)
r = PyTestReporter(verbose, split=split)
t = SymPyDocTests(r, normal)
test_files = t.get_test_files('sympy')
test_files.extend(t.get_test_files('examples', init_only=False))
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# take only what was requested...but not blacklisted items
# and allow for partial match anywhere or fnmatch of name
paths = convert_to_native_paths(paths)
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
t._testfiles.extend(matched)
# run the tests and record the result for this *py portion of the tests
if t._testfiles:
failed = not t.test()
else:
failed = False
# N.B.
# --------------------------------------------------------------------
# Here we test *.rst files at or below doc/src. Code from these must
# be self-supporting in terms of imports since there is no importing
# of necessary modules by doctest.testfile. If you try to pass *.py
# files through this they might fail because they will lack the needed
# imports and smarter parsing that can be done with source code.
#
test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
test_files.sort()
not_blacklisted = [f for f in test_files
if not any(b in f for b in blacklist)]
if len(paths) == 0:
matched = not_blacklisted
else:
# Take only what was requested as long as it's not on the blacklist.
# Paths were already made native in *py tests so don't repeat here.
# There's no chance of having a *py file slip through since we
# only have *rst files in test_files.
matched = []
for f in not_blacklisted:
basename = os.path.basename(f)
for p in paths:
if p in f or fnmatch(basename, p):
matched.append(f)
break
if split:
matched = split_list(matched, split)
setup_pprint()
first_report = True
for rst_file in matched:
if not os.path.isfile(rst_file):
continue
old_displayhook = sys.displayhook
try:
# out = pdoctest.testfile(
# rst_file, module_relative=False, encoding='utf-8',
# optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE)
out = sympytestfile(
rst_file, module_relative=False, encoding='utf-8',
optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
finally:
# make sure we return to the original displayhook in case some
# doctest has changed that
sys.displayhook = old_displayhook
rstfailed, tested = out
if tested:
failed = rstfailed or failed
if first_report:
first_report = False
msg = 'rst doctests start'
if not t._testfiles:
r.start(msg=msg)
else:
r.write_center(msg)
print()
# use as the id, everything past the first 'sympy'
file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
print(file_id, end=" ")
# get at least the name out so it is known who is being tested
wid = r.terminal_width - len(file_id) - 1 # update width
test_file = '[%s]' % (tested)
report = '[%s]' % (rstfailed or 'OK')
print(''.join(
[test_file, ' '*(wid - len(test_file) - len(report)), report])
)
# the doctests for *py will have printed this message already if there was
# a failure, so now only print it if there was intervening reporting by
# testing the *rst as evidenced by first_report no longer being True.
if not first_report and failed:
print()
print("DO *NOT* COMMIT!")
return int(failed)
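# Regex for the 'a/b' split specification consumed by split_list() below, e.g. '1/3'.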
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def split_list(l, split):
"""
Splits a list into part a of b
split should be a string of the form 'a/b'. For instance, '1/3' would give
the split one of three.
If the length of the list is not divisible by the number of splits, the
last split will have more items.
>>> from sympy.utilities.runtests import split_list
>>> a = list(range(10))
>>> split_list(a, '1/3')
[0, 1, 2]
>>> split_list(a, '2/3')
[3, 4, 5]
>>> split_list(a, '3/3')
[6, 7, 8, 9]
"""
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b where a and b are ints")
i, t = map(int, m.groups())
return l[(i - 1)*len(l)//t:i*len(l)//t]
from collections import namedtuple
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False,
parser=pdoctest.DocTestParser(), encoding=None):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg ``module_relative`` specifies how filenames
should be interpreted:
- If ``module_relative`` is True (the default), then ``filename``
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
``package`` argument is specified, then it is relative to that
package. To ensure os-independence, ``filename`` should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If ``module_relative`` is False, then ``filename`` specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg ``name`` gives the name of the test; by default
use the file's basename.
Optional keyword argument ``package`` is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify ``package`` if ``module_relative`` is False.
Optional keyword arg ``globs`` gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg ``extraglobs`` gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg ``verbose`` prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg ``report`` prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg ``optionflags`` or's together module constants,
and defaults to 0. Possible values (see the docs for details):
- DONT_ACCEPT_TRUE_FOR_1
- DONT_ACCEPT_BLANKLINE
- NORMALIZE_WHITESPACE
- ELLIPSIS
- SKIP
- IGNORE_EXCEPTION_DETAIL
- REPORT_UDIFF
- REPORT_CDIFF
- REPORT_NDIFF
- REPORT_ONLY_FIRST_FAILURE
Optional keyword arg ``raise_on_error`` raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg ``parser`` specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Optional keyword arg ``encoding`` specifies an encoding that should
be used to convert the file to unicode.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if not PY3:
text, filename = pdoctest._load_testfile(
filename, package, module_relative)
if encoding is not None:
text = text.decode(encoding)
else:
text, filename = pdoctest._load_testfile(
filename, package, module_relative, encoding)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if '__name__' not in globs:
globs['__name__'] = '__main__'
if raise_on_error:
runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
runner._checker = SymPyOutputChecker()
# Read the file, convert it to a test, and run it.
test = parser.get_doctest(text, globs, name, filename, 0)
runner.run(test, compileflags=future_flags)
if report:
runner.summarize()
if pdoctest.master is None:
pdoctest.master = runner
else:
pdoctest.master.merge(runner)
return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False,
seed=None):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._testfiles = []
self._seed = seed if seed is not None else random.random()
def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
"""
Runs the tests returning True if all tests pass, otherwise False.
If sort=False run tests in random order.
"""
if sort:
self._testfiles.sort()
else:
from random import shuffle
random.seed(self._seed)
shuffle(self._testfiles)
self._reporter.start(self._seed)
for f in self._testfiles:
try:
self.test_file(f, sort, timeout, slow, enhance_asserts)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def _enhance_asserts(self, source):
from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
"Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
"In": 'in', "NotIn": 'not in'}
class Transform(NodeTransformer):
def visit_Assert(self, stmt):
if isinstance(stmt.test, Compare):
compare = stmt.test
values = [compare.left] + compare.comparators
names = [ "_%s" % i for i, _ in enumerate(values) ]
names_store = [ Name(n, Store()) for n in names ]
names_load = [ Name(n, Load()) for n in names ]
target = Tuple(names_store, Store())
value = Tuple(values, Load())
assign = Assign([target], value)
new_compare = Compare(names_load[0], compare.ops, names_load[1:])
msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
return [assign, test]
else:
return stmt
tree = parse(source)
new_tree = Transform().visit(tree)
return fix_missing_locations(new_tree)
def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
funcs = []
try:
gl = {'__file__': filename}
try:
if PY3:
open_file = lambda: open(filename, encoding="utf8")
else:
open_file = lambda: open(filename)
with open_file() as f:
source = f.read()
if self._kw:
for l in source.splitlines():
if l.lstrip().startswith('def '):
if any(l.find(k) != -1 for k in self._kw):
break
else:
return
if enhance_asserts:
try:
source = self._enhance_asserts(source)
except ImportError:
pass
code = compile(source, filename, "exec")
exec_(code, gl)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
clear_cache()
self._count += 1
random.seed(self._seed)
pytestfile = ""
if "XFAIL" in gl:
pytestfile = inspect.getsourcefile(gl["XFAIL"])
pytestfile2 = ""
if "slow" in gl:
pytestfile2 = inspect.getsourcefile(gl["slow"])
disabled = gl.get("disabled", False)
if not disabled:
# we need to filter only those functions that begin with 'test_'
# that are defined in the testing file or in the file where
# the XFAIL decorator is defined
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile or
inspect.getsourcefile(gl[f]) == pytestfile2)]
if slow:
funcs = [f for f in funcs if getattr(f, '_slow', False)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i < len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests can be generators that return the actual
# test functions. We unpack them below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
if not funcs:
return
except Exception:
self._reporter.entering_filename(filename, len(funcs))
raise
self._reporter.entering_filename(filename, len(funcs))
if not sort:
random.shuffle(funcs)
for f in funcs:
self._reporter.entering_test(f)
try:
if getattr(f, '_slow', False) and not slow:
raise Skipped("Slow")
if timeout:
self._timeout(f, timeout)
else:
random.seed(self._seed)
f()
except KeyboardInterrupt:
if getattr(f, '_slow', False):
self._reporter.test_skip("KeyboardInterrupt")
else:
raise
except Exception:
if timeout:
signal.alarm(0) # Disable the alarm. It could not be handled before.
t, v, tr = sys.exc_info()
if t is AssertionError:
self._reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
self._reporter.test_skip(v)
elif t.__name__ == "XFail":
self._reporter.test_xfail()
elif t.__name__ == "XPass":
self._reporter.test_xpass(v)
else:
self._reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def _timeout(self, function, timeout):
def callback(x, y):
signal.alarm(0)
raise Skipped("Timeout")
signal.signal(signal.SIGALRM, callback)
signal.alarm(timeout) # Set an alarm with a given timeout
function()
signal.alarm(0) # Disable the alarm
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if not self._kw:
return True
for kw in self._kw:
if x.__name__.find(kw) != -1:
return True
return False
def get_test_files(self, dir, pat='test_*.py'):
"""
Returns the list of test_*.py (default) files at or below directory
``dir`` relative to the sympy home directory.
"""
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
def __init__(self, reporter, normal):
self._count = 0
self._root_dir = sympy_dir
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._normal = normal
self._testfiles = []
def test(self):
"""
Runs the tests and returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._testfiles:
try:
self.test_file(f)
except KeyboardInterrupt:
print(" interrupted by user")
self._reporter.finish()
raise
return self._reporter.finish()
def test_file(self, filename):
clear_cache()
from sympy.core.compatibility import StringIO
rel_name = filename[len(self._root_dir) + 1:]
dirname, file = os.path.split(filename)
module = rel_name.replace(os.sep, '.')[:-3]
if rel_name.startswith("examples"):
# Example files do not have __init__.py files,
# so we have to temporarily extend sys.path to import them
sys.path.insert(0, dirname)
module = file[:-3] # remove ".py"
setup_pprint()
try:
module = pdoctest._normalize_module(module)
tests = SymPyDocTestFinder().find(module)
except (SystemExit, KeyboardInterrupt):
raise
except ImportError:
self._reporter.import_error(filename, sys.exc_info())
return
finally:
if rel_name.startswith("examples"):
del sys.path[0]
tests = [test for test in tests if len(test.examples) > 0]
# By default tests are sorted by alphabetical order by function name.
# We sort by line number so one can edit the file sequentially from
# bottom to top. However, if there are decorated functions, their line
# numbers will be too large and for now one must just search for these
# by text and function name.
tests.sort(key=lambda x: -x.lineno)
if not tests:
return
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
# check if there are external dependencies which need to be met
if '_doctest_depends_on' in test.globs:
if not self._process_dependencies(test.globs['_doctest_depends_on']):
self._reporter.test_skip()
continue
runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
pdoctest.NORMALIZE_WHITESPACE |
pdoctest.IGNORE_EXCEPTION_DETAIL)
runner._checker = SymPyOutputChecker()
old = sys.stdout
new = StringIO()
sys.stdout = new
# If the testing is normal, the doctests get importing magic to
# provide the global namespace. If not normal (the default) then
# they must run on their own; all imports must be explicit within
# a function's docstring. Once imported that import will be
# available to the rest of the tests in a given function's
# docstring (unless clear_globs=True below).
if not self._normal:
test.globs = {}
# if this is uncommented then all the test would get is what
# comes by default with a "from sympy import *"
#exec('from sympy import *') in test.globs
test.globs['print_function'] = print_function
try:
f, t = runner.run(test, compileflags=future_flags,
out=new.write, clear_globs=False)
except KeyboardInterrupt:
raise
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_test_files(self, dir, pat='*.py', init_only=True):
"""
Returns the list of \*.py files (default) from which docstrings
will be tested which are at or below directory ``dir``. By default,
only those that have an __init__.py in their parent directory
and do not start with ``test_`` will be included.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs).
"""
init_py = os.path.join(os.path.dirname(x), "__init__.py")
return os.path.exists(init_py)
dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
g = []
for path, folders, files in os.walk(dir):
g.extend([os.path.join(path, f) for f in files
if not f.startswith('test_') and fnmatch(f, pat)])
if init_only:
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return [sys_normcase(gi) for gi in g]
def _process_dependencies(self, deps):
"""
Returns ``False`` if some dependencies are not met and the test should be
skipped; otherwise returns ``True``.
"""
executables = deps.get('exe', None)
moduledeps = deps.get('modules', None)
viewers = deps.get('disable_viewers', None)
pyglet = deps.get('pyglet', None)
# print deps
if executables is not None:
for ex in executables:
found = find_executable(ex)
# print "EXE %s found %s" %(ex, found)
if found is None:
return False
if moduledeps is not None:
for extmod in moduledeps:
if extmod == 'matplotlib':
matplotlib = import_module(
'matplotlib',
__import__kwargs={'fromlist':
['pyplot', 'cm', 'collections']},
min_module_version='1.0.0', catch=(RuntimeError,))
if matplotlib is not None:
pass
# print "EXTMODULE matplotlib version %s found" % \
# matplotlib.__version__
else:
# print "EXTMODULE matplotlib > 1.0.0 not found"
return False
else:
# TODO min version support
mod = import_module(extmod)
if mod is not None:
version = "unknown"
if hasattr(mod, '__version__'):
version = mod.__version__
# print "EXTMODULE %s version %s found" %(extmod, version)
else:
# print "EXTMODULE %s not found" %(extmod)
return False
if viewers is not None:
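# (Added explanatory comment) Each viewer listed here gets a tiny stub script in
# a temporary directory that is prepended to PATH, so doctests that would launch
# that viewer execute the harmless stub instead of opening a window.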
import tempfile
tempdir = tempfile.mkdtemp()
os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
if PY3:
vw = '#!/usr/bin/env python3\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
else:
vw = '#!/usr/bin/env python\n' \
'import sys\n' \
'if len(sys.argv) <= 1:\n' \
' exit("wrong number of args")\n'
for viewer in viewers:
with open(os.path.join(tempdir, viewer), 'w') as fh:
fh.write(vw)
# make the file executable
os.chmod(os.path.join(tempdir, viewer),
stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
if pyglet:
# monkey-patch pyglet s.t. it does not open a window during
# doctesting
import pyglet
class DummyWindow(object):
def __init__(self, *args, **kwargs):
self.has_exit=True
self.width = 600
self.height = 400
def set_vsync(self, x):
pass
def switch_to(self):
pass
def push_handlers(self, x):
pass
def close(self):
pass
pyglet.window.Window = DummyWindow
return True
class SymPyDocTestFinder(DocTestFinder):
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
Modified from doctest's version by looking harder for code in the
case that it looks like the code comes from a different module.
In the case of decorated functions (e.g. @vectorize) they appear
to come from a different module (e.g. multidimensional) even though
their code is not there.
"""
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to ``tests``.
"""
if self._verbose:
print('Finding tests in %s' % name)
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Make sure we don't run doctests for classes outside of sympy, such
# as in numpy or scipy.
if inspect.isclass(obj):
if obj.__module__.split('.')[0] != 'sympy':
return
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
if not self._recurse:
return
# Look for tests in a module's contained objects.
if inspect.ismodule(obj):
for rawname, val in obj.__dict__.items():
# Recurse to functions & classes.
if inspect.isfunction(val) or inspect.isclass(val):
# Make sure we don't run doctests functions or classes
# from different modules
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (rawname %s)" % (val, module, rawname)
try:
valname = '%s.%s' % (name, rawname)
self._find(tests, val, valname, module,
source_lines, globs, seen)
except KeyboardInterrupt:
raise
# Look for tests in a module's __test__ dictionary.
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, string_types):
raise ValueError("SymPyDocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, string_types)):
raise ValueError("SymPyDocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj):
for valname, val in obj.__dict__.items():
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).__func__
# Recurse to methods, properties, and nested classes.
if (inspect.isfunction(val) or
inspect.isclass(val) or
isinstance(val, property)):
# Make sure we don't run doctests functions or classes
# from different modules
if isinstance(val, property):
if hasattr(val.fget, '__module__'):
if val.fget.__module__ != module.__name__:
continue
else:
if val.__module__ != module.__name__:
continue
assert self._from_module(module, val), \
"%s is not in module %s (valname %s)" % (
val, module, valname)
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
lineno = None
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, string_types):
# obj is a string in the case for objects in the polys package.
# Note that source_lines is a binary string (compiled polys
# modules), which can't be handled by _find_lineno so determine
# the line number here.
docstring = obj
matches = re.findall("line \d+", name)
assert len(matches) == 1, \
"string '%s' does not contain lineno " % name
# NOTE: this is not the exact line number but it's better than no
# lineno ;)
lineno = int(matches[0][5:])
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, string_types):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# check that properties have a docstring because _find_lineno
# assumes it
if isinstance(obj, property):
if obj.fget.__doc__ is None:
return None
# Find the docstring's location in the file.
if lineno is None:
# handling of properties is not implemented in _find_lineno so do
# it here
if hasattr(obj, 'func_closure') and obj.func_closure is not None:
tobj = obj.func_closure[0].cell_contents
elif isinstance(obj, property):
tobj = obj.fget
else:
tobj = obj
lineno = self._find_lineno(tobj, source_lines)
if lineno is None:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
if hasattr(obj, '_doctest_depends_on'):
globs['_doctest_depends_on'] = obj._doctest_depends_on
else:
globs['_doctest_depends_on'] = {}
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
"""
A class used to run DocTest test cases, and accumulate statistics.
The ``run`` method is used to process a single DocTest case. It
returns a tuple ``(f, t)``, where ``t`` is the number of test cases
tried, and ``f`` is the number of test cases that failed.
Modified from the doctest version to not reset the sys.displayhook (see
issue 5140).
See the docstring of the original DocTestRunner for more information.
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
"""
self.test = test
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
test.globs['print_function'] = print_function
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
DocTestRunner._DocTestRunner__record_outcome
class SymPyOutputChecker(pdoctest.OutputChecker):
"""
Compared to the OutputChecker from the stdlib our OutputChecker class
supports numerical comparison of floats occurring in the output of the
doctest examples
"""
def __init__(self):
# NOTE OutputChecker is an old-style class with no __init__ method,
# so we can't call the base class version of __init__ here
got_floats = r'(\d+\.\d*|\.\d+)'
# floats in the 'want' string may contain ellipses
want_floats = got_floats + r'(\.{3})?'
front_sep = r'\s|\+|\-|\*|,'
back_sep = front_sep + r'|j|e'
fbeg = r'^%s(?=%s|$)' % (got_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, got_floats, back_sep)
self.num_got_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
fbeg = r'^%s(?=%s|$)' % (want_floats, back_sep)
fmidend = r'(?<=%s)%s(?=%s|$)' % (front_sep, want_floats, back_sep)
self.num_want_rgx = re.compile(r'(%s|%s)' %(fbeg, fmidend))
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# TODO parse integers as well ?
# Parse floats and compare them. If some of the parsed floats contain
# ellipses, skip the comparison.
matches = self.num_got_rgx.finditer(got)
numbers_got = [match.group(1) for match in matches] # list of strs
matches = self.num_want_rgx.finditer(want)
numbers_want = [match.group(1) for match in matches] # list of strs
if len(numbers_got) != len(numbers_want):
return False
if len(numbers_got) > 0:
nw_ = []
for ng, nw in zip(numbers_got, numbers_want):
if '...' in nw:
nw_.append(ng)
continue
else:
nw_.append(nw)
if abs(float(ng)-float(nw)) > 1e-5:
return False
got = self.num_got_rgx.sub(r'%s', got)
got = got % tuple(nw_)
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & pdoctest.NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & pdoctest.ELLIPSIS:
if pdoctest._ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
class Reporter(object):
"""
Parent class for all reporters.
"""
pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
def __init__(self, verbose=False, tb="short", colors=True,
force_colors=False, split=None):
self._verbose = verbose
self._tb_style = tb
self._colors = colors
self._force_colors = force_colors
self._xfailed = 0
self._xpassed = []
self._failed = []
self._failed_doctest = []
self._passed = 0
self._skipped = 0
self._exceptions = []
self._terminal_width = None
self._default_width = 80
self._split = split
# this tracks the x-position of the cursor (useful for positioning
# things on the screen), without the need for any readline library:
self._write_pos = 0
self._line_wrap = False
def root_dir(self, dir):
self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
return int(columns)
except ValueError:
pass
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
def write(self, text, color="", align="left", width=None,
force_colors=False):
"""
Prints text on the screen.
It uses sys.stdout.write(), so no readline library is necessary.
Parameters
==========
color : choose from the colors below, "" means default color
align : "left"/"right", "left" is a normal print, "right" is aligned on
the right-hand side of the screen, filled with spaces if
necessary
width : the screen width
"""
color_templates = (
("Black", "0;30"),
("Red", "0;31"),
("Green", "0;32"),
("Brown", "0;33"),
("Blue", "0;34"),
("Purple", "0;35"),
("Cyan", "0;36"),
("LightGray", "0;37"),
("DarkGray", "1;30"),
("LightRed", "1;31"),
("LightGreen", "1;32"),
("Yellow", "1;33"),
("LightBlue", "1;34"),
("LightPurple", "1;35"),
("LightCyan", "1;36"),
("White", "1;37"),
)
colors = {}
for name, value in color_templates:
colors[name] = value
c_normal = '\033[0m'
c_color = '\033[%sm'
if width is None:
width = self.terminal_width
if align == "right":
if self._write_pos + len(text) > width:
# we don't fit on the current line, create a new line
self.write("\n")
self.write(" "*(width - self._write_pos - len(text)))
if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
sys.stdout.isatty():
# the stdout is not a terminal, this for example happens if the
# output is piped to less, e.g. "bin/test | less". In this case,
# the terminal control sequences would be printed verbatim, so
# don't use any colors.
color = ""
elif sys.platform == "win32":
# Windows consoles don't support ANSI escape sequences
color = ""
elif not self._colors:
color = ""
if self._line_wrap:
if text[0] != "\n":
sys.stdout.write("\n")
# Avoid UnicodeEncodeError when printing out test failures
if PY3 and IS_WINDOWS:
text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
text = text.encode(sys.stdout.encoding, 'backslashreplace'
).decode(sys.stdout.encoding)
if color == "":
sys.stdout.write(text)
else:
sys.stdout.write("%s%s%s" %
(c_color % colors[color], text, c_normal))
sys.stdout.flush()
l = text.rfind("\n")
if l == -1:
self._write_pos += len(text)
else:
self._write_pos = len(text) - l - 1
self._line_wrap = self._write_pos >= width
self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
t = traceback.extract_tb(tb)
# remove the first item, as that is always runtests.py
t = t[1:]
t = traceback.format_list(t)
self.write("".join(t))
t = traceback.format_exception_only(e, val)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
global text, linelen
"""Break new text if too long."""
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
#self.write_center("These tests raised an exception", "_")
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
#self.write_center("Failed", "_")
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
#self.write_center("Failed", "_")
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
self.write(char, "Blue")
if self._verbose:
self.write(" - ", "Blue")
if v is not None:
self.write(message, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
| bsd-3-clause |
stylianos-kampakis/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others are under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <[email protected]>
# Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
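# Note (added comment): with test_size=0.9 only 10% of the 50,000 samples are
# used for fitting and calibration; the remaining 90% are held out for evaluation.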
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
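# (Added comment) The empirical curve bins the sorted test labels into 25
# consecutive groups and plots the fraction of positives in each group.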
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
alivecor/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 52 | 69800 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
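      # end_mask=1 makes strided_slice ignore the end index of the first
      # dimension, so the slice below keeps every row and extracts column 1,
      # i.e. the predicted probability of class 1.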
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
      # Expected is 100, but because of the global step increment bug, it is 50.
self.assertEqual(50, step_counter.steps)
else:
# Occasionally, training stops when global_step == 101, due to a race
# condition.
self.assertEqual(51, step_counter.steps)
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
Scoudem/audiolyze | plotable.py | 1 | 5487 | '''
File: plotable.py
Author: Tristan van Vaalen
Plotable data stream
'''
import collections
import numpy
import audiolazy
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import verbose
v = verbose.Verbose()
class Plotable:
def __init__(self, filt, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
self.filt = filt
self.window = numpy.array(audiolazy.window.hamming(self.length))
self.data = collections.deque([0.] * self.length, maxlen=self.length)
self.seconds, self.hertz = audiolazy.sHz(self.rate)
self.miliseconds = 1e-3 * self.seconds
self.setup_plot()
def append(self, element):
self.data.append(element)
def setup_plot(self, title='Audio stream analysis'):
if self.record:
self.figure = plt.figure(
title,
facecolor='#cccccc'
)
self.time_values = numpy.array(
list(
audiolazy.line(
self.length, -self.length / self.miliseconds, 0
)
)
)
v.debug('Buffer size: {}ms (t={} to t={})'.format(
abs(self.time_values[0]) - self.time_values[-1],
self.time_values[0], self.time_values[-1]
))
self.freq_values = numpy.array(
audiolazy.line(self.length, 0, 2 * audiolazy.pi / self.hertz)
.take(self.length // 2 + 1)
)
v.debug('Frequency range: {}Hz to {}Hz'.format(
self.freq_values[0], self.freq_values[-1]
))
self.dft_max_min, self.dft_max_max = 0.01, 1.0
xlim_t = (self.time_values[0], self.time_values[-1])
ylim_t = (-1., 1.)
xlim_f = (self.freq_values[0], self.freq_values[-1])
ylim_f = (0., .5 * (self.dft_max_max + self.dft_max_min))
self.time_ax, self.time_line = self._subplot_and_line(
1, xlim_t, ylim_t, '#00aaff', 'Time (ms)'
)
self.time_filt_ax, self.time_filt_line = self._subplot_and_line(
2, xlim_t, ylim_t, '#aa00ff', 'Filtered Time (ms)'
)
self.freq_ax, self.freq_line = self._subplot_and_line(
3, xlim_f, ylim_f, '#00aaff', 'Frequency (Hz)'
)
self.freq_filt_ax, self.freq_filt_line = self._subplot_and_line(
4, xlim_f, ylim_f, '#aa00ff', 'Filtered Frequency (Hz)'
)
if self.response:
v.debug('Plotting frequency response')
self.filt.plot()
if self.zplot:
v.debug('Plotting zero-pole plane')
self.filt.zplot()
def update_y_lim(self, ax, ax2, smax):
top = ax.get_ylim()[1]
if top < self.dft_max_max and abs(smax / top) > 1:
ax.set_ylim(top=top * 2)
ax2.set_ylim(top=top * 2)
return True
elif top > self.dft_max_min and abs(smax / top) < .2:
ax.set_ylim(top=top / 2)
ax2.set_ylim(top=top / 2)
return True
return False
def _subplot_and_line(self, index, xlim, ylim, color, label):
ax = plt.subplot(
2, 2, index,
xlim=xlim,
ylim=ylim,
axisbg='black'
)
ax.set_xlabel(label)
line = ax.plot(
[], [], linewidth=2, color=color
)[0]
return ax, line
def start_animation(self):
if self.record:
v.debug('Starting animation')
v.info('Large window size can seriously slow down rendering')
self.rempty = False
self.anim = FuncAnimation(
self.figure,
self.animate,
init_func=self.init,
interval=10,
blit=True
)
# plt.ioff()
plt.show()
def init(self):
self.time_line.set_data([], [])
self.freq_line.set_data([], [])
self.time_filt_line.set_data([], [])
self.freq_filt_line.set_data([], [])
self.figure.tight_layout()
if self.rempty:
return []
else:
return [
self.time_line,
self.freq_line,
self.time_filt_line,
self.freq_filt_line
]
def animate(self, idx):
if idx == 100:
plt.savefig('test.png')
array_data = numpy.array(self.data)
array_data_filt = self.filt(array_data).take(audiolazy.inf)
spectrum = numpy.abs(numpy.fft.rfft(array_data * self.window)) /\
self.length
spectrum_filt = numpy.abs(numpy.fft.rfft(array_data_filt * self.window)) /\
self.length
self.time_line.set_data(self.time_values, array_data)
self.time_filt_line.set_data(self.time_values, array_data_filt)
self.freq_line.set_data(self.freq_values, spectrum)
self.freq_filt_line.set_data(self.freq_values, spectrum_filt)
smax = spectrum.max()
s1 = self.update_y_lim(self.freq_ax, self.freq_filt_ax, smax)
if not s1:
self.rempty = True
return [
self.time_line,
self.freq_line,
self.time_filt_line,
self.freq_filt_line
]
return []
| mit |
costypetrisor/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
OshynSong/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
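# Illustrative sketch only (not part of scikit-learn's public code): shows how
# the ``include_self`` flag documented above changes the connectivity graph.
# The helper name below is hypothetical.
def _include_self_demo():
    X = [[0], [3], [1]]
    # include_self=True: each sample is its own first neighbour, so the
    # diagonal of the connectivity matrix is all ones.
    with_self = kneighbors_graph(X, 2, include_self=True).toarray()
    # include_self=False: only the two nearest *other* samples are linked.
    without_self = kneighbors_graph(X, 2, include_self=False).toarray()
    return with_self, without_self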
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
eramirem/astroML | book_figures/chapter2/fig_search_scaling.py | 3 | 2847 | """
Search Algorithm Scaling
------------------------
Figure 2.1.
The scaling of two methods to search for an item in an ordered list: a linear
method which performs a comparison on all N items, and a binary search which
uses a more sophisticated algorithm. The theoretical scalings are shown by
dashed lines.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from time import time
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Compute the execution times as a function of array size
Nsamples = 10 ** np.linspace(6.0, 7.8, 17)
time_linear = np.zeros_like(Nsamples)
time_binary = np.zeros_like(Nsamples)
for i in range(len(Nsamples)):
# create a sorted array
x = np.arange(Nsamples[i], dtype=int)
# Linear search: choose a single item in the array
item = int(0.4 * Nsamples[i])
t0 = time()
j = np.where(x == item)
t1 = time()
time_linear[i] = t1 - t0
# Binary search: this is much faster, so choose 1000 items to search for
items = np.linspace(0, Nsamples[i], 1000).astype(int)
t0 = time()
j = np.searchsorted(x, items)
t1 = time()
time_binary[i] = (t1 - t0)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(bottom=0.15)
ax = plt.axes(xscale='log', yscale='log')
ax.grid()
# plot the observed times
ax.plot(Nsamples, time_linear, 'ok', color='gray', markersize=5,
label=r'linear search $(\mathcal{O}[N])$')
ax.plot(Nsamples, time_binary, 'sk', color='gray', markersize=5,
label=r'efficient search $(\mathcal{O}[\log N])$')
# plot the expected scaling
scale = 10 ** np.linspace(5, 8, 100)
scaling_N = scale * time_linear[7] / Nsamples[7]
scaling_logN = np.log(scale) * time_binary[7] / np.log(Nsamples[7])
ax.plot(scale, scaling_N, '--k')
ax.plot(scale, scaling_logN, '--k')
ax.set_xlim(9E5, 1E8)
# add text and labels
ax.set_title("Scaling of Search Algorithms")
ax.set_xlabel('Length of Array')
ax.set_ylabel('Relative search time')
ax.legend(loc='upper left')
plt.show()
| bsd-2-clause |
mmottahedi/neuralnilm_prototype | scripts/e178.py | 2 | 6337 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for BLSTM
e110
* Back to Uniform(5) for BLSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single BLSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd BLSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using BLSTM not BBLSTM
e151
* Max pooling
171
lower learning rate
172
even lower learning rate
173
slightly higher learning rate!
175
same as 174 but with skip prob = 0, and LSTM not BLSTM, and only 4000 epochs
176
new cost function
177
another new cost func (this one avoids NaNs)
skip prob 0.7
10x higher learning rate
178
refactored cost func (functionally equiv to 177)
0.1x learning rate
"""
# def scaled_cost(x, t):
# raw_cost = (x - t) ** 2
# energy_per_seq = t.sum(axis=1)
# energy_per_batch = energy_per_seq.sum(axis=1)
# energy_per_batch = energy_per_batch.reshape((-1, 1))
# normaliser = energy_per_seq / energy_per_batch
# cost = raw_cost.mean(axis=1) * (1 - normaliser)
# return cost.mean()
from theano.ifelse import ifelse
import theano.tensor as T
THRESHOLD = 0
def scaled_cost(x, t):
sq_error = (x - t) ** 2
def mask_and_mean_sq_error(mask):
masked_sq_error = sq_error[mask.nonzero()]
mean = masked_sq_error.mean()
mean = ifelse(T.isnan(mean), 0.0, mean)
return mean
above_thresh_mean = mask_and_mean_sq_error(t > THRESHOLD)
below_thresh_mean = mask_and_mean_sq_error(t <= THRESHOLD)
return (above_thresh_mean + below_thresh_mean) / 2.0
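# Hedged illustration (not used by the experiment): a plain-NumPy version of
# scaled_cost above, handy for sanity-checking the masking behaviour on
# concrete arrays without building a Theano graph. The helper name is
# invented here and is not part of neuralnilm.
def scaled_cost_numpy_sketch(x, t, threshold=THRESHOLD):
    import numpy as np
    x = np.asarray(x, dtype=float)
    t = np.asarray(t, dtype=float)
    sq_error = (x - t) ** 2

    def masked_mean(mask):
        # An empty mask would give NaN; mirror ifelse(T.isnan(mean), 0.0, mean)
        # by returning 0.0 instead.
        vals = sq_error[mask]
        return 0.0 if vals.size == 0 else vals.mean()

    # Weight "target above threshold" and "target at/below threshold" equally.
    above = masked_mean(t > threshold)
    below = masked_mean(t <= threshold)
    return (above + below) / 2.0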
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=None,#[200, 100, 200, 2500, 2400],
on_power_thresholds=[5, 5, 5, 5, 5],
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=25,
include_diff=True
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=250,
loss_function=scaled_cost,
updates=partial(nesterov_momentum, learning_rate=.00001, clip_range=(-1, 1)),
layers_config=[
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': LSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(1),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': None,
'W': Uniform(25)
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=None)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
raise
print("EXCEPTION:", exception)
import ipdb; ipdb.set_trace()
if __name__ == "__main__":
main()
| mit |
halexand/NB_Distribution | Venn2_Diagrams_MMETSP_140513.py | 2 | 3206 | #!/usr/bin/env python
'''
Created on November 9, 2013
VennDiagram of Species composition
@author: harrietalexander
'''
import sys
import glob
import os
import numpy as np
import itertools
import re
import csv
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.backends.backend_pdf as PdfPages
import matplotlib as mpl
from random import shuffle
os.chdir('/Users/harrietalexander/Analysis/NB_Distribution/')
ManClus=csv.reader(open('MMETSP_140513_Cluster.tab'),delimiter='\t') #Manual clusters
AllClus=csv.reader(open('MMETSP_HigherOrder.tab'),delimiter='\t')
ClusCount=csv.reader(open('SummedSpecies.tab'), delimiter='\t')
mpl.rcParams['pdf.fonttype'] = 42
#LOAD CLUSTERING
MC_hash={}
for line in ManClus:
Clus=line[-1].strip()
MMETSP=line[0].strip()
if Clus in MC_hash:
MC_hash[Clus].append(MMETSP)
else:
MC_hash[Clus]=[MMETSP]
All_hash=[]
hash={}
#LOAD MMETSP DATA
MMETSP_Hash={}
for line in AllClus:
key=line[0].strip()
Class=line[1].strip()
Order=line[2].strip()
Family=line[3].strip()
Genus=line[4].strip()
MMETSP_Hash[key]=[Class,Order,Family,Genus]
# LOAD COUNTS
Count_hash={}
for line in ClusCount:
c=0
numList=[]
for i in line:
i=i.strip()
if c==0:
Clus=i
c+=1
else:
numList.append(int(i))
Count_hash[Clus]=numList
newHash={}
for key in MC_hash:
gID=MC_hash[key][0]
newHash[key]=MMETSP_Hash[gID]
# CREATE HASH OF SPECIES FOR EACH CATEGORY
Chash={}
Ohash={}
Fhash={}
Ghash={}
Ahash=[Chash,Ohash,Fhash,Ghash]
for key in newHash:
MM=newHash[key]
for h,m in zip(Ahash,MM):
if m in h:
h[m].append(key)
else:
h[m]=[key]
# CREATE HASH OF COUNTS FOR EACH CATEGORY
Ccount={}
Ocount={}
Fcount={}
Gcount={}
Acount=[Ccount,Ocount,Fcount,Gcount]
for m,c in zip(Ahash, Acount):
for key in m:
print key
for i in m[key]:
print i
if i in Count_hash:
if key in c:
c[key]=[x+y for x,y in zip(c[key], Count_hash[i])]
else:
c[key]=Count_hash[i]
# #Plot for E7
outdir='/Users/harrietalexander/Dropbox/NB_Paper/'
nns=['Family','Class','Order','Genus']
cc=1
for Count_hash,nn in zip(Acount,nns):
nums=[]
for i in numList:
nums.append([])
Name_list=[]
for key in Count_hash:
Name_list.append(key)
for i in range(len(numList)):
nums[i].append(Count_hash[key][i])
fig=plt.figure(cc)
fig.suptitle(nn)
names=['S1', 'S2', 'S3', 'S4','S5', '+N', '-N', '+P', '-P','C']
axs=[]
for x in range(len(names)):
ax1=fig.add_subplot(len(names),1,x+1,aspect='equal')
axs.append(ax1)
##Create color map
cmap=plt.cm.gist_rainbow
colors=cmap(np.linspace(0.,1.,len(nums[1])))
colors_shuffle=[]
index_shuf=range(len(colors))
shuffle(index_shuf)
for i in index_shuf:
colors_shuffle.append(colors[i])
both=zip(*nums)
tboth=zip(both, Name_list)
sboth = sorted(tboth)
Name_list = [p[-1] for p in sboth]
tboth = [p[0] for p in sboth]
nums=[list(t) for t in zip(*tboth)]
for J,ax,t in zip(nums,axs,names):
slices=J
pie_wedge_collection=ax.pie(slices, colors=colors)
ax.set_title(t)
for pie_wedge in pie_wedge_collection[0]:
pie_wedge.set_edgecolor('none')
ll=ax.legend(Name_list, loc=4, ncol=4,fontsize='10')
cc+=1
plt.savefig(outdir+"NB_"+nn+".pdf")
plt.show()
| mit |
aeklant/scipy | scipy/signal/spectral.py | 3 | 73376 | """Tools for spectral analysis.
"""
import numpy as np
from scipy import fft as sp_fft
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA', 'check_NOLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
extended by Scargle [2]_ to find, and test the significance of weak
periodic signals with uneven temporal sampling.
When *normalize* is False (default) the computed periodogram
is unnormalized; it takes the value ``(A**2) * N/4`` for a harmonic
signal with amplitude A for sufficiently large N.
When *normalize* is True the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be 1-D and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
The algorithm running time scales roughly as O(x * freqs) or O(N^2)
for a large number of samples and frequencies.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
welch: Power spectral density by Welch's method
spectrogram: Spectrogram by Welch's method
csd: Cross spectral density by Welch's method
Examples
--------
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
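# Hedged numeric sketch (not part of the public API): checks the
# ``(A**2) * N / 4`` peak value quoted in the docstring for an evenly sampled
# pure tone. The helper name is invented for illustration only.
def _lombscargle_peak_sketch():
    N, A, w = 1000, 2.0, 5.0
    t = np.linspace(0, 20 * np.pi, N, endpoint=False)
    y = A * np.sin(w * t)
    pgram = lombscargle(t, y, np.array([w]))
    # For this pure sinusoid, pgram[0] should be close to (A**2) * N / 4 == 1000.
    return pgram[0]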
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[25000:])
0.00099728892368242854
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[tuple(s)]
nperseg = nfft
nfft = None
return welch(x, fs=fs, window=window, nperseg=nperseg, noverlap=0,
nfft=nfft, detrend=detrend, return_onesided=return_onesided,
scaling=scaling, axis=axis)
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1, average='mean'):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
average : { 'mean', 'median' }, optional
Method to use when averaging periodograms. Defaults to 'mean'.
.. versionadded:: 1.2.0
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
If we now introduce a discontinuity in the signal, by increasing the
amplitude of a small portion of the signal by 50, we can see the
corruption of the mean average power spectral density, but using a
median average better estimates the normal behaviour.
>>> x[int(N//2):int(N//2)+10] *= 50.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> f_med, Pxx_den_med = signal.welch(x, fs, nperseg=1024, average='median')
>>> plt.semilogy(f, Pxx_den, label='mean')
>>> plt.semilogy(f_med, Pxx_den_med, label='median')
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.legend()
>>> plt.show()
"""
freqs, Pxx = csd(x, x, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend,
return_onesided=return_onesided, scaling=scaling,
axis=axis, average=average)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1, average='mean'):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
average : { 'mean', 'median' }, optional
Method to use when averaging periodograms. Defaults to 'mean'.
.. versionadded:: 1.2.0
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
-----
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
if average == 'median':
Pxy = np.median(Pxy, axis=-1) / _median_bias(Pxy.shape[-1])
elif average == 'mean':
Pxy = Pxy.mean(axis=-1)
else:
raise ValueError('average must be "median" or "mean", got %s'
% (average,))
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> from scipy.fft import fftshift
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
Note, if using output that is not one sided, then use the following:
>>> f, t, Sxx = signal.spectrogram(x, fs, return_onesided=False)
>>> plt.pcolormesh(t, fftshift(f), fftshift(Sxx, axes=0))
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
# Less overlap than Welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, it is sufficient that the signal windowing obeys the constraint of
"Constant OverLap Add" (COLA). This ensures that every point in the input
data is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
           Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, str) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
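# --- Illustrative sketch; not part of the original module ----------------
# A small demo of the bin-sum test above, assuming `get_window` is available
# as imported in this module: a DFT-even Hann window at 50% overlap sums to
# a constant, so every bin sum equals the median exactly.
def _demo_cola_binsums():
    import numpy as np
    from scipy.signal import get_window
    nperseg, noverlap = 8, 4
    win = get_window('hann', nperseg)  # periodic ("DFT-even") Hann window
    step = nperseg - noverlap
    binsums = sum(win[ii * step:(ii + 1) * step]
                  for ii in range(nperseg // step))
    # binsums is [1., 1., 1., 1.], so the deviation from the median is zero
    return binsums - np.median(binsums)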
def check_NOLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Nonzero Overlap Add (NOLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The allowed variance of a bin's weighted sum from the median bin
sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies the NOLA constraint within
`tol`, `False` otherwise
See Also
--------
check_COLA: Check whether the Constant OverLap Add (COLA) constraint is met
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "nonzero
overlap add" (NOLA):
.. math:: \sum_{t}w^{2}[n-tH] \ne 0
for all :math:`n`, where :math:`w` is the window function, :math:`t` is the
frame index, and :math:`H` is the hop size (:math:`H` = `nperseg` -
`noverlap`).
This ensures that the normalization factors in the denominator of the
overlap-add inversion equation are not zero. Only very pathological windows
will fail the NOLA constraint.
.. versionadded:: 1.2.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
           Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm NOLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_NOLA(signal.boxcar(100), 100, 75)
True
NOLA is also true for 25% (1/4) overlap:
>>> signal.check_NOLA(signal.boxcar(100), 100, 25)
True
"Symmetrical" Hann window (for filter design) is also NOLA:
>>> signal.check_NOLA(signal.hann(120, sym=True), 120, 60)
True
As long as there is overlap, it takes quite a pathological window to fail
NOLA:
>>> w = np.ones(64, dtype="float")
>>> w[::2] = 0
>>> signal.check_NOLA(w, 64, 32)
False
If there is not enough overlap, a window with zeros at the ends will not
work:
>>> signal.check_NOLA(signal.hann(64), 64, 0)
False
>>> signal.check_NOLA(signal.hann(64), 64, 1)
False
>>> signal.check_NOLA(signal.hann(64), 64, 2)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg')
if noverlap < 0:
raise ValueError('noverlap must be a nonnegative integer')
noverlap = int(noverlap)
if isinstance(window, str) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
binsums = sum(win[ii*step:(ii+1)*step]**2 for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]**2
return np.min(binsums) > tol
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Nonzero
OverLap Add" (NOLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
Given a time-domain signal :math:`x[n]`, a window :math:`w[n]`, and a hop
size :math:`H` = `nperseg - noverlap`, the windowed frame at time index
:math:`t` is given by
.. math:: x_{t}[n]=x[n]w[n-tH]
The overlap-add (OLA) reconstruction equation is given by
.. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
The NOLA constraint ensures that every normalization term that appears
    in the denominator of the OLA reconstruction equation is nonzero. Whether a
choice of `window`, `nperseg`, and `noverlap` satisfy this constraint can
be tested with `check_NOLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is `True`,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
        the parameter used to generate the STFT.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
        `numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT is located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
check_NOLA: Check whether the Nonzero Overlap Add (NOLA) constraint is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "nonzero
overlap add" (NOLA):
.. math:: \sum_{t}w^{2}[n-tH] \ne 0
This ensures that the normalization factors that appear in the denominator
of the overlap-add reconstruction equation
.. math:: x[n]=\frac{\sum_{t}x_{t}[n]w[n-tH]}{\sum_{t}w^{2}[n-tH]}
are not zero. The NOLA constraint can be checked with the `check_NOLA`
function.
An STFT which has been modified (via masking or otherwise) is not
    guaranteed to correspond to an exactly realizable signal. This
function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE 1984,
10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, str) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
ifunc = sp_fft.irfft if input_onesided else sp_fft.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
norm = norm[..., nperseg//2:-(nperseg//2)]
# Divide out normalization where non-tiny
if np.sum(norm > 1e-10) != len(norm):
warnings.warn("NOLA condition failed, STFT may not be invertible")
x /= np.where(norm > 1e-10, norm, 1.0)
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
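# --- Illustrative sketch; not part of the original module ----------------
# A minimal round-trip check (test signal and lengths are arbitrary
# assumptions): the default Hann window at 50% overlap satisfies NOLA, so
# `istft` reconstructs the `stft` input up to floating-point error.
def _demo_stft_roundtrip():
    import numpy as np
    from scipy import signal
    rng = np.random.RandomState(0)
    x = rng.standard_normal(1024)
    f, t, Zxx = signal.stft(x, fs=1.0, nperseg=128)
    t_rec, x_rec = signal.istft(Zxx, fs=1.0, nperseg=128)
    # Boundary extension lets even the first and last samples be recovered.
    return np.allclose(x, x_rec[:x.size])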
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
Estimate the magnitude squared coherence estimate, Cxy, of
discrete-time signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
    noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
    -----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend,
axis=axis)
_, Pyy = welch(y, fs=fs, window=window, nperseg=nperseg, noverlap=noverlap,
nfft=nfft, detrend=detrend, axis=axis)
_, Pxy = csd(x, y, fs=fs, window=window, nperseg=nperseg,
noverlap=noverlap, nfft=nfft, detrend=detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Defaults to `True`, but for
complex data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
Defaults to 'density'
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
    mode : str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if np.result_type(win, np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = sp_fft.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = sp_fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
return freqs, time, result
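# --- Illustrative sketch; not part of the original module ----------------
# A numpy-only demo of the two scale factors computed above (the window and
# sampling rate are arbitrary assumptions): 'density' scaling divides by
# ``fs * sum(win**2)`` (units of V**2/Hz), 'spectrum' scaling divides by
# ``sum(win)**2`` (units of V**2), and mode='stft' applies the square root
# of whichever factor was selected.
def _demo_spectral_scaling():
    import numpy as np
    fs = 1e3
    win = np.hanning(256)  # an arbitrary example window
    density_scale = 1.0 / (fs * (win * win).sum())
    spectrum_scale = 1.0 / win.sum() ** 2
    stft_scale = np.sqrt(spectrum_scale)  # what mode='stft' would apply
    return density_scale, spectrum_scale, stft_scale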
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    `_spectral_helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
    # Create strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
# https://stackoverflow.com/a/5568169
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = sp_fft.fft
else:
result = result.real
func = sp_fft.rfft
result = func(result, n=nfft)
return result
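# --- Illustrative sketch; not part of the original module ----------------
# A minimal demo of the strided segmentation used above: with nperseg=4 and
# noverlap=2, a length-10 signal is viewed as four overlapping frames
# without copying any data.
def _demo_strided_segments():
    import numpy as np
    x = np.arange(10.)
    nperseg, noverlap = 4, 2
    step = nperseg - noverlap
    shape = x.shape[:-1] + ((x.shape[-1] - noverlap) // step, nperseg)
    strides = x.strides[:-1] + (step * x.strides[-1], x.strides[-1])
    segments = np.lib.stride_tricks.as_strided(x, shape=shape,
                                               strides=strides)
    # segments == [[0, 1, 2, 3], [2, 3, 4, 5], [4, 5, 6, 7], [6, 7, 8, 9]]
    return segments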
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
specified, nperseg is set to the default of 256 and returns a window of
that length.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length: int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
        window. If the function was called with a string or tuple, this will hold
the actual array used as a window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
window.
"""
# parse window; if array like, then set nperseg = win.shape
if isinstance(window, str) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
warnings.warn('nperseg = {0:d} is greater than input length '
                          '= {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different"
" from length of window")
return win, nperseg
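# --- Illustrative sketch; not part of the original module ----------------
# A quick demo of the parsing rules above, calling the module-internal
# helper directly: a string window with no nperseg falls back to 256,
# capped at the input length (which emits the warning above), while an
# array_like window fixes nperseg to its own length.
def _demo_triage_segments():
    import numpy as np
    win_a, nperseg_a = _triage_segments('hann', None, input_length=100)
    win_b, nperseg_b = _triage_segments(np.hanning(64), None,
                                        input_length=100)
    return nperseg_a, nperseg_b  # (100, 64)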
def _median_bias(n):
"""
Returns the bias of the median of a set of periodograms relative to
the mean.
See arXiv:gr-qc/0509116 Appendix B for details.
Parameters
----------
n : int
Numbers of periodograms being averaged.
Returns
-------
bias : float
Calculated bias.
"""
ii_2 = 2 * np.arange(1., (n-1) // 2 + 1)
return 1 + np.sum(1. / (ii_2 + 1) - 1. / ii_2)
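# --- Illustrative sketch; not part of the original module ----------------
# A tiny demo of the bias factor above: with a single periodogram the median
# equals the mean (factor 1.0); as more periodograms are combined the factor
# decreases toward ln(2) ~= 0.693.
def _demo_median_bias():
    # _median_bias(1) == 1.0, _median_bias(9) ~= 0.746, large n -> ~0.693
    return [_median_bias(n) for n in (1, 9, 101)]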
| bsd-3-clause |
seanli9jan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/debug_test.py | 40 | 32402 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Debug estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import operator
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import debug
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
NUM_EXAMPLES = 100
N_CLASSES = 5 # Cardinality of multiclass labels.
LABEL_DIMENSION = 3 # Dimensionality of regression labels.
def _train_test_split(features_and_labels):
features, labels = features_and_labels
train_set = (features[:int(len(features) / 2)],
labels[:int(len(features) / 2)])
test_set = (features[int(len(features) / 2):],
labels[int(len(features) / 2):])
return train_set, test_set
def _input_fn_builder(features, labels):
def input_fn():
feature_dict = {'features': constant_op.constant(features)}
my_labels = labels
if my_labels is not None:
my_labels = constant_op.constant(my_labels)
return feature_dict, my_labels
return input_fn
class DebugClassifierTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.labels = np.random.choice(
range(N_CLASSES), p=[0.1, 0.3, 0.4, 0.1, 0.1], size=NUM_EXAMPLES)
self.binary_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
self.binary_float_labels = np.random.choice(
range(2), p=[0.2, 0.8], size=NUM_EXAMPLES)
def testPredict(self):
"""Tests that DebugClassifier outputs the majority class."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictBinary(self):
"""Same as above for binary predictions."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
(train_features,
train_labels), (test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
majority_class, _ = max(
collections.Counter(train_labels).items(), key=operator.itemgetter(1))
expected_prediction = np.vstack(
[[majority_class] for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_classes(
input_fn=_input_fn_builder(test_features, None))
self.assertAllEqual(expected_prediction, np.vstack(pred))
def testPredictProba(self):
"""Tests that DebugClassifier outputs observed class distribution."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.labels])
class_distribution = np.zeros((1, N_CLASSES))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=N_CLASSES)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testPredictProbaBinary(self):
"""Same as above but for binary classification."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.binary_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, label] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
(train_features,
train_labels), (test_features, test_labels) = _train_test_split(
[self.features, self.binary_float_labels])
class_distribution = np.zeros((1, 2))
for label in train_labels:
class_distribution[0, int(label)] += 1
class_distribution /= len(train_labels)
expected_prediction = np.vstack(
[class_distribution for _ in range(test_labels.shape[0])])
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_proba(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugClassifier(n_classes=3),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def _assertInRange(self, expected_min, expected_max, actual):
self.assertLessEqual(expected_min, actual)
self.assertGreaterEqual(expected_max, actual)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
classifier.fit(input_fn=input_fn, steps=5)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(x=train_x, y=train_y, steps=5)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def _assertBinaryPredictions(self, expected_len, predictions):
self.assertEqual(expected_len, len(predictions))
for prediction in predictions:
self.assertIn(prediction, (0, 1))
def _assertProbabilities(self, expected_batch_size, expected_n_classes,
probabilities):
self.assertEqual(expected_batch_size, len(probabilities))
for b in range(expected_batch_size):
self.assertEqual(expected_n_classes, len(probabilities[b]))
for i in range(expected_n_classes):
self._assertInRange(0.0, 1.0, probabilities[b][i])
def testLogisticRegression_TensorData(self):
"""Tests binary classification using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
def testLogisticRegression_FloatLabel(self):
"""Tests binary classification with float labels."""
def _input_fn_float_label(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[50], [20], [10]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_float_label, steps=50)
predict_input_fn = functools.partial(_input_fn_float_label, num_epochs=1)
predictions = list(classifier.predict_classes(input_fn=predict_input_fn))
self._assertBinaryPredictions(3, predictions)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
self._assertProbabilities(3, 2, predictions_proba)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
classifier = debug.DebugClassifier(n_classes=3)
input_fn = test_data.iris_input_multiclass_fn
classifier.fit(input_fn=input_fn, steps=200)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
self.assertIn('loss', scores)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(input_fn=_input_fn, steps=200)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
classifier = debug.DebugClassifier(n_classes=3)
classifier.fit(x=train_x, y=train_y, steps=200)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testMultiClass_StringLabel(self):
"""Tests multi-class classification with string labels."""
def _input_fn_train():
labels = constant_op.constant([['foo'], ['bar'], ['baz'], ['bar']])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(
n_classes=3, label_keys=['foo', 'bar', 'baz'])
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
classifier = debug.DebugClassifier(n_classes=2)
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(
weight_column_name='w',
n_classes=2,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = debug.DebugClassifier(weight_column_name='w')
classifier.fit(input_fn=_input_fn_train, steps=5)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
self._assertInRange(0.0, 1.0, scores['accuracy'])
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
labels = math_ops.cast(labels, predictions.dtype)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict_classes(input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=5,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
model_dir = tempfile.mkdtemp()
classifier = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions1 = classifier.predict_classes(input_fn=predict_input_fn)
del classifier
classifier2 = debug.DebugClassifier(
model_dir=model_dir,
n_classes=3,
config=run_config.RunConfig(tf_random_seed=1))
predictions2 = classifier2.predict_classes(input_fn=predict_input_fn)
self.assertEqual(list(predictions1), list(predictions2))
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
feature_columns = [
feature_column.real_valued_column('age'),
feature_column.embedding_column(language, dimension=1)
]
classifier = debug.DebugClassifier(
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=5)
def default_input_fn(unused_estimator, examples):
return feature_column_ops.parse_feature_columns_from_examples(
examples, feature_columns)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir, input_fn=default_input_fn)
class DebugRegressorTest(test.TestCase):
def setUp(self):
np.random.seed(100)
self.features = np.random.rand(NUM_EXAMPLES, 5)
self.targets = np.random.rand(NUM_EXAMPLES, LABEL_DIMENSION)
def testPredictScores(self):
"""Tests that DebugRegressor outputs the mean target."""
(train_features, train_labels), (test_features,
test_labels) = _train_test_split(
[self.features, self.targets])
mean_target = np.mean(train_labels, 0)
expected_prediction = np.vstack(
[mean_target for _ in range(test_labels.shape[0])])
classifier = debug.DebugRegressor(label_dimension=LABEL_DIMENSION)
classifier.fit(
input_fn=_input_fn_builder(train_features, train_labels), steps=50)
pred = classifier.predict_scores(
input_fn=_input_fn_builder(test_features, None))
self.assertAllClose(expected_prediction, np.vstack(pred), atol=0.1)
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=debug.DebugRegressor(),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, debug.DebugRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
input_fn = test_data.iris_input_logistic_fn
regressor.fit(input_fn=input_fn, steps=200)
scores = regressor.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_MatrixData_Labels1D(self):
"""Same as the last test, but label shape is [100] instead of [100, 1]."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(x=train_x, y=train_y, steps=200)
scores = regressor.evaluate(x=train_x, y=train_y, steps=1)
self.assertIn('loss', scores)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=200)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
}
return features, labels
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
self.assertIn('loss', scores)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = debug.DebugRegressor(
weight_column_name='w', config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=5)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
self.assertIn('loss', scores)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs),
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = debug.DebugRegressor(
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
model_dir = tempfile.mkdtemp()
regressor = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = debug.DebugRegressor(
model_dir=model_dir, config=run_config.RunConfig(tf_random_seed=1))
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
if __name__ == '__main__':
test.main()
| apache-2.0 |
fivejjs/bayespy | bayespy/inference/vmp/nodes/GaussianProcesses.py | 5 | 25953 | ################################################################################
# Copyright (C) 2011-2012 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import itertools
import numpy as np
#import scipy as sp
#import scipy.linalg.decomp_cholesky as decomp
import scipy.linalg as linalg
#import scipy.special as special
#import matplotlib.pyplot as plt
#import time
#import profile
#import scipy.spatial.distance as distance
import scipy.sparse as sp
from bayespy.utils import misc as utils
from . import node as EF
from . import CovarianceFunctions as CF
class CovarianceMatrix:
def cholesky(self):
pass
def multiply(A, B):
return np.multiply(A,B)
# m prior mean function
# k prior covariance function
# x data inputs
# z processed data outputs (z = inv(Cov) * (y-m(x)))
# U data covariance Cholesky factor
def gp_posterior_moment_function(m, k, x, y, k_sparse=None, pseudoinputs=None, noise=None):
# Prior
# FIXME: We are ignoring the covariance of mu now..
mu = m(x)[0]
## if np.ndim(mu) == 1:
## mu = np.asmatrix(mu).T
## else:
## mu = np.asmatrix(mu)
K_noise = None
if noise != None:
if K_noise is None:
K_noise = noise
else:
K_noise += noise
if k_sparse != None:
if K_noise is None:
K_noise = k_sparse(x,x)[0]
else:
K_noise += k_sparse(x,x)[0]
if pseudoinputs != None:
p = pseudoinputs
#print('in pseudostuff')
#print(K_noise)
#print(np.shape(K_noise))
K_pp = k(p,p)[0]
K_xp = k(x,p)[0]
U = utils.chol(K_noise)
# Compute Lambda
Lambda = K_pp + np.dot(K_xp.T, utils.chol_solve(U, K_xp))
U_lambda = utils.chol(Lambda)
# Compute statistics for posterior predictions
#print(np.shape(U_lambda))
#print(np.shape(y))
z = utils.chol_solve(U_lambda,
np.dot(K_xp.T,
utils.chol_solve(U,
y - mu)))
U = utils.chol(K_pp)
# Now we can forget the location of the observations and
# consider only the pseudoinputs when predicting.
x = p
else:
K = K_noise
if K is None:
K = k(x,x)[0]
else:
try:
K += k(x,x)[0]
except:
K = K + k(x,x)[0]
# Compute posterior GP
N = len(y)
U = None
z = None
if N > 0:
U = utils.chol(K)
z = utils.chol_solve(U, y-mu)
def get_moments(h, covariance=1, mean=True):
K_xh = k(x, h)[0]
if k_sparse != None:
try:
# This may not work, for instance, if either one is a
# sparse matrix.
K_xh += k_sparse(x, h)[0]
except:
K_xh = K_xh + k_sparse(x, h)[0]
# NumPy has problems when mixing matrices and arrays.
# Matrices may appear, for instance, when you sum an array and
# a sparse matrix. Make sure the result is either an array or
# a sparse matrix (not dense matrix!), because matrix objects
# cause lots of problems:
#
# array.dot(array) = array
# matrix.dot(array) = matrix
# sparse.dot(array) = array
if not sp.issparse(K_xh):
K_xh = np.asarray(K_xh)
# Function for computing posterior moments
if mean:
# Mean vector
# FIXME: Ignoring the covariance of prior mu
m_h = m(h)[0]
if z != None:
m_h += K_xh.T.dot(z)
else:
m_h = None
# Compute (co)variance matrix/vector
if covariance:
if covariance == 1:
## Compute variance vector
k_h = k(h)[0]
if k_sparse != None:
k_h += k_sparse(h)[0]
if U != None:
if isinstance(K_xh, np.ndarray):
k_h -= np.einsum('i...,i...',
K_xh,
utils.chol_solve(U, K_xh))
else:
# TODO: This isn't very efficient way, but
# einsum doesn't work for sparse matrices..
# This may consume A LOT of memory for sparse
# matrices.
k_h -= np.asarray(K_xh.multiply(utils.chol_solve(U, K_xh))).sum(axis=0)
if pseudoinputs != None:
if isinstance(K_xh, np.ndarray):
k_h += np.einsum('i...,i...',
K_xh,
utils.chol_solve(U_lambda, K_xh))
else:
# TODO: This isn't very efficient way, but
# einsum doesn't work for sparse matrices..
# This may consume A LOT of memory for sparse
# matrices.
k_h += np.asarray(K_xh.multiply(utils.chol_solve(U_lambda, K_xh))).sum(axis=0)
# Ensure non-negative variances
k_h[k_h<0] = 0
return (m_h, k_h)
elif covariance == 2:
## Compute full covariance matrix
K_hh = k(h,h)[0]
if k_sparse != None:
K_hh += k_sparse(h)[0]
if U != None:
K_hh -= K_xh.T.dot(utils.chol_solve(U,K_xh))
#K_hh -= np.dot(K_xh.T, utils.chol_solve(U,K_xh))
if pseudoinputs != None:
K_hh += K_xh.T.dot(utils.chol_solve(U_lambda, K_xh))
#K_hh += np.dot(K_xh.T, utils.chol_solve(U_lambda, K_xh))
return (m_h, K_hh)
else:
return (m_h, None)
return get_moments
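# Illustrative sketch (not part of the original module): how the moment function
# returned above might be used. The covariance function and the data below are
# hypothetical placeholders, following the pseudo-code convention used later in
# this file.
## m = lambda x: (np.zeros(len(x)), None)          # zero prior mean
## k = gp_cov_se(magnitude=1.0, lengthscale=0.5)   # hypothetical SE covariance
## x_obs = np.array([0.0, 0.5, 1.0])
## y_obs = np.array([0.1, 0.8, 0.9])
## moments = gp_posterior_moment_function(m, k, x_obs, y_obs)
## (mean, var) = moments(np.array([0.25, 0.75]), covariance=1)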
# Constant function using GP mean protocol
class Constant(EF.Node):
def __init__(self, f, **kwargs):
self.f = f
EF.Node.__init__(self, dims=[(np.inf,)], **kwargs)
def message_to_child(self, gradient=False):
# Wrapper
def func(x, gradient=False):
if gradient:
return ([self.f(x), None], [])
else:
return [self.f(x), None]
return func
#class MultiDimensional(EF.NodeVariable):
# """ A multi-dimensional Gaussian process f(x). """
## class ToGaussian(EF.NodeVariable):
## """ Deterministic node which transform a Gaussian process into
## finite-dimensional Gaussian variable. """
## def __init__(self, f, x, **kwargs):
## EF.NodeVariable.__init__(self,
## f,
## x,
## plates=
## dims=
# Deterministic node for creating a set of GPs which can be used as a
# mean function to a general GP node.
class Multiple(EF.Node):
def __init__(self, GPs, **kwargs):
# Ignore plates
EF.NodeVariable.__init__(self,
*GPs,
plates=(),
dims=[(np.inf,), (np.inf,np.inf)],
**kwargs)
def message_to_parent(self, index):
raise Exception("not implemented yet")
def message_to_child(self, gradient=False):
u = [parent.message_to_child() for parent in self.parents]
def get_moments(xh, **kwargs):
mh_all = []
khh_all = []
for i in range(len(self.parents)):
xi = np.array(xh[i])
#print(xi)
#print(np.shape(xi))
#print(xi)
# FIXME: We are ignoring the covariance of mu now..
if gradient:
((mh, khh), dm) = u[i](xi, **kwargs)
else:
(mh, khh) = u[i](xi, **kwargs)
#mh = u[i](xi, **kwargs)[0]
#print(mh)
#print(mh_all)
## print(mh)
## print(khh)
## print(np.shape(mh))
mh_all = np.concatenate([mh_all, mh])
#print(np.shape(mh_all))
if khh != None:
print(khh)
raise Exception('Not implemented yet for covariances')
#khh_all = np.concatenate([khh_all, khh])
# FIXME: Compute gradients!
if gradient:
return ([mh_all, khh_all], [])
else:
return [mh_all, khh_all]
#return [mh_all, khh_all]
return get_moments
# Gaussian process distribution
class GaussianProcess(EF.Node):
def __init__(self, m, k, k_sparse=None, pseudoinputs=None, **kwargs):
self.x = np.array([])
self.f = np.array([])
## self.x_obs = np.zeros((0,1))
## self.f_obs = np.zeros((0,))
if pseudoinputs != None:
pseudoinputs = EF.NodeConstant([pseudoinputs],
dims=[np.shape(pseudoinputs)])
# By default, posterior == prior
self.m = None #m
self.k = None #k
if isinstance(k, list) and isinstance(m, list):
if len(k) != len(m):
raise Exception('The number of mean and covariance functions must be equal.')
k = CF.Multiple(k)
m = Multiple(m)
elif isinstance(k, list):
D = len(k)
k = CF.Multiple(k)
m = Multiple(D*[m])
elif isinstance(m, list):
D = len(m)
k = CF.Multiple(D*[k])
m = Multiple(m)
# Ignore plates
EF.NodeVariable.__init__(self,
m,
k,
k_sparse,
pseudoinputs,
plates=(),
dims=[(np.inf,), (np.inf,np.inf)],
**kwargs)
def __call__(self, x, covariance=None):
if not covariance:
return self.u(x, covariance=False)[0]
elif covariance.lower() == 'vector':
return self.u(x, covariance=1)
elif covariance.lower() == 'matrix':
return self.u(x, covariance=2)
else:
raise Exception("Unknown covariance type requested")
def message_to_parent(self, index):
if index == 0:
k = self.parents[1].message_to_child()[0]
K = k(self.x, self.x)
return [self.x,
self.mu,
K]
if index == 1:
raise Exception("not implemented yet")
def message_to_child(self):
if self.observed:
raise Exception("Observable GP should not have children.")
return self.u
def get_parameters(self):
return self.u
def observe(self, x, f):
self.observed = True
self.x = x
self.f = f
## if np.ndim(f) == 1:
## self.f = np.asmatrix(f).T
## else:
## self.f = np.asmatrix(f)
# You might want:
# - mean for x
# - covariance (and mean) for x
# - variance (and mean) for x
# - i.e., mean and/or (co)variance for x
# - covariance for x1 and x2
def lower_bound_contribution(self, gradient=False):
# Get moment functions from parents
m = self.parents[0].message_to_child(gradient=gradient)
k = self.parents[1].message_to_child(gradient=gradient)
if self.parents[2]:
k_sparse = self.parents[2].message_to_child(gradient=gradient)
else:
k_sparse = None
if self.parents[3]:
pseudoinputs = self.parents[3].message_to_child(gradient=gradient)
#pseudoinputs = self.parents[3].message_to_child(gradient=gradient)[0]
else:
pseudoinputs = None
## m = self.parents[0].message_to_child(gradient=gradient)[0]
## k = self.parents[1].message_to_child(gradient=gradient)[0]
# Compute the parameters (covariance matrices etc) using
# parents' moment functions
DKs_xx = []
DKd_xx = []
DKd_xp = []
DKd_pp = []
Dxp = []
Dmu = []
if gradient:
# FIXME: We are ignoring the covariance of mu now..
((mu, _), Dmu) = m(self.x, gradient=True)
## if k_sparse:
## ((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
if pseudoinputs:
((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
((xp,), Dxp) = pseudoinputs
((Kd_pp,), DKd_pp) = k(xp,xp, gradient=True)
((Kd_xp,), DKd_xp) = k(self.x, xp, gradient=True)
else:
((K_xx,), DKd_xx) = k(self.x, self.x, gradient=True)
if k_sparse:
((Ks_xx,), DKs_xx) = k_sparse(self.x, self.x, gradient=True)
try:
K_xx += Ks_xx
except:
K_xx = K_xx + Ks_xx
else:
# FIXME: We are ignoring the covariance of mu now..
(mu, _) = m(self.x)
## if k_sparse:
## (Ks_xx,) = k_sparse(self.x, self.x)
if pseudoinputs:
(Ks_xx,) = k_sparse(self.x, self.x)
(xp,) = pseudoinputs
(Kd_pp,) = k(xp, xp)
(Kd_xp,) = k(self.x, xp)
else:
(K_xx,) = k(self.x, self.x)
if k_sparse:
(Ks_xx,) = k_sparse(self.x, self.x)
try:
K_xx += Ks_xx
except:
K_xx = K_xx + Ks_xx
mu = mu[0]
#K = K[0]
# Log pdf
if self.observed:
## Log pdf for directly observed GP
f0 = self.f - mu
#print('hereiam')
#print(K)
if pseudoinputs:
## Pseudo-input approximation
# Decompose the full-rank sparse/noise covariance matrix
try:
Us_xx = utils.cholesky(Ks_xx)
except linalg.LinAlgError:
print('Noise/sparse covariance not positive definite')
return -np.inf
# Use Woodbury-Sherman-Morrison formula with the
# following notation:
#
# y2 = f0' * inv(Kd_xp*inv(Kd_pp)*Kd_xp' + Ks_xx) * f0
#
# z = Ks_xx \ f0
# Lambda = Kd_pp + Kd_xp'*inv(Ks_xx)*Kd_xp
# nu = inv(Lambda) * (Kd_xp' * (Ks_xx \ f0))
# rho = Kd_xp * inv(Lambda) * (Kd_xp' * (Ks_xx \ f0))
#
# y2 = f0' * z - z' * rho
z = Us_xx.solve(f0)
Lambda = Kd_pp + np.dot(Kd_xp.T,
Us_xx.solve(Kd_xp))
## z = utils.chol_solve(Us_xx, f0)
## Lambda = Kd_pp + np.dot(Kd_xp.T,
## utils.chol_solve(Us_xx, Kd_xp))
try:
U_Lambda = utils.cholesky(Lambda)
#U_Lambda = utils.chol(Lambda)
except linalg.LinAlgError:
print('Lambda not positive definite')
return -np.inf
nu = U_Lambda.solve(np.dot(Kd_xp.T, z))
#nu = utils.chol_solve(U_Lambda, np.dot(Kd_xp.T, z))
rho = np.dot(Kd_xp, nu)
y2 = np.dot(f0, z) - np.dot(z, rho)
# Use matrix determinant lemma
#
# det(Kd_xp*inv(Kd_pp)*Kd_xp' + Ks_xx)
# = det(Kd_pp + Kd_xp'*inv(Ks_xx)*Kd_xp)
# * det(inv(Kd_pp)) * det(Ks_xx)
# = det(Lambda) * det(Ks_xx) / det(Kd_pp)
try:
Ud_pp = utils.cholesky(Kd_pp)
#Ud_pp = utils.chol(Kd_pp)
except linalg.LinAlgError:
print('Covariance of pseudo inputs not positive definite')
return -np.inf
logdet = (U_Lambda.logdet()
+ Us_xx.logdet()
- Ud_pp.logdet())
## logdet = (utils.logdet_chol(U_Lambda)
## + utils.logdet_chol(Us_xx)
## - utils.logdet_chol(Ud_pp))
# Compute the log pdf
L = gaussian_logpdf(y2,
0,
0,
logdet,
np.size(self.f))
# Add the variational cost of the pseudo-input
# approximation
# Compute gradients
for (dmu, func) in Dmu:
# Derivative w.r.t. mean vector
d = np.nan
# Send the derivative message
func(d)
for (dKs_xx, func) in DKs_xx:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
for (dKd_xp, func) in DKd_xp:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
V = Ud_pp.solve(Kd_xp.T)
Z = Us_xx.solve(V.T)
## V = utils.chol_solve(Ud_pp, Kd_xp.T)
## Z = utils.chol_solve(Us_xx, V.T)
for (dKd_pp, func) in DKd_pp:
# Compute derivative w.r.t. covariance matrix
d = (0.5 * np.trace(Ud_pp.solve(dKd_pp))
- 0.5 * np.trace(U_Lambda.solve(dKd_pp))
+ np.dot(nu, np.dot(dKd_pp, nu))
+ np.trace(np.dot(dKd_pp,
np.dot(V,Z))))
## d = (0.5 * np.trace(utils.chol_solve(Ud_pp, dKd_pp))
## - 0.5 * np.trace(utils.chol_solve(U_Lambda, dKd_pp))
## + np.dot(nu, np.dot(dKd_pp, nu))
## + np.trace(np.dot(dKd_pp,
## np.dot(V,Z))))
# Send the derivative message
func(d)
for (dxp, func) in Dxp:
# Compute derivative w.r.t. covariance matrix
d = np.nan
# Send the derivative message
func(d)
else:
## Full exact (no pseudo approximations)
try:
U = utils.cholesky(K_xx)
#U = utils.chol(K_xx)
except linalg.LinAlgError:
print('non positive definite, return -inf')
return -np.inf
z = U.solve(f0)
#z = utils.chol_solve(U, f0)
#print(K)
L = utils.gaussian_logpdf(np.dot(f0, z),
0,
0,
U.logdet(),
## utils.logdet_chol(U),
np.size(self.f))
for (dmu, func) in Dmu:
# Derivative w.r.t. mean vector
d = -np.sum(z)
# Send the derivative message
func(d)
for (dK, func) in DKd_xx:
# Compute derivative w.r.t. covariance matrix
#
# TODO: trace+chol_solve should be handled better
# for sparse matrices. Use sparse-inverse!
d = 0.5 * (dK.dot(z).dot(z)
- U.trace_solve_gradient(dK))
## - np.trace(U.solve(dK)))
## d = 0.5 * (dK.dot(z).dot(z)
## - np.trace(utils.chol_solve(U, dK)))
#print('derivate', d, dK)
## d = 0.5 * (np.dot(z, np.dot(dK, z))
## - np.trace(utils.chol_solve(U, dK)))
#
# Send the derivative message
func(d)
for (dK, func) in DKs_xx:
# Compute derivative w.r.t. covariance matrix
d = 0.5 * (dK.dot(z).dot(z)
- U.trace_solve_gradient(dK))
## - np.trace(U.solve(dK)))
## d = 0.5 * (dK.dot(z).dot(z)
## - np.trace(utils.chol_solve(U, dK)))
## d = 0.5 * (np.dot(z, np.dot(dK, z))
## - np.trace(utils.chol_solve(U, dK)))
# Send the derivative message
func(d)
else:
## Log pdf for latent GP
raise Exception('Not implemented yet')
return L
## Let f1 be observed and f2 latent function values.
# Compute <log p(f1,f2|m,k)>
#L = gaussian_logpdf(sum_product(np.outer(self.f,self.f) + self.Cov,
# Compute <log q(f2)>
def update(self):
# Messages from parents
m = self.parents[0].message_to_child()
k = self.parents[1].message_to_child()
if self.parents[2]:
k_sparse = self.parents[2].message_to_child()
else:
k_sparse = None
if self.parents[3]:
pseudoinputs = self.parents[3].message_to_child()[0]
else:
pseudoinputs = None
## m = self.parents[0].message_to_child()[0]
## k = self.parents[1].message_to_child()[0]
if self.observed:
# Observations of this node
self.u = gp_posterior_moment_function(m,
k,
self.x,
self.f,
k_sparse=k_sparse,
pseudoinputs=pseudoinputs)
else:
            x = np.array([])
            y = np.array([])
            V = np.zeros((0, 0))  # block-diagonal covariance, accumulated from children below
# Messages from children
for (child,index) in self.children:
(msg, mask) = child.message_to_parent(index)
# Ignoring masks and plates..
# m[0] is the inputs
x = np.concatenate((x, msg[0]), axis=-2)
# m[1] is the observations
y = np.concatenate((y, msg[1]))
# m[2] is the covariance matrix
V = linalg.block_diag(V, msg[2])
self.u = gp_posterior_moment_function(m, k, x, y, covariance=V)
self.x = x
self.f = y
# At least for now, simplify this GP node such that a GP is either
# observed or latent. If it is observed, it doesn't take messages from
# children, actually, it should not even have children!
## # Pseudo for GPFA:
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k3 = gp_cov_rq(magnitude=.., lengthscale=.., alpha=..)
## f = NodeGPSet(0, [k1,k2,k3]) # assumes block diagonality
## # f = NodeGPSet(0, [[k11,k12,k13],[k21,k22,k23],[k31,k32,k33]])
## X = GaussianFromGP(f, [ [[t0,0],[t0,1],[t0,2]], [t1,0],[t1,1],[t1,2], ..])
## ...
## # Construct a sum of GPs if interested only in the sum term
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k = gp_cov_sum(k1, k2)
## f = NodeGP(0, k)
## f.observe(x, y)
## f.update()
## (mp, kp) = f.get_parameters()
## # Construct a sum of GPs when interested also in the individual
## # GPs:
## k1 = gp_cov_se(magnitude=theta1, lengthscale=theta2)
## k2 = gp_cov_periodic(magnitude=.., lengthscale=.., period=..)
## k3 = gp_cov_delta(magnitude=theta3)
## f = NodeGPSum(0, [k1,k2,k3])
## x = np.array([1,2,3,4,5,6,7,8,9,10])
## y = np.sin(x[0]) + np.random.normal(0, 0.1, (10,))
## # Observe the sum (index 0)
## f.observe((0,x), y)
## # Inference
## f.update()
## (mp, kp) = f.get_parameters()
## # Mean of the sum
## mp[0](...)
## # Mean of the individual terms
## mp[1](...)
## mp[2](...)
## mp[3](...)
## # Covariance of the sum
## kp[0][0](..., ...)
## # Other covariances
## kp[1][1](..., ...)
## kp[2][2](..., ...)
## kp[3][3](..., ...)
## kp[1][2](..., ...)
## kp[1][3](..., ...)
## kp[2][3](..., ...)
| mit |
elkingtonmcb/scikit-learn | sklearn/utils/tests/test_linear_assignment.py | 421 | 1349 | # Author: Brian M. Clapper, G Varoquaux
# License: BSD
import numpy as np
# XXX we should be testing the public API here
from sklearn.utils.linear_assignment_ import _hungarian
def test_hungarian():
matrices = [
# Square
([[400, 150, 400],
[400, 450, 600],
[300, 225, 300]],
850 # expected cost
),
# Rectangular variant
([[400, 150, 400, 1],
[400, 450, 600, 2],
[300, 225, 300, 3]],
452 # expected cost
),
# Square
([[10, 10, 8],
[9, 8, 1],
[9, 7, 4]],
18
),
# Rectangular variant
([[10, 10, 8, 11],
[9, 8, 1, 1],
[9, 7, 4, 10]],
15
),
# n == 2, m == 0 matrix
([[], []],
0
),
]
for cost_matrix, expected_total in matrices:
cost_matrix = np.array(cost_matrix)
indexes = _hungarian(cost_matrix)
total_cost = 0
for r, c in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
indexes = _hungarian(cost_matrix.T)
total_cost = 0
for c, r in indexes:
x = cost_matrix[r, c]
total_cost += x
assert expected_total == total_cost
| bsd-3-clause |
anki1909/peach | tutorial/fuzzy-logic/simple-controller.py | 6 | 5727 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/simple-controller.py
# A simgle-input-single-output Mamdani controller
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
import pylab as p
# This tutorial shows how to work with a fuzzy-based controller. It is really
# easy to build a standard controller using Peach. We won't go into details of
# how a controller should work -- please, consult the literature on the subject,
# as it is very rich and explains the topic a lot better than we could do here.
#
# We will show how to build a simple single-input single-output controller for
# no specific plant -- it will be completelly abstract. The goal is to show how
# to work with the capabilities built in Peach for dealing with it. A Mamdani
# controller has, typically, three steps: fuzzification, in which numerical
# values are converted to the fuzzy domain; decision rules, where the
# relationship between controlled variable and manipulated variable are
# stablished; and defuzzification, where we travel back from fuzzified domain to
# crisp numerical values.
#
# To build a controller, thus, we need to specify the membership functions of
# the controlled variable. There are a number of ways of doing that (please, see
# the tutorial on membership functions for more detail): we could use built-in
# membership functions; define our own membership functions; or use a support
# function, such as the one below.
#
# Suppose we wanted to use three membership functions to fuzzify our input
# variable: a decreasing ramp from -1 to 0, a triangle ramp from -1 to 0 to 1,
# and an increasing ramp from 0 to 1. We could define these functions as:
#
# i_neg = DecreasingRamp(-1, 0)
# i_zero = Triangle(-1, 0, 1)
# i_pos = IncreasingRamp(0, 1)
#
# Nothing wrong with this method. But, since sequences of triangles are so usual
# in fuzzy controllers, Peach has two methods to create them in a batch. The
# first one is the ``Saw`` function: given an interval and a number of
# functions, it splits the interval in equally spaced triangles. The second one
# is the ``FlatSaw`` function: it also creates a sequence of equally spaced
# triangles, but use a decreasing ramp as the first function, and an increasing
# function as the last one. Both of them return a tuple containing the functions
# in order. The same functions above could be created with the command:
i_neg, i_zero, i_pos = FlatSaw((-2, 2), 3)
# assuming, that is, that the input variable will range from -2 to 2. Notice
# that if we don't use the correct interval, the starts and ends of the
# functions won't fall where we want them. Notice, also, that we are here using
# membership functions, not fuzzy sets!
# We will also need to create membership functions for the output variable.
# Let's assume we need three functions as above, in the range from -10 to 10. We
# do:
o_neg, o_zero, o_pos = FlatSaw((-10, 10), 3)
# The control will be done following the decision rules:
#
# IF input is negative THEN output is positive
# IF input is zero THEN output is zero
# IF input is positive THEN output is negative
#
# We will create now the controller that will implement these rules. Here is
# what we do:
Points = 100
yrange = numpy.linspace(-10., 10., 500)
c = Controller(yrange)
# Here, ``yrange`` is the interval in which the output variable is defined. Our
# controlled doesn't have any rules, so we must add them. To add rules to a
# controller, we use the ``add_rule`` method. A rule is a tuple with the
# following format:
#
# ((input_mf, ), output_mf)
#
# where ``input_mf`` is the condition, and ``output_mf`` is the consequence.
# This format can be used to control multiple variables. For instance, if you
# wanted to control three variables, a rule would have the form:
#
# ((input1_mf, input2_mf, input3_mf), output_mf)
#
# Notice that the conditions are wrapped in a tuple themselves. We will add the
# rules of our controller now:
c.add_rule(((i_neg,), o_pos))
c.add_rule(((i_zero,), o_zero))
c.add_rule(((i_pos,), o_neg))
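# A side note added for illustration (not part of the original tutorial): with
# two controlled variables, each rule's condition tuple simply holds one
# membership function per input. The names j_neg/j_pos below are hypothetical:
#
# c.add_rule(((i_neg, j_pos), o_zero))
# c.add_rule(((i_pos, j_neg), o_zero))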
# The controller is ready to run. We use the ``__call__`` interface to pass to
# the controller the values of the variables (in the form of a n-dimension
# array), and it returns us the result. We will iterate over the domain of the
# input variable to plot the transfer function:
x = numpy.linspace(-2., 2., Points)
y = [ ]
for x0 in x:
y.append(c(x0))
y = numpy.array(y)
# We will use the matplotlib module to plot these functions. We save the plot in
# a figure called 'simple-controller-mf.png', containing the membership
# functions, and another called 'simple-controller.png', containing the transfer
# function.
try:
from matplotlib import *
from matplotlib.pylab import *
figure(1).set_size_inches(8., 4.)
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.hold(True)
a1.plot(x, i_neg(x))
a1.plot(x, i_zero(x))
a1.plot(x, i_pos(x))
a1.set_xlim([ -2., 2. ])
a1.set_ylim([ -0.1, 1.1 ])
a1.legend([ 'Negative', 'Zero', 'Positive' ])
savefig("simple-controller-mf.png")
clf()
a1 = axes([ 0.125, 0.10, 0.775, 0.8 ])
a1.plot(x, y, 'k-')
a1.set_xlim([ -2., 2. ])
a1.set_ylim([ -10., 10. ])
savefig("simple-controller.png")
except ImportError:
    pass
| lgpl-2.1 |
MatthieuBizien/scikit-learn | sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
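# A minimal usage sketch (illustrative, not part of this module): for boolean
# features, a threshold of p * (1 - p) removes features that take the same
# value in more than a fraction p of the samples, e.g. p = 0.8:
#
#   X = [[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 1, 1], [0, 1, 0], [0, 1, 1]]
#   selector = VarianceThreshold(threshold=(.8 * (1 - .8)))
#   selector.fit_transform(X)  # drops the first column, which is mostly zeros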
| bsd-3-clause |
BiaDarkia/scikit-learn | examples/cluster/plot_cluster_comparison.py | 8 | 6716 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example shows characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. With the exception of the last dataset,
the parameters of each of these dataset-algorithm pairs
has been tuned to produce good clustering results. Some
algorithms are more sensitive to parameter values than
others.
The last dataset is an example of a 'null' situation for
clustering: the data is homogeneous, and there is no good
clustering. For this example, the null dataset uses the
same parameters as the dataset in the row above it, which
represents a mismatch in the parameter values and the
data structure.
While these examples give some intuition about the
algorithms, this intuition might not apply to very high
dimensional data.
"""
print(__doc__)
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
np.random.seed(0)
# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
datasets = [
(noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2}),
(noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
(varied, {'eps': .18, 'n_neighbors': 2}),
(aniso, {'eps': .15, 'n_neighbors': 2}),
(blobs, {}),
(no_structure, {})]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=params['quantile'])
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# ============
# Create cluster objects
# ============
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='full')
clustering_algorithms = (
('MiniBatchKMeans', two_means),
('AffinityPropagation', affinity_propagation),
('MeanShift', ms),
('SpectralClustering', spectral),
('Ward', ward),
('AgglomerativeClustering', average_linkage),
('DBSCAN', dbscan),
('Birch', birch),
('GaussianMixture', gmm)
)
for name, algorithm in clustering_algorithms:
t0 = time.time()
# catch warnings related to kneighbors_graph
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
# add black color for outliers (if any)
colors = np.append(colors, ["#000000"])
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
vijaysbhat/incubator-airflow | airflow/contrib/hooks/salesforce_hook.py | 30 | 12110 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a Salesforce Hook
which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file
for other uses.
NOTE: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
from simple_salesforce import Salesforce
from airflow.hooks.base_hook import BaseHook
import logging
import json
import pandas as pd
import time
class SalesforceHook(BaseHook):
def __init__(
self,
conn_id,
*args,
**kwargs
):
"""
Create new connection to Salesforce
and allows you to pull data out of SFDC and save it to a file.
You can then use that file with other
Airflow operators to move the data into another data source
:param conn_id: the name of the connection that has the parameters
we need to connect to Salesforce.
            The connection should be type `http` and include a
user's security token in the `Extras` field.
.. note::
For the HTTP connection type, you can include a
JSON structure in the `Extras` field.
We need a user's security token to connect to Salesforce.
So we define it in the `Extras` field as:
`{"security_token":"YOUR_SECRUITY_TOKEN"}`
"""
self.conn_id = conn_id
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson
def sign_in(self):
"""
Sign into Salesforce.
        If we have already signed in, this will just return the original object
"""
if hasattr(self, 'sf'):
return self.sf
# connect to Salesforce
sf = Salesforce(
username=self.connection.login,
password=self.connection.password,
security_token=self.extras['security_token'],
instance_url=self.connection.host
)
self.sf = sf
return sf
def make_query(self, query):
"""
Make a query to Salesforce. Returns result in dictionary
:param query: The query to make to Salesforce
"""
self.sign_in()
logging.info("Querying for all objects")
query = self.sf.query_all(query)
logging.info(
"Received results: Total size: {0}; Done: {1}".format(
query['totalSize'], query['done']
)
)
query = json.loads(json.dumps(query))
return query
def describe_object(self, obj):
"""
Get the description of an object from Salesforce.
This description is the object's schema
and some extra metadata that Salesforce stores for each object
:param obj: Name of the Salesforce object
that we are getting a description of.
"""
self.sign_in()
return json.loads(json.dumps(self.sf.__getattr__(obj).describe()))
def get_available_fields(self, obj):
"""
Get a list of all available fields for an object.
This only returns the names of the fields.
"""
self.sign_in()
desc = self.describe_object(obj)
return [f['name'] for f in desc['fields']]
def _build_field_list(self, fields):
        # join all of the fields in a comma-separated list
return ",".join(fields)
def get_object_from_salesforce(self, obj, fields):
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
"""
field_string = self._build_field_list(fields)
query = "SELECT {0} FROM {1}".format(field_string, obj)
logging.info(
"Making query to salesforce: {0}".format(
query if len(query) < 30
else " ... ".join([query[:15], query[-15:]])
)
)
return self.make_query(query)
@classmethod
def _to_timestamp(cls, col):
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param col: A Series object representing a column of a dataframe.
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
col = pd.to_datetime(col)
except ValueError:
logging.warning(
"Could not convert field to timestamps: {0}".format(col.name)
)
return col
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for i in col:
try:
converted.append(i.timestamp())
except ValueError:
converted.append(pd.np.NaN)
except AttributeError:
converted.append(pd.np.NaN)
# return a new series that maintains the same index as the original
return pd.Series(converted, index=col.index)
def write_object_to_file(
self,
query_results,
filename,
fmt="csv",
coerce_to_timestamp=False,
record_time_added=False
):
"""
Write query results to file.
Acceptable formats are:
- csv:
                comma-separated-values file. This is the default format.
- json:
JSON array. Each element in the array is a different row.
- ndjson:
                JSON array but each element is newline-delimited
                instead of comma-delimited like in `json`
This requires a significant amount of cleanup.
Pandas doesn't handle output to CSV and json in a uniform way.
This is especially painful for datetime types.
Pandas wants to write them as strings in CSV,
        but as millisecond Unix timestamps in JSON.
By default, this function will try and leave all values as
they are represented in Salesforce.
You use the `coerce_to_timestamp` flag to force all datetimes
to become Unix timestamps (UTC).
        This can be greatly beneficial as it will make all of your
datetime fields look the same,
and makes it easier to work with in other database environments
:param query_results: the results from a SQL query
:param filename: the name of the file where the data
should be dumped to
:param fmt: the format you want the output in.
*Default:* csv.
:param coerce_to_timestamp: True if you want all datetime fields to be
converted into Unix timestamps.
False if you want them to be left in the
same format as they were in Salesforce.
Leaving the value as False will result
in datetimes being strings.
*Defaults to False*
:param record_time_added: *(optional)* True if you want to add a
Unix timestamp field to the resulting data
that marks when the data
was fetched from Salesforce.
*Default: False*.
"""
fmt = fmt.lower()
if fmt not in ['csv', 'json', 'ndjson']:
raise ValueError("Format value is not recognized: {0}".format(fmt))
# this line right here will convert all integers to floats if there are
# any None/np.nan values in the column
# that's because None/np.nan cannot exist in an integer column
# we should write all of our timestamps as FLOATS in our final schema
df = pd.DataFrame.from_records(query_results, exclude=["attributes"])
df.columns = [c.lower() for c in df.columns]
# convert columns with datetime strings to datetimes
# not all strings will be datetimes, so we ignore any errors that occur
# we get the object's definition at this point and only consider
# features that are DATE or DATETIME
if coerce_to_timestamp and df.shape[0] > 0:
# get the object name out of the query results
# it's stored in the "attributes" dictionary
# for each returned record
object_name = query_results[0]['attributes']['type']
logging.info("Coercing timestamps for: {0}".format(object_name))
schema = self.describe_object(object_name)
            # possible columns that can be converted to timestamps
# are the ones that are either date or datetime types
# strings are too general and we risk unintentional conversion
possible_timestamp_cols = [
i['name'].lower()
for i in schema['fields']
if i['type'] in ["date", "datetime"] and
i['name'].lower() in df.columns
]
df[possible_timestamp_cols] = df[possible_timestamp_cols].apply(
lambda x: self._to_timestamp(x)
)
if record_time_added:
fetched_time = time.time()
df["time_fetched_from_salesforce"] = fetched_time
# write the CSV or JSON file depending on the option
# NOTE:
# datetimes here are an issue.
# There is no good way to manage the difference
# for to_json, the options are an epoch or a ISO string
# but for to_csv, it will be a string output by datetime
# For JSON we decided to output the epoch timestamp in seconds
# (as is fairly standard for JavaScript)
# And for csv, we do a string
if fmt == "csv":
# there are also a ton of newline objects
# that mess up our ability to write to csv
# we remove these newlines so that the output is a valid CSV format
logging.info("Cleaning data and writing to CSV")
possible_strings = df.columns[df.dtypes == "object"]
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\r\n", "")
)
df[possible_strings] = df[possible_strings].apply(
lambda x: x.str.replace("\n", "")
)
# write the dataframe
df.to_csv(filename, index=False)
elif fmt == "json":
df.to_json(filename, "records", date_unit="s")
elif fmt == "ndjson":
df.to_json(filename, "records", lines=True, date_unit="s")
return df
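# Illustrative usage sketch (not part of the original hook). The connection id,
# object name and field list are hypothetical and assume an Airflow HTTP
# connection whose Extras field contains a "security_token":
#
# hook = SalesforceHook(conn_id='salesforce_default')
# results = hook.get_object_from_salesforce('Lead', ['Id', 'Email', 'CreatedDate'])
# hook.write_object_to_file(results['records'],
#                           filename='/tmp/salesforce_leads.csv',
#                           fmt='csv',
#                           coerce_to_timestamp=True)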
| apache-2.0 |
jzadeh/Aktaion | python/parserDev/brothon/analysis/dataframe_stats.py | 1 | 5040 | """ DataFrame Statistics Methods
- Contingency Table (also called Cross Tabulation)
- Joint Distribution
- G-Scores
References:
- http://en.wikipedia.org/wiki/Contingency_table
- http://en.wikipedia.org/wiki/G_test (Wikipedia)
- http://udel.edu/~mcdonald/stathyptesting.html (Hypothesis Testing)
"""
from __future__ import print_function
import math
# Third Party
import pandas as pd
def contingency_table(dataframe, rownames, colnames, margins=True):
"""Contingency Table (also called Cross Tabulation)
- Table in a matrix format that displays the (multivariate) frequency distribution of the variables
- http://en.wikipedia.org/wiki/Contingency_table
Args:
rownames: the column name or list of columns names that make the keys of the rows
colnames: the column name or list of columns names that make the keys of the columns
"""
# Taking just the rownames + colnames of the dataframe
sub_set = [rownames, colnames]
_sub_df = dataframe[sub_set]
return _sub_df.pivot_table(index=rownames, columns=colnames, margins=margins, aggfunc=len, fill_value=0)
def joint_distribution(dataframe, rownames, colnames):
"""Joint Distribution Table
        - The Contingency Table normalized by the total number of observations
Args:
rownames: the column name or list of columns names that make the keys of the rows
colnames: the column name or list of columns names that make the keys of the columns
"""
cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=True)
total_observations = cont_table['All']['All']
return cont_table/total_observations
def expected_counts(dataframe, rownames, colnames):
"""Expected counts of the multivariate frequency distribution of the variables given the
null hypothesis of complete independence between variables.
Args:
rownames: the column name or list of columns names that make the keys of the rows
colnames: the column name or list of columns names that make the keys of the columns
"""
cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=True)
row_counts = cont_table['All']
column_counts = cont_table.loc['All']
total_observations = cont_table['All']['All']
# There didn't seem to be a good way to vectorize this (Fixme?)
for column in cont_table.columns:
for row in cont_table.index:
cont_table[column][row] = column_counts[column]*row_counts[row]/total_observations
return cont_table
def g_test_scores(dataframe, rownames, colnames):
"""G Test Score for log likelihood ratio
- http://en.wikipedia.org/wiki/G_test (Wikipedia)
- 95th percentile; 5% level; p < 0.05; critical value = 3.84
- 99th percentile; 1% level; p < 0.01; critical value = 6.63
- 99.9th percentile; 0.1% level; p < 0.001; critical value = 10.83
- 99.99th percentile; 0.01% level; p < 0.0001; critical value = 15.13
Args:
rownames: the column name or list of columns names that make the keys of the rows
colnames: the column name or list of columns names that make the keys of the columns
"""
cont_table = contingency_table(dataframe, rownames=rownames, colnames=colnames, margins=False)
exp_counts = expected_counts(dataframe, rownames=rownames, colnames=colnames)
# There didn't seem to be a good way to vectorize this (Fixme?)
for row in cont_table.index:
g_score = 0
for column in cont_table.columns:
g_score += compute_g(cont_table[column][row], exp_counts[column][row])
for column in cont_table.columns:
cont_table[column][row] = g_score if cont_table[column][row] > exp_counts[column][row] else -g_score
return cont_table
def compute_g(count, expected):
"""G Test Score for log likelihood ratio
- http://en.wikipedia.org/wiki/G_test (Wikipedia)
"""
return 2.0 * count * math.log(count/expected) if count else 0
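# Worked example (added for illustration): for a single cell with observed
# count 25 and expected count 20, compute_g(25, 20) = 2 * 25 * ln(25/20) ~= 11.16.
# g_test_scores() sums these per-cell terms for a row and compares the total
# against the critical values quoted above (e.g. 10.83 at the 0.1% level).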
# Simple test of the functionality
def test():
"""Test for DataFrame Stats module"""
import os
from brothon.utils import file_utils
# Open a dataset (relative path)
data_dir = file_utils.relative_dir(__file__, 'data')
file_path = os.path.join(data_dir, 'g_test_data.csv')
dataframe = pd.read_csv(file_path)
print(dataframe.head())
# Print out the contingency_table
print('\nContingency Table')
print(contingency_table(dataframe, 'name', 'status'))
# Print out the joint_distribution
print('\nJoint Distribution Table')
print(joint_distribution(dataframe, 'name', 'status'))
# Print out the expected_counts
print('\nExpected Counts Table')
print(expected_counts(dataframe, 'name', 'status'))
# Print out the g_test scores
print('\nG-Test Scores')
print(g_test_scores(dataframe, 'name', 'status'))
if __name__ == "__main__":
test()
| apache-2.0 |
sutherlandm/apm_planner | libs/mavlink/share/pyshared/pymavlink/examples/mavgraph.py | 29 | 5951 | #!/usr/bin/env python
'''
graph a MAVLink log file
Andrew Tridgell August 2011
'''
import sys, struct, time, os, datetime
import math, re
import pylab, pytz, matplotlib
from math import *
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from mavextra import *
locator = None
formatter = None
def plotit(x, y, fields, colors=[]):
'''plot a set of graphs using date for x axis'''
global locator, formatter
pylab.ion()
fig = pylab.figure(num=1, figsize=(12,6))
ax1 = fig.gca()
ax2 = None
xrange = 0.0
for i in range(0, len(fields)):
if len(x[i]) == 0: continue
if x[i][-1] - x[i][0] > xrange:
xrange = x[i][-1] - x[i][0]
xrange *= 24 * 60 * 60
if formatter is None:
if xrange < 1000:
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
else:
formatter = matplotlib.dates.DateFormatter('%H:%M')
interval = 1
intervals = [ 1, 2, 5, 10, 15, 30, 60, 120, 240, 300, 600,
900, 1800, 3600, 7200, 5*3600, 10*3600, 24*3600 ]
for interval in intervals:
if xrange / interval < 15:
break
locator = matplotlib.dates.SecondLocator(interval=interval)
ax1.xaxis.set_major_locator(locator)
ax1.xaxis.set_major_formatter(formatter)
empty = True
ax1_labels = []
ax2_labels = []
for i in range(0, len(fields)):
if len(x[i]) == 0:
print("Failed to find any values for field %s" % fields[i])
continue
if i < len(colors):
color = colors[i]
else:
color = 'red'
(tz, tzdst) = time.tzname
if axes[i] == 2:
if ax2 == None:
ax2 = ax1.twinx()
ax = ax2
ax2.xaxis.set_major_locator(locator)
ax2.xaxis.set_major_formatter(formatter)
label = fields[i]
if label.endswith(":2"):
label = label[:-2]
ax2_labels.append(label)
else:
ax1_labels.append(fields[i])
ax = ax1
ax.plot_date(x[i], y[i], color=color, label=fields[i],
linestyle='-', marker='None', tz=None)
pylab.draw()
empty = False
if ax1_labels != []:
ax1.legend(ax1_labels,loc=opts.legend)
if ax2_labels != []:
ax2.legend(ax2_labels,loc=opts.legend2)
if empty:
print("No data to graph")
return
from optparse import OptionParser
parser = OptionParser("mavgraph.py [options] <filename> <fields>")
parser.add_option("--no-timestamps",dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_option("--planner",dest="planner", action='store_true', help="use planner file format")
parser.add_option("--condition",dest="condition", default=None, help="select packets by a condition")
parser.add_option("--labels",dest="labels", default=None, help="comma separated field labels")
parser.add_option("--mav10", action='store_true', default=False, help="Use MAVLink protocol 1.0")
parser.add_option("--legend", default='upper left', help="default legend position")
parser.add_option("--legend2", default='upper right', help="default legend2 position")
(opts, args) = parser.parse_args()
if opts.mav10:
os.environ['MAVLINK10'] = '1'
import mavutil
if len(args) < 2:
print("Usage: mavlogdump.py [options] <LOGFILES...> <fields...>")
sys.exit(1)
filenames = []
fields = []
for f in args:
if os.path.exists(f):
filenames.append(f)
else:
fields.append(f)
msg_types = set()
multiplier = []
field_types = []
colors = [ 'red', 'green', 'blue', 'orange', 'olive', 'black', 'grey' ]
# work out msg types we are interested in
x = []
y = []
axes = []
first_only = []
re_caps = re.compile('[A-Z_]+')
for f in fields:
caps = set(re.findall(re_caps, f))
msg_types = msg_types.union(caps)
field_types.append(caps)
y.append([])
x.append([])
axes.append(1)
first_only.append(False)
def add_data(t, msg, vars):
'''add some data'''
mtype = msg.get_type()
if mtype not in msg_types:
return
for i in range(0, len(fields)):
if mtype not in field_types[i]:
continue
f = fields[i]
if f.endswith(":2"):
axes[i] = 2
f = f[:-2]
if f.endswith(":1"):
first_only[i] = True
f = f[:-2]
v = mavutil.evaluate_expression(f, vars)
if v is None:
continue
y[i].append(v)
x[i].append(t)
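# Illustrative note (not part of the original script): the optional field
# suffixes handled above control plotting -- ':2' puts a field on the second
# (right-hand) y axis and ':1' keeps values from the first log file only.
# A hypothetical invocation might look like:
#
#   mavgraph.py flight1.tlog 'VFR_HUD.alt' 'VFR_HUD.throttle:2'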
def process_file(filename):
'''process one file'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=opts.notimestamps)
vars = {}
while True:
msg = mlog.recv_match(opts.condition)
if msg is None: break
tdays = (msg._timestamp - time.timezone) / (24 * 60 * 60)
tdays += 719163 # pylab wants it since 0001-01-01
add_data(tdays, msg, mlog.messages)
if len(filenames) == 0:
print("No files to process")
sys.exit(1)
if opts.labels is not None:
labels = opts.labels.split(',')
if len(labels) != len(fields)*len(filenames):
print("Number of labels (%u) must match number of fields (%u)" % (
len(labels), len(fields)*len(filenames)))
sys.exit(1)
else:
labels = None
for fi in range(0, len(filenames)):
f = filenames[fi]
process_file(f)
for i in range(0, len(x)):
if first_only[i] and fi != 0:
x[i] = []
y[i] = []
if labels:
lab = labels[fi*len(fields):(fi+1)*len(fields)]
else:
lab = fields[:]
plotit(x, y, lab, colors=colors[fi*len(fields):])
for i in range(0, len(x)):
x[i] = []
y[i] = []
pylab.show()
raw_input('press enter to exit....')
| agpl-3.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/tests/test_pipeline.py | 3 | 4858 | """
Test the pipeline module.
"""
import numpy as np
from nose.tools import assert_raises, assert_equal, assert_false, assert_true
from sklearn.base import BaseEstimator, clone
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition.pca import PCA, RandomizedPCA
from sklearn.datasets import load_iris
from sklearn.preprocessing import Scaler
class IncorrectT(BaseEstimator):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
""" Test the various init parameters of the pipeline.
"""
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline,
[('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
    # Check that apart from the estimators, the parameters are the same
params = pipe.get_params()
params2 = pipe2.get_params()
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
""" Test the various methods of the pipeline (anova).
"""
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
"""Test that the pipeline can take fit parameters
"""
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_methods_pca_svm():
"""Test the various methods of the pipeline (pca + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
"""Test the various methods of the pipeline (preprocessing + svm)."""
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = Scaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True)
for preprocessing in [scaler, pca]:
        pipe = Pipeline([('preproc', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
| agpl-3.0 |
hetajen/vnpy161 | vn.api/vn.datayes/api.py | 11 | 43801 | #encoding: UTF-8
import os
import json
import time
import requests
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread, Timer
from pymongo import MongoClient
from requests.exceptions import ConnectionError
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError)
class Config(object):
"""
Json-like config object.
The Config contains all kinds of settings and user info that
could be useful in the implementation of Api wrapper.
privates
--------
* head: string; the name of config file.
* token: string; user's token.
* body: dictionary; the main content of config.
- domain: string, api domain.
- ssl: boolean, specifes http or https usage.
- version: string, version of the api. Currently 'v1'.
- header: dictionary; the request header which contains
authorization infomation.
"""
head = 'my config'
toke_ = '44ebc0f058981f85382595f9f15f967' + \
'0c7eaf2695de30dd752e8f33e9022baa0'
token = '575593eb7696aec7339224c0fac2313780d8645f68b77369dcb35f8bcb419a0b'
body = {
'ssl': False,
'domain': 'api.wmcloud.com/data',
'version': 'v1',
'header': {
'Connection' : 'keep-alive',
'Authorization': 'Bearer ' + token
}
}
def __init__(self, head=None, token=None, body=None):
"""
Reloaded constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
if head:
self.head = head
if token:
self.token = token
if body:
self.body = body
def view(self):
""" Prettify printing method. """
config_view = {
'config_head' : self.head,
'config_body' : self.body,
'user_token' : self.token
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
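# Illustrative sketch (not part of the original source): a typical way to
# build a Config with a user-supplied token and hand it to the PyApi wrapper
# defined below. The token string is a placeholder, not a real credential.
#
#   my_config = Config(token='<your_datayes_token>')
#   my_config.view()
#   api = PyApi(my_config)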
#----------------------------------------------------------------------
# Data containers.
class BaseDataContainer(object):
"""
Basic data container. The fundamental of all other data
container objects defined within this module.
privates
--------
* head: string; the head(type) of data container.
* body: dictionary; data content. Among all sub-classes that inherit
BaseDataContainer, type(body) varies according to the financial meaning
that the child data container stands for.
- History:
- Bar
"""
head = 'ABSTRACT_DATA'
body = dict()
pass
class History(BaseDataContainer):
"""
Historical data container. The foundation of all other pandas
DataFrame-like two dimensional data containers for this module.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ...],
'retCode': 1,
'retMsg': 'Success'}.
So the body of data is actually in data['data'], which is
our target when constructing the container.
"""
try:
assert 'data' in data
self.body = pd.DataFrame(data['data'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
class Bar(History):
"""
Historical Bar data container. Inherits from History()
DataFrame-like two dimensional data containers for Bar data.
privates
--------
* head: string; the head(type) of data container.
* body: pd.DataFrame object; contains data contents.
"""
head = 'HISTORY_BAR'
body = pd.DataFrame()
def __init__(self, data):
"""
Reloaded constructor.
parameters
----------
* data: dictionary; usually a Json-like response from
web based api. For our purposes, data is exactly resp.json()
where resp is the response from datayes developer api.
- example: {'data': [{
'exchangeCD': 'XSHG',
'utcOffset': '+08:00',
'unit': 1,
'currencyCD': 'CNY',
'barBodys': [
{
'closePrice': 15.88,
'date': 20150701, ...
},
{
'closePrice': 15.99,
'date': 20150702, ...
}, ... ],
'ticker': '000001',
'shortNM': u'\u4e0a\u8bc1\u6307\u6570'
}, ...(other tickers) ],
'retCode': 1,
'retMsg': 'Success'}.
When requesting 1 ticker, json['data'] layer has only one element;
        we expect that this layer is meant for data collection over multiple
        tickers, which is currently not supported.
So we want resp.json()['data'][0]['barBodys'] for Bar data contents,
and that is what we go into when constructing Bar.
"""
try:
assert 'data' in data
assert 'barBodys' in data['data'][0]
self.body = pd.DataFrame(data['data'][0]['barBodys'])
except AssertionError:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + 'input is not a dataframe.'
raise VNPAST_DataConstructorError(msg)
except Exception,e:
msg = '[{}]: Unable to construct history data; '.format(
self.head) + str(e)
raise VNPAST_DataConstructorError(msg)
#----------------------------------------------------------------------
# Datayes Api class
class PyApi(object):
"""
Python based Datayes Api object.
PyApi should be initialized with a Config json. The config must be complete,
in that once constructed, the private variables like request headers,
tokens, etc. become constant values (inherited from config), and will be
    consistently referred to whenever requests are made.
privates
--------
* _config: Config object; a container of all useful settings when making
requests.
* _ssl, _domain, _domain_stream, _version, _header, _account_id:
boolean, string, string, string, dictionary, integer;
just private references to the items in Config. See the docs of Config().
* _session: requests.session object.
examples
--------
"""
_config = Config()
# request stuffs
_ssl = False
_domain = ''
_version = 'v1'
_header = dict()
_token = None
_session = requests.session()
def __init__(self, config):
"""
Constructor.
parameters
----------
* config: Config object; specifies user and connection configs.
"""
if config.body:
try:
self._config = config
self._ssl = config.body['ssl']
self._domain = config.body['domain']
self._version = config.body['version']
self._header = config.body['header']
except KeyError:
msg = '[API]: Unable to configure api; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[API]: Unable to configure api; ' + str(e)
raise VNPAST_ConfigError(msg)
# configure protocol
if self._ssl:
self._domain = 'https://' + self._domain
else:
self._domain = 'http://' + self._domain
def __access(self, url, params, method='GET'):
"""
request specific data from given url with parameters.
parameters
----------
* url: string.
* params: dictionary.
* method: string; 'GET' or 'POST', request method.
"""
try:
assert type(url) == str
assert type(params) == dict
except AssertionError,e:
            raise AssertionError('[API]: Invalid url or parameter input.')
if not self._session:
s = requests.session()
else: s = self._session
# prepare and send the request.
try:
req = requests.Request(method,
url = url,
headers = self._header,
params = params)
prepped = s.prepare_request(req) # prepare the request
resp = s.send(prepped, stream=False, verify=True)
if method == 'GET':
assert resp.status_code == 200
elif method == 'POST':
assert resp.status_code == 201
return resp
except AssertionError:
msg = '[API]: Bad request, unexpected response status: ' + \
str(resp.status_code)
raise VNPAST_RequestError(msg)
pass
except Exception,e:
msg = '[API]: Bad request.' + str(e)
raise VNPAST_RequestError(msg)
#----------------------------------------------------------------------
# directly get methods - Market data
def get_equity_M1_one(self,
start='', end='', secID='000001.XSHG'):
"""
Get 1-minute intraday bar data of one security.
parameters
----------
* start, end: string; Time mark formatted in 'HH:MM'. Specifies the
start/end point of bar. Note that the requested date is the
latest trading day (only one day), and the default start/end time is
'09:30' and min(now, '15:00'). Effective minute bars range from
09:30 - 11:30 in the morning and 13:01 - 15:00 in the afternoon.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
"""
url = '{}/{}/api/market/getBarRTIntraDay.json'.format(
self._domain, self._version)
params = {
'startTime': start,
'endTime': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
print resp.json()
data = Bar(resp.json())
return data
except AssertionError: return 0
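    # Illustrative sketch (assumed usage, not in the original source): request
    # intraday 1-minute bars for the SSE Composite index between 09:30 and
    # 10:30 of the latest trading day.
    #
    #   api = PyApi(Config())
    #   bars = api.get_equity_M1_one(start='09:30', end='10:30',
    #                                secID='000001.XSHG')
    #   print bars.body.head()   # Bar.body is a pandas DataFrame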
def get_equity_M1(self, field='', start='20130701', end='20130730',
secID='000001.XSHG', output='df'):
"""
1-minute bar in a month, currently unavailable.
parameters
----------
* field: string; variables that are to be requested.
* start, end: string; Time mark formatted in 'YYYYMMDD'.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
url = '{}/{}/api/market/getBarHistDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'startDate': start,
'endDate': end,
'securityID': secID,
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = Bar(resp.json())
elif output == 'list':
data = resp.json()['data'][0]['barBodys']
return data
except AssertionError: return 0
def get_equity_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one security.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for securities)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- actPreClosePrice* double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- dealAmount* integer.
- turnoverRate double.
- accumAdjFactor* double.
- negMarketValue* double.
- marketValue* double.
- PE* double.
- PE1* double.
- PB* double.
Field is an optional parameter, default setting returns all fields.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of bar. Start and end are optional parameters. If
start, end and ticker are all specified, default 'one' value will be
abandoned.
* secID: string; the security ID in the form of '000001.XSHG', i.e.
ticker.exchange.
* ticker: string; the trading code in the form of '000001'.
* one: string; Date mark formatted in 'YYYYMMDD'.
Specifies one date on which data of all tickers are to be requested.
Note that to get effective json data response, at least one parameter
in {secID, ticker, tradeDate} should be entered.
* output: enumeration of strings; the format of output that will be
returned. default is 'df', optionals are:
- 'df': returns History object,
where ret.body is a dataframe.
- 'list': returns a list of dictionaries.
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktEqud.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
#return resp
except AssertionError: return 0
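    # Illustrative sketch (assumed usage, not in the original source): daily
    # bars for one ticker over a date range, given an api = PyApi(Config())
    # instance; when start, end and ticker are all given, the single-date
    # 'one' parameter is dropped. The field list is a placeholder.
    #
    #   df = api.get_equity_D1(field='closePrice,turnoverVol',
    #                          start='20150101', end='20150201',
    #                          ticker='000001').body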
def get_block_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_repo_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513):
"""
"""
pass
def get_bond_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one bond instrument.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for bonds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- turnoverRate double.
- dealAmount* integer.
- accrInterest* double.
- YTM(yieldToMaturity)* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktBondd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one future contract.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for future contracts)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- contractObject* string.
- contractMark* string.
- preSettlePrice* double.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol integer.
- turnoverValue integer.
- openInt* integer.
- CHG* double.
- CHG1* double.
- CHGPct* double.
- mainCon* integer (0/1 flag).
- smainCon* integer (0/1 flag).
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFutd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_future_main_D1(self, field='', start='', end='', mark='',
obj='', main=1, one=20150513):
"""
"""
pass
def get_fund_D1(self, field='', start='', end='', secID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one mutual fund.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for funds)
- secID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
- discount* double.
- discountRatio* double.
- circulationShares* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktFundd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_index_D1(self, field='', start='', end='', indexID='',
ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one stock index.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for indices)
- indexID string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- porgFullName* string.
- exchangeCD string.
- preCloseIndex double.
- openIndex double.
- highestIndex double.
- lowestIndex double.
- closeIndex double.
- turnoverVol double.
- turnoverValue double.
- CHG* double.
- CHGPct* double.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktIdxd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'indexID': indexID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_option_D1(self, field='', start='', end='', secID='',
optID='' ,ticker='', one=20150513, output='df'):
"""
Get 1-day interday bar data of one option contact.
parameters
----------
* field: string; variables that are to be requested. Available variables
are: (* is unique for options)
- secID string.
- optID* string.
- tradeDate date(?).
- ticker string.
- secShortName string.
- exchangeCD string.
- preClosePrice double.
- openPrice double.
- highestPrice double.
- lowestPrice double.
- closePrice double.
- settlePrice* double.
- turnoverVol double.
- turnoverValue double.
- openInt* integer.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
if start and end and ticker:
one = '' # while user specifies start/end, covers tradeDate.
url = '{}/{}/api/market/getMktOptd.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'optID': optID,
'ticker': ticker,
'tradeDate': one
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
if output == 'df':
data = History(resp.json())
elif output == 'list':
data = resp.json()['data']
return data
except AssertionError: return 0
def get_stockFactor_D1(self, field='', secID='',
ticker='000001', start=20130701, end=20130801):
"""
Get 1-day interday factor data for stocks.
parameters
----------
* field: string; variables that are to be requested.
Field is an optional parameter, default setting returns all fields.
* start, end, secID, ticker, one, output
string, string, string, string, string, string(enum)
Same as above, reference: get_equity_D1().
"""
url = '{}/{}/api/market/getStockFactorsDateRange.json'.format(
self._domain, self._version)
params = {
'field': field,
'beginDate': start,
'endDate': end,
'secID': secID,
'ticker': ticker
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
#----------------------------------------------------------------------
# directly get methods - Fundamental Data
def get_balanceSheet(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtBS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
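    # Illustrative sketch (assumed usage, not in the original source): pull
    # balance sheet records published during 2014 for a single ticker, given
    # an api = PyApi(Config()) instance.
    #
    #   bs = api.get_balanceSheet(ticker='000001',
    #                             pubStart='20140101', pubEnd='20141231')
    #   print bs.body.head()   # History.body is a pandas DataFrame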
def get_balanceSheet_bnk(self):
"""
"""
pass
def get_balanceSheet_sec(self):
"""
"""
pass
def get_balanceSheet_ins(self):
"""
"""
pass
def get_balanceSheet_ind(self):
"""
"""
pass
def get_cashFlow(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtCF.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_cashFlow_bnk(self):
"""
"""
pass
def get_cashFlow_sec(self):
"""
"""
pass
def get_cashFlow_ins(self):
"""
"""
pass
def get_cashFlow_ind(self):
"""
"""
pass
def get_incomeStatement(self, field='', secID='',
start='', end='', pubStart='', pubEnd='',
reportType='', ticker='000001'):
"""
"""
url = '{}/{}/api/fundamental/getFdmtIS.json'.format(
self._domain, self._version)
params = {
'field': field,
'secID': secID,
'ticker': ticker,
'beginDate': start,
'endDate': end,
'publishDateBegin': pubStart,
'publishDateEnd': pubEnd,
'reportType': reportType
}
try:
resp = self.__access(url=url, params=params)
assert len(resp.json()) > 0
data = History(resp.json())
return data
except AssertionError: return 0
def get_incomeStatement_bnk(self):
"""
"""
pass
def get_incomeStatement_sec(self):
"""
"""
pass
def get_incomeStatement_ins(self):
"""
"""
pass
def get_incomeStatement_ind(self):
"""
"""
pass
#----------------------------------------------------------------------
# multi-threading download for database storage.
def __drudgery(self, id, db, indexType,
start, end, tasks, target):
"""
        Basic drudgery function.
        This method loops over a list of tasks (tickers) and gets data using
        the target api.get_# method for all of those tickers.
        A new field, 'date' or 'dateTime' (for intraday data), is
        automatically added to every json-like document; it holds the
        datetime.datetime() formatted date(time) mark. With the MongoDB setup
        used in this module, this field should be the unique index for all
        collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded downloading of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
        * indexType: string(enum): 'date' or 'datetime', specifies how the
        collection index is formatted.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
* target: method; the api.get_# method that is to be called by
drudgery function.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
if indexType == 'date':
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
elif indexType == 'datetime':
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
else:
raise ValueError
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = target(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
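    # Illustrative note (not in the original source): each drudgery call works
    # through its own chunk of tickers and writes one MongoDB collection per
    # ticker, indexed by the injected 'date'/'dateTime' field, e.g. (given an
    # api instance and a pymongo database db)
    #
    #   api.get_equity_D1_drudgery(id=0, db=db,
    #                              start='20140101', end='20150101',
    #                              tasks=['000001', '000002'])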
def get_equity_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_equity_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_equity_D1)
def get_future_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_future_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_future_D1)
def get_index_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_index_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_index_D1)
def get_bond_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_bond_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_bond_D1)
def get_fund_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_fund_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_fund_D1)
def get_option_D1_drudgery(self, id, db, start, end, tasks=[]):
"""
call __drudgery targeting at get_option_D1()
"""
self.__drudgery(id=id, db=db,
indexType = 'date',
start = start,
end = end,
tasks = tasks,
target = self.get_option_D1)
#----------------------------------------------------------------------
def __overlord(self, db, start, end, dName,
target1, target2, sessionNum):
"""
Basic controller of multithreading request.
Generates a list of all tickers, creates threads and distribute
tasks to individual #_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
        go into. Note that this database is passed on to every
        drudgery function created by the controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* dName: string; the path of file where all tickers' infomation
are stored in.
* target1: method; targetting api method that overlord calls
to get tasks list.
* target2: method; the corresponding drudgery function.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = target1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = target2,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
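    # Illustrative sketch (assumed usage, not in the original source): download
    # daily equity bars into a local MongoDB with 30 worker threads. The
    # database name below is a placeholder.
    #
    #   client = MongoClient('localhost', 27017)
    #   db = client['DATAYES_D1']
    #   api.get_equity_D1_mongod(db, start='20140101', end='20150101',
    #                            sessionNum=30)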
def get_equity_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get equity D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/equTicker.json',
target1 = self.get_equity_D1,
target2 = self.get_equity_D1_drudgery,
sessionNum = sessionNum)
def get_future_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get future D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/futTicker.json',
target1 = self.get_future_D1,
target2 = self.get_future_D1_drudgery,
sessionNum = sessionNum)
def get_index_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get index D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/idxTicker.json',
target1 = self.get_index_D1,
target2 = self.get_index_D1_drudgery,
sessionNum = sessionNum)
def get_bond_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get bond D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/bndTicker.json',
target1 = self.get_bond_D1,
target2 = self.get_bond_D1_drudgery,
sessionNum = sessionNum)
def get_fund_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get fund D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/fudTicker.json',
target1 = self.get_fund_D1,
target2 = self.get_fund_D1_drudgery,
sessionNum = sessionNum)
def get_option_D1_mongod(self, db, start, end, sessionNum=30):
"""
Controller of get option D1 method.
"""
self.__overlord(db = db,
start = start,
end = end,
dName = 'names/optTicker.json',
target1 = self.get_option_D1,
target2 = self.get_option_D1_drudgery,
sessionNum = sessionNum)
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distribute
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
        go into. Note that this database is passed on to every
        drudgery function created by the controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
# to be deprecated
def get_equity_D1_drudgery_(self, id, db,
start, end, tasks=[]):
"""
Drudgery function of getting equity_D1 bars.
        This method loops over a list of tasks (tickers) and gets D1 bars
        for all of these tickers. A new field 'date' is automatically
        added to every json-like document; it holds the datetime.datetime()
        formatted date mark. With the default MongoDB setup used in this
        module, this field should be the unique index for all collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded downloading of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_dt: datetime.strptime(str_dt,'%Y-%m-%d')
update_dt = lambda d: d.update({'date':todt(d['tradeDate'])})
# loop over all tickers in task list.
k, n = 1, len(tasks)
for ticker in tasks:
try:
data = self.get_equity_D1(start = start,
end = end,
ticker = ticker,
output = 'list')
assert len(data) >= 1
map(update_dt, data) # add datetime feature to docs.
coll = db[ticker]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
                # If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
self.get_equity_D1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_D1_mongod_(self, db, start, end, sessionNum=30):
"""
Outer controller of get equity D1 method.
Generates a list of all tickers, creates threads and distribute
tasks to individual get_equity_D1_drudgery() functions.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
        go into. Note that this database is passed on to every
        drudgery function created by the controller.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# initialize task list.
dName = 'names/equTicker.json'
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
allTickers = json.loads(jsonFile.read())
jsonFile.close()
else:
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
chunkSize = len(allTickers)/sessionNum
taskLists = [allTickers[k:k+chunkSize] for k in range(
0, len(allTickers), chunkSize)]
k = 0
for tasks in taskLists:
thrd = Thread(target = self.get_equity_D1_drudgery,
args = (k, db, start, end, tasks))
thrd.start()
k += 1
return 1
#----------------------------------------------------------------------#
def get_equity_M1_drudgery(self, id, db,
start, end, tasks=[]):
"""
        Drudgery function of getting equity_M1 bars.
        This method loops over a list of tasks (secIDs) and gets M1 bars
        for all of these securities. A new field 'dateTime', combining the
        Y-m-d formatted date part and the H:M time part, is automatically
        added to every json-like document. It is a datetime.datetime()
        timestamp object. In this module, this field should be the unique
        index for all collections.
        By programmatically creating and assigning tasks to drudgery
        functions, multi-threaded downloading of data can be achieved.
parameters
----------
* id: integer; the ID of Drudgery session.
* db: pymongo.db object; the database which collections of bars will
go into.
* start, end: string; Date mark formatted in 'YYYYMMDD'. Specifies the
start/end point of collections of bars. Note that to ensure the
success of every requests, the range amid start and end had better be
no more than one month.
* tasks: list of strings; the tickers that this drudgery function
loops over.
"""
if len(tasks) == 0:
return 0
# str to datetime inline functions.
todt = lambda str_d, str_t: datetime.strptime(
str_d + ' ' + str_t,'%Y-%m-%d %H:%M')
update_dt = lambda d: d.update(
{'dateTime':todt(d['dataDate'], d['barTime'])})
k, n = 1, len(tasks)
for secID in tasks:
try:
data = self.get_equity_M1(start = start,
end = end,
secID = secID,
output = 'list')
map(update_dt, data) # add datetime feature to docs.
coll = db[secID]
coll.insert_many(data)
print '[API|Session{}]: '.format(id) + \
'Finished {} in {}.'.format(k, n)
k += 1
except ConnectionError:
                # If the connection chokes, stand by for 1 sec and invoke again.
time.sleep(1)
                self.get_equity_M1_drudgery(
id, db, start, end, tasks)
except AssertionError:
msg = '[API|Session{}]: '.format(id) + \
'Empty dataset in the response.'
print msg
pass
except Exception, e:
msg = '[API|Session{}]: '.format(id) + \
'Exception encountered when ' + \
'requesting data; ' + str(e)
print msg
pass
def get_equity_M1_interMonth(self, db, id,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
tasks=[]):
"""
Mid-level wrapper of get equity M1 method.
Get 1-minute bar between specified start year and ending year for
more than one tickers in tasks list.
parameters
----------
* db: pymongo.db object; the database which collections of bars will
go into. Note that this database will be transferred to every
drudgery functions created by controller.
* id: integer; the ID of wrapper session.
        * startYr, endYr: integer; the start and end years between which the
        1-minute bar data is fetched month by month using the
        get_equity_M1_drudgery() function.
        Default values are the current year and two years before now.
        The complete time range is sub-divided into months, and threads
        are deployed for each of these months.
- example
-------
Suppose .now() is Auguest 15th 2015. (20150815)
startYr, endYr = 2014, 2015.
then two list of strings will be generated:
ymdStringStart = ['20140102','20140202', ... '20150802']
ymdStringEnd = ['20140101','20140201', ... '20150801']
the sub-timeRanges passed to drudgeries will be:
(start, end): (20140102, 20140201), (20140202, 20140301),
..., (20150702, 20150801).
So the actual time range is 20140102 - 20150801.
        * sessionNum: integer; the number of threads that will be deployed.
Concretely, the list of all tickers will be sub-divided into chunks,
where chunkSize = len(allTickers)/sessionNum.
"""
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'02' for k in range(1,13)]
ymdStringStart = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStringEnd = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
k = 0
for t in range(len(ymdStringEnd)-1):
start = ymdStringStart[t]
end = ymdStringEnd[t+1]
subID = str(id) + '_' + str(k)
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (subID, db, start, end, tasks))
thrd.start()
k += 1
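    # Illustrative sketch (assumed usage, not in the original source): fetch
    # two years of 1-minute bars, month by month, for a short list of secIDs,
    # given an api instance and a pymongo database db.
    #
    #   api.get_equity_M1_interMonth(db, id=0, startYr=2014, endYr=2015,
    #                                tasks=['000001.XSHG', '600000.XSHG'])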
def get_equity_M1_all(self, db,
startYr=datetime.now().year-2,
endYr=datetime.now().year,
splitNum=10):
"""
"""
"""
# initialize task list.
data = self.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
chunkSize = len(allSecIds)/splitNum
taskLists = [allSecIds[k:k+chunkSize] for k in range(
0, len(allSecIds), chunkSize)]
# Construct yyyymmdd strings.(as ymdStrings list)
now = datetime.now()
years = [str(y) for y in range(startYr, endYr+1)]
monthDates = [(2-len(str(k)))*'0'+str(k)+'01' for k in range(1,13)]
ymdStrings = [y+md for y in years for md in monthDates if (
datetime.strptime(y+md,'%Y%m%d')<=now)]
print taskLists[0]
print ymdStrings
k = 0
for t in range(len(ymdStrings)-1):
start = ymdStrings[t]
end = ymdStrings[t+1]
thrd = Thread(target = self.get_equity_M1_drudgery,
args = (k, db, start, end, taskLists[0]))
thrd.start()
k += 1
return 1
"""
pass
| mit |
ferdinandvwyk/gs2_analysis | rms_fluctuation.py | 2 | 1541 | # This script calculates the RMS amplitude of the turbulent density
# fluctuations over 100-time-step windows and writes the mean and standard
# deviation to a JSON file
# Standard
import os
import sys
import json
# Third Party
import numpy as np
from netCDF4 import Dataset
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
plt.rcParams.update({'figure.autolayout': True})
mpl.rcParams['axes.unicode_minus']=False
#local
from run import Run
import plot_style
import field_helper as field
plot_style.white()
pal = sns.color_palette('deep')
def average_time_windows(n):
"""
Calculate a mean over 100 time step windows and return array of means.
"""
nt = n.shape[0]
nx = n.shape[1]
ny = n.shape[2]
time_window_size = 100
ntime_windows = int(nt/time_window_size)
rms_t = np.empty([ntime_windows], dtype=float)
for i in range(ntime_windows):
        rms_t[i] = np.sqrt(np.mean(
            n[i*time_window_size:(i+1)*time_window_size, :, :]**2))
return rms_t
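# Illustrative note (not part of the original script): for an ntot array of
# shape (nt, nx, ny) with nt = 500, average_time_windows returns the RMS of
# each of the int(500/100) = 5 non-overlapping 100-step windows, e.g.
#
#   rms_t = average_time_windows(np.random.randn(500, 16, 16))
#   print(rms_t.shape)   # (5,)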
if __name__ == '__main__':
run = Run(sys.argv[1])
run.read_ntot()
os.system('mkdir -p ' + run.run_dir + 'analysis/amplitude')
res = {}
rms_i_t = average_time_windows(run.ntot_i)
res['ntot_i_rms'] = np.mean(rms_i_t)
res['ntot_i_err'] = np.std(rms_i_t)
rms_e_t = average_time_windows(run.ntot_e)
res['ntot_e_rms'] = np.mean(rms_e_t)
res['ntot_e_err'] = np.std(rms_e_t)
json.dump(res, open(run.run_dir + 'analysis/amplitude/rms.json', 'w'),
indent=2)
| gpl-2.0 |
siddharthteotia/arrow | python/pyarrow/tests/test_convert_pandas.py | 1 | 57443 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
from datetime import date, datetime, time, timedelta
import decimal
import json
import pytest
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.util.testing as tm
from pyarrow.compat import u, PY2
import pyarrow as pa
import pyarrow.types as patypes
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
def _alltypes_example(size=100):
return pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
        # TODO(wesm): Pandas only supports ns resolution, Arrow supports s, ms,
# us, ns
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
def _check_pandas_roundtrip(df, expected=None, nthreads=1,
expected_schema=None,
check_dtype=True, schema=None,
preserve_index=False,
as_batch=False):
klass = pa.RecordBatch if as_batch else pa.Table
table = klass.from_pandas(df, schema=schema,
preserve_index=preserve_index,
nthreads=nthreads)
result = table.to_pandas(nthreads=nthreads)
if expected_schema:
assert table.schema.equals(expected_schema)
if expected is None:
expected = df
tm.assert_frame_equal(result, expected, check_dtype=check_dtype,
check_index_type=('equiv' if preserve_index
else False))
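# Minimal smoke test added for illustration (not part of the original suite):
# a plain int64 column should survive the pandas <-> Arrow roundtrip unchanged
# when checked via the helper above.
def test_simple_int_roundtrip_sketch():
    df = pd.DataFrame({'a': [1, 2, 3]})
    _check_pandas_roundtrip(df)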
def _check_series_roundtrip(s, type_=None):
arr = pa.array(s, from_pandas=True, type=type_)
result = pd.Series(arr.to_pandas(), name=s.name)
if patypes.is_timestamp(arr.type) and arr.type.tz is not None:
result = (result.dt.tz_localize('utc')
.dt.tz_convert(arr.type.tz))
tm.assert_series_equal(s, result)
def _check_array_roundtrip(values, expected=None, mask=None,
type=None):
arr = pa.array(values, from_pandas=True, mask=mask, type=type)
result = arr.to_pandas()
values_nulls = pd.isnull(values)
if mask is None:
assert arr.null_count == values_nulls.sum()
else:
assert arr.null_count == (mask | values_nulls).sum()
if mask is None:
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
else:
expected = pd.Series(np.ma.masked_array(values, mask=mask))
tm.assert_series_equal(pd.Series(result), expected,
check_names=False)
def _check_array_from_pandas_roundtrip(np_array):
arr = pa.array(np_array, from_pandas=True)
result = arr.to_pandas()
npt.assert_array_equal(result, np_array)
class TestConvertMetadata(object):
"""
Conversion tests for Pandas metadata & indices.
"""
def test_non_string_columns(self):
df = pd.DataFrame({0: [1, 2, 3]})
table = pa.Table.from_pandas(df)
assert table.column(0).name == '0'
def test_column_index_names_are_preserved(self):
df = pd.DataFrame({'data': [1, 2, 3]})
df.columns.names = ['a']
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns(self):
columns = pd.MultiIndex.from_arrays([
['one', 'two'], ['X', 'Y']
])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_with_dtypes(self):
columns = pd.MultiIndex.from_arrays(
[
['one', 'two'],
pd.DatetimeIndex(['2017-08-01', '2017-08-02']),
],
names=['level_1', 'level_2'],
)
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_multiindex_columns_unicode(self):
columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']])
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns)
_check_pandas_roundtrip(df, preserve_index=True)
def test_integer_index_column(self):
df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')])
_check_pandas_roundtrip(df, preserve_index=True)
def test_index_metadata_field_name(self):
# test None case, and strangely named non-index columns
df = pd.DataFrame(
[(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)],
index=pd.MultiIndex.from_arrays(
[['c', 'b', 'a'], [3, 2, 1]],
names=[None, 'foo']
),
columns=['a', None, '__index_level_0__'],
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
col1, col2, col3, idx0, foo = js['columns']
assert col1['name'] == 'a'
assert col1['name'] == col1['field_name']
assert col2['name'] is None
assert col2['field_name'] == 'None'
assert col3['name'] == '__index_level_0__'
assert col3['name'] == col3['field_name']
idx0_name, foo_name = js['index_columns']
assert idx0_name == '__index_level_0__'
assert idx0['field_name'] == idx0_name
assert idx0['name'] is None
assert foo_name == 'foo'
assert foo['field_name'] == foo_name
assert foo['name'] == foo_name
def test_categorical_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), dtype='category')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'categorical'
assert column_indexes['numpy_type'] == 'int8'
md = column_indexes['metadata']
assert md['num_categories'] == 3
assert md['ordered'] is False
def test_string_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.Index(list('def'), name='stringz')
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] == 'stringz'
assert column_indexes['name'] == column_indexes['field_name']
assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode')
assert column_indexes['numpy_type'] == 'object'
md = column_indexes['metadata']
if not PY2:
assert len(md) == 1
assert md['encoding'] == 'UTF-8'
else:
assert md is None or 'encoding' not in md
def test_datetimetz_column_index(self):
df = pd.DataFrame(
[(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)],
columns=pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
)
t = pa.Table.from_pandas(df, preserve_index=True)
raw_metadata = t.schema.metadata
js = json.loads(raw_metadata[b'pandas'].decode('utf8'))
column_indexes, = js['column_indexes']
assert column_indexes['name'] is None
assert column_indexes['pandas_type'] == 'datetimetz'
assert column_indexes['numpy_type'] == 'datetime64[ns]'
md = column_indexes['metadata']
assert md['timezone'] == 'America/New_York'
def test_datetimetz_row_index(self):
df = pd.DataFrame({
'a': pd.date_range(
start='2017-01-01', periods=3, tz='America/New_York'
)
})
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_categorical_row_index(self):
df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]})
df['a'] = df.a.astype('category')
df = df.set_index('a')
_check_pandas_roundtrip(df, preserve_index=True)
def test_duplicate_column_names_does_not_crash(self):
df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa'))
with pytest.raises(ValueError):
pa.Table.from_pandas(df)
def test_dictionary_indices_boundscheck(self):
# ARROW-1658. No validation of indices leads to segfaults in pandas
indices = [[0, 1], [0, -1]]
for inds in indices:
arr = pa.DictionaryArray.from_arrays(inds, ['a'])
batch = pa.RecordBatch.from_arrays([arr], ['foo'])
table = pa.Table.from_batches([batch, batch, batch])
with pytest.raises(pa.ArrowException):
arr.to_pandas()
with pytest.raises(pa.ArrowException):
table.to_pandas()
def test_unicode_with_unicode_column_and_index(self):
df = pd.DataFrame({u'あ': [u'い']}, index=[u'う'])
_check_pandas_roundtrip(df, preserve_index=True)
def test_mixed_unicode_column_names(self):
df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う'])
# TODO(phillipc): Should this raise?
with pytest.raises(AssertionError):
_check_pandas_roundtrip(df, preserve_index=True)
def test_binary_column_name(self):
column_data = [u'い']
data = {u'あ'.encode('utf8'): column_data}
df = pd.DataFrame(data)
        # we can't use _check_pandas_roundtrip here because our metadata
# is always decoded as utf8: even if binary goes in, utf8 comes out
t = pa.Table.from_pandas(df, preserve_index=True)
df2 = t.to_pandas()
assert df.values[0] == df2.values[0]
assert df.index.values[0] == df2.index.values[0]
assert df.columns[0] == df2.columns[0].encode('utf8')
def test_multiindex_duplicate_values(self):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
result_df = table.to_pandas()
tm.assert_frame_equal(result_df, df)
def test_metadata_with_mixed_types(self):
df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']})
table = pa.Table.from_pandas(df)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'bytes'
assert data_column['numpy_type'] == 'object'
def test_list_metadata(self):
df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]})
schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))])
table = pa.Table.from_pandas(df, schema=schema)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'list[int64]'
assert data_column['numpy_type'] == 'object'
def test_decimal_metadata(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
table = pa.Table.from_pandas(expected)
metadata = table.schema.metadata
assert b'mixed' not in metadata[b'pandas']
js = json.loads(metadata[b'pandas'].decode('utf8'))
data_column = js['columns'][0]
assert data_column['pandas_type'] == 'decimal'
assert data_column['numpy_type'] == 'object'
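        # Illustrative note: precision 26 == 15 integer digits + 11 fractional
        # digits in the sample values above; scale 11 is the number of digits
        # after the decimal point.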
assert data_column['metadata'] == {'precision': 26, 'scale': 11}
def test_table_column_subset_metadata(self):
# ARROW-1883
df = pd.DataFrame({
'a': [1, 2, 3],
'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']])
# non-default index
for index in [
pd.Index(['a', 'b', 'c'], name='index'),
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]:
df = pd.DataFrame({'a': [1, 2, 3],
'b': [.1, .2, .3]}, index=index)
table = pa.Table.from_pandas(df)
table_subset = table.remove_column(1)
result = table_subset.to_pandas()
tm.assert_frame_equal(result, df[['a']])
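            # Removing column 1 again strips the serialized index column itself,
            # so this second roundtrip comes back with a default RangeIndex
            # (hence the reset_index below).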
table_subset2 = table_subset.remove_column(1)
result = table_subset2.to_pandas()
tm.assert_frame_equal(result, df[['a']].reset_index(drop=True))
def test_empty_list_metadata(self):
# Create table with array of empty lists, forced to have type
# list(string) in pyarrow
c1 = [["test"], ["a", "b"], None]
c2 = [[], [], []]
arrays = OrderedDict([
('c1', pa.array(c1, type=pa.list_(pa.string()))),
('c2', pa.array(c2, type=pa.list_(pa.string()))),
])
rb = pa.RecordBatch.from_arrays(
list(arrays.values()),
list(arrays.keys())
)
tbl = pa.Table.from_batches([rb])
# First roundtrip changes schema, because pandas cannot preserve the
# type of empty lists
df = tbl.to_pandas()
tbl2 = pa.Table.from_pandas(df, preserve_index=True)
md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8'))
# Second roundtrip
df2 = tbl2.to_pandas()
expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)]))
tm.assert_frame_equal(df2, expected)
assert md2['columns'] == [
{
'name': 'c1',
'field_name': 'c1',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[unicode]',
},
{
'name': 'c2',
'field_name': 'c2',
'metadata': None,
'numpy_type': 'object',
'pandas_type': 'list[empty]',
},
{
'name': None,
'field_name': '__index_level_0__',
'metadata': None,
'numpy_type': 'int64',
'pandas_type': 'int64',
}
]
class TestConvertPrimitiveTypes(object):
"""
Conversion tests for primitive (e.g. numeric) types.
"""
def test_float_no_nulls(self):
data = {}
fields = []
dtypes = [('f4', pa.float32()), ('f8', pa.float64())]
num_values = 100
for numpy_dtype, arrow_dtype in dtypes:
values = np.random.randn(num_values)
data[numpy_dtype] = values.astype(numpy_dtype)
fields.append(pa.field(numpy_dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_float_nulls(self):
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
dtypes = [('f4', pa.float32()), ('f8', pa.float64())]
names = ['f4', 'f8']
expected_cols = []
arrays = []
fields = []
for name, arrow_dtype in dtypes:
values = np.random.randn(num_values).astype(name)
arr = pa.array(values, from_pandas=True, mask=null_mask)
arrays.append(arr)
fields.append(pa.field(name, arrow_dtype))
values[null_mask] = np.nan
expected_cols.append(values)
ex_frame = pd.DataFrame(dict(zip(names, expected_cols)),
columns=names)
table = pa.Table.from_arrays(arrays, names)
assert table.schema.equals(pa.schema(fields))
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_integer_no_nulls(self):
data = OrderedDict()
fields = []
numpy_dtypes = [
('i1', pa.int8()), ('i2', pa.int16()),
('i4', pa.int32()), ('i8', pa.int64()),
('u1', pa.uint8()), ('u2', pa.uint16()),
('u4', pa.uint32()), ('u8', pa.uint64()),
('longlong', pa.int64()), ('ulonglong', pa.uint64())
]
num_values = 100
for dtype, arrow_dtype in numpy_dtypes:
info = np.iinfo(dtype)
values = np.random.randint(max(info.min, np.iinfo(np.int_).min),
min(info.max, np.iinfo(np.int_).max),
size=num_values)
data[dtype] = values.astype(dtype)
fields.append(pa.field(dtype, arrow_dtype))
df = pd.DataFrame(data)
schema = pa.schema(fields)
_check_pandas_roundtrip(df, expected_schema=schema)
def test_integer_with_nulls(self):
# pandas requires upcast to float dtype
int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
num_values = 100
null_mask = np.random.randint(0, 10, size=num_values) < 3
expected_cols = []
arrays = []
for name in int_dtypes:
values = np.random.randint(0, 100, size=num_values)
arr = pa.array(values, mask=null_mask)
arrays.append(arr)
expected = values.astype('f8')
expected[null_mask] = np.nan
expected_cols.append(expected)
ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)),
columns=int_dtypes)
table = pa.Table.from_arrays(arrays, int_dtypes)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_array_from_pandas_type_cast(self):
arr = np.arange(10, dtype='int64')
target_type = pa.int8()
result = pa.array(arr, type=target_type)
expected = pa.array(arr.astype('int8'))
assert result.equals(expected)
def test_boolean_no_nulls(self):
num_values = 100
np.random.seed(0)
df = pd.DataFrame({'bools': np.random.randn(num_values) > 0})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_boolean_nulls(self):
# pandas requires upcast to object dtype
num_values = 100
np.random.seed(0)
mask = np.random.randint(0, 10, size=num_values) < 3
values = np.random.randint(0, 10, size=num_values) < 5
arr = pa.array(values, mask=mask)
expected = values.astype(object)
expected[mask] = None
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
ex_frame = pd.DataFrame({'bools': expected})
table = pa.Table.from_arrays([arr], ['bools'])
assert table.schema.equals(schema)
result = table.to_pandas()
tm.assert_frame_equal(result, ex_frame)
def test_float_object_nulls(self):
arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object)
df = pd.DataFrame({'floats': arr})
expected = pd.DataFrame({'floats': pd.to_numeric(arr)})
field = pa.field('floats', pa.float64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_int_object_nulls(self):
arr = np.array([None, 1, np.int64(3)] * 5, dtype=object)
df = pd.DataFrame({'ints': arr})
expected = pd.DataFrame({'ints': pd.to_numeric(arr)})
field = pa.field('ints', pa.int64())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected=expected,
expected_schema=schema)
def test_boolean_object_nulls(self):
arr = np.array([False, None, True] * 100, dtype=object)
df = pd.DataFrame({'bools': arr})
field = pa.field('bools', pa.bool_())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_all_nulls_cast_numeric(self):
arr = np.array([None], dtype=object)
def _check_type(t):
a2 = pa.array(arr, type=t)
assert a2.type == t
assert a2[0].as_py() is None
_check_type(pa.int32())
_check_type(pa.float64())
class TestConvertDateTimeLikeTypes(object):
"""
Conversion tests for datetime- and timestamp-like types (date64, etc.).
"""
def test_timestamps_notimezone_no_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_notimezone_nulls(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
field = pa.field('datetime64', pa.timestamp('ns'))
schema = pa.schema([field])
_check_pandas_roundtrip(
df,
expected_schema=schema,
)
def test_timestamps_with_timezone(self):
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123',
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
_check_series_roundtrip(df['datetime64'])
        # drop in a null and use ns instead of ms
df = pd.DataFrame({
'datetime64': np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
})
df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern')
.to_frame())
_check_pandas_roundtrip(df)
def test_python_datetime(self):
# ARROW-2106
date_array = [datetime.today() + timedelta(days=x) for x in range(10)]
df = pd.DataFrame({
'datetime': pd.Series(date_array, dtype=object)
})
table = pa.Table.from_pandas(df)
assert isinstance(table[0].data.chunk(0), pa.TimestampArray)
result = table.to_pandas()
expected_df = pd.DataFrame({
'datetime': date_array
})
tm.assert_frame_equal(expected_df, result)
def test_datetime64_to_date32(self):
# ARROW-1718
arr = pa.array([date(2017, 10, 23), None])
c = pa.Column.from_array("d", arr)
s = c.to_pandas()
arr2 = pa.Array.from_pandas(s, type=pa.date32())
assert arr2.equals(arr.cast('date32'))
def test_date_infer(self):
df = pd.DataFrame({
'date': [date(2000, 1, 1),
None,
date(1970, 1, 1),
date(2040, 2, 26)]})
table = pa.Table.from_pandas(df, preserve_index=False)
field = pa.field('date', pa.date32())
schema = pa.schema([field])
assert table.schema.equals(schema)
result = table.to_pandas()
expected = df.copy()
expected['date'] = pd.to_datetime(df['date'])
tm.assert_frame_equal(result, expected)
def test_date_mask(self):
arr = np.array([date(2017, 4, 3), date(2017, 4, 4)],
dtype='datetime64[D]')
mask = [True, False]
result = pa.array(arr, mask=np.array(mask))
expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]')
expected = pa.array(expected, from_pandas=True)
assert expected.equals(result)
def test_date_objects_typed(self):
arr = np.array([
date(2017, 4, 3),
None,
date(2017, 4, 4),
date(2017, 4, 5)], dtype=object)
arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32')
arr_i8 = arr_i4.astype('int64') * 86400000
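        # date32 stores days since the UNIX epoch (17259 days == 2017-04-03);
        # date64 stores milliseconds since the epoch, hence the factor of
        # 86400000 (ms per day) above.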
mask = np.array([False, True, False, False])
t32 = pa.date32()
t64 = pa.date64()
a32 = pa.array(arr, type=t32)
a64 = pa.array(arr, type=t64)
a32_expected = pa.array(arr_i4, mask=mask, type=t32)
a64_expected = pa.array(arr_i8, mask=mask, type=t64)
assert a32.equals(a32_expected)
assert a64.equals(a64_expected)
# Test converting back to pandas
colnames = ['date32', 'date64']
table = pa.Table.from_arrays([a32, a64], colnames)
table_pandas = table.to_pandas()
ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04',
'2017-04-05'],
dtype='datetime64[D]')
.astype('datetime64[ns]'))
ex_values[1] = pd.NaT.value
expected_pandas = pd.DataFrame({'date32': ex_values,
'date64': ex_values},
columns=colnames)
tm.assert_frame_equal(table_pandas, expected_pandas)
def test_dates_from_integers(self):
t1 = pa.date32()
t2 = pa.date64()
arr = np.array([17259, 17260, 17261], dtype='int32')
arr2 = arr.astype('int64') * 86400000
a1 = pa.array(arr, type=t1)
a2 = pa.array(arr2, type=t2)
expected = date(2017, 4, 3)
assert a1[0].as_py() == expected
assert a2[0].as_py() == expected
@pytest.mark.xfail(reason="not supported ATM",
raises=NotImplementedError)
def test_timedelta(self):
        # TODO(jreback): Pandas only supports ns resolution
# Arrow supports ??? for resolution
df = pd.DataFrame({
'timedelta': np.arange(start=0, stop=3 * 86400000,
step=86400000,
dtype='timedelta64[ms]')
})
pa.Table.from_pandas(df)
def test_pytime_from_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356)]
# microseconds
t1 = pa.time64('us')
aobjs = np.array(pytimes + [None], dtype=object)
parr = pa.array(aobjs)
assert parr.type == t1
assert parr[0].as_py() == pytimes[0]
assert parr[1].as_py() == pytimes[1]
assert parr[2] is pa.NA
# DataFrame
df = pd.DataFrame({'times': aobjs})
batch = pa.RecordBatch.from_pandas(df)
assert batch[0].equals(parr)
# Test ndarray of int64 values
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
a1 = pa.array(arr, type=pa.time64('us'))
assert a1[0].as_py() == pytimes[0]
a2 = pa.array(arr * 1000, type=pa.time64('ns'))
assert a2[0].as_py() == pytimes[0]
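        # Coarser resolutions truncate the microsecond component: the 'ms'
        # array keeps only whole milliseconds and the 's' array drops the
        # sub-second part entirely, as the replace() calls below show.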
a3 = pa.array((arr / 1000).astype('i4'),
type=pa.time32('ms'))
assert a3[0].as_py() == pytimes[0].replace(microsecond=1000)
a4 = pa.array((arr / 1000000).astype('i4'),
type=pa.time32('s'))
assert a4[0].as_py() == pytimes[0].replace(microsecond=0)
def test_arrow_time_to_pandas(self):
pytimes = [time(1, 2, 3, 1356),
time(4, 5, 6, 1356),
time(0, 0, 0)]
expected = np.array(pytimes[:2] + [None])
expected_ms = np.array([x.replace(microsecond=1000)
for x in pytimes[:2]] +
[None])
expected_s = np.array([x.replace(microsecond=0)
for x in pytimes[:2]] +
[None])
arr = np.array([_pytime_to_micros(v) for v in pytimes],
dtype='int64')
null_mask = np.array([False, False, True], dtype=bool)
a1 = pa.array(arr, mask=null_mask, type=pa.time64('us'))
a2 = pa.array(arr * 1000, mask=null_mask,
type=pa.time64('ns'))
a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask,
type=pa.time32('ms'))
a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask,
type=pa.time32('s'))
names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]']
batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names)
arr = a1.to_pandas()
assert (arr == expected).all()
arr = a2.to_pandas()
assert (arr == expected).all()
arr = a3.to_pandas()
assert (arr == expected_ms).all()
arr = a4.to_pandas()
assert (arr == expected_s).all()
df = batch.to_pandas()
expected_df = pd.DataFrame({'time64[us]': expected,
'time64[ns]': expected,
'time32[ms]': expected_ms,
'time32[s]': expected_s},
columns=names)
tm.assert_frame_equal(df, expected_df)
def test_numpy_datetime64_columns(self):
datetime64_ns = np.array([
'2007-07-13T01:23:34.123456789',
None,
'2006-01-13T12:34:56.432539784',
'2010-08-13T05:46:57.437699912'],
dtype='datetime64[ns]')
_check_array_from_pandas_roundtrip(datetime64_ns)
datetime64_us = np.array([
'2007-07-13T01:23:34.123456',
None,
'2006-01-13T12:34:56.432539',
'2010-08-13T05:46:57.437699'],
dtype='datetime64[us]')
_check_array_from_pandas_roundtrip(datetime64_us)
datetime64_ms = np.array([
'2007-07-13T01:23:34.123',
None,
'2006-01-13T12:34:56.432',
'2010-08-13T05:46:57.437'],
dtype='datetime64[ms]')
_check_array_from_pandas_roundtrip(datetime64_ms)
datetime64_s = np.array([
'2007-07-13T01:23:34',
None,
'2006-01-13T12:34:56',
'2010-08-13T05:46:57'],
dtype='datetime64[s]')
_check_array_from_pandas_roundtrip(datetime64_s)
def test_numpy_datetime64_day_unit(self):
datetime64_d = np.array([
'2007-07-13',
None,
'2006-01-15',
'2010-08-19'],
dtype='datetime64[D]')
_check_array_from_pandas_roundtrip(datetime64_d)
def test_array_from_pandas_date_with_mask(self):
m = np.array([True, False, True])
data = pd.Series([
date(1990, 1, 1),
date(1991, 1, 1),
date(1992, 1, 1)
])
result = pa.Array.from_pandas(data, mask=m)
expected = pd.Series([None, date(1991, 1, 1), None])
assert pa.Array.from_pandas(expected).equals(result)
class TestConvertStringLikeTypes(object):
"""
Conversion tests for string and binary types.
"""
def test_unicode(self):
repeats = 1000
values = [u'foo', None, u'bar', u'mañana', np.nan]
df = pd.DataFrame({'strings': values * repeats})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
_check_pandas_roundtrip(df, expected_schema=schema)
def test_bytes_to_binary(self):
values = [u('qux'), b'foo', None, 'bar', 'qux', np.nan]
df = pd.DataFrame({'strings': values})
table = pa.Table.from_pandas(df)
assert table[0].type == pa.binary()
values2 = [b'qux', b'foo', None, b'bar', b'qux', np.nan]
expected = pd.DataFrame({'strings': values2})
_check_pandas_roundtrip(df, expected)
@pytest.mark.large_memory
def test_bytes_exceed_2gb(self):
val = 'x' * (1 << 20)
df = pd.DataFrame({
'strings': np.array([val] * 4000, dtype=object)
})
arr = pa.array(df['strings'])
assert isinstance(arr, pa.ChunkedArray)
assert arr.num_chunks == 2
arr = None
table = pa.Table.from_pandas(df)
assert table[0].data.num_chunks == 2
def test_fixed_size_bytes(self):
values = [b'foo', None, b'bar', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
table = pa.Table.from_pandas(df, schema=schema)
assert table.schema[0].type == schema[0].type
assert table.schema[0].name == schema[0].name
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_fixed_size_bytes_does_not_accept_varying_lengths(self):
values = [b'foo', None, b'ba', None, None, b'hey']
df = pd.DataFrame({'strings': values})
schema = pa.schema([pa.field('strings', pa.binary(3))])
with pytest.raises(pa.ArrowInvalid):
pa.Table.from_pandas(df, schema=schema)
def test_table_empty_str(self):
values = ['', '', '', '', '']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result1 = table.to_pandas(strings_to_categorical=False)
expected1 = pd.DataFrame({'strings': values})
tm.assert_frame_equal(result1, expected1, check_dtype=True)
result2 = table.to_pandas(strings_to_categorical=True)
expected2 = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result2, expected2, check_dtype=True)
def test_table_str_to_categorical_without_na(self):
values = ['a', 'a', 'b', 'b', 'c']
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
def test_table_str_to_categorical_with_na(self):
values = [None, 'a', 'b', np.nan]
df = pd.DataFrame({'strings': values})
field = pa.field('strings', pa.string())
schema = pa.schema([field])
table = pa.Table.from_pandas(df, schema=schema)
result = table.to_pandas(strings_to_categorical=True)
expected = pd.DataFrame({'strings': pd.Categorical(values)})
tm.assert_frame_equal(result, expected, check_dtype=True)
with pytest.raises(pa.ArrowInvalid):
table.to_pandas(strings_to_categorical=True,
zero_copy_only=True)
class TestConvertDecimalTypes(object):
"""
    Conversion tests for decimal types.
"""
def test_decimal_32_from_pandas(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439'),
]
})
converted = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', pa.decimal128(7, 3))
schema = pa.schema([field])
assert converted.schema.equals(schema)
def test_decimal_32_to_pandas(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('-1234.123'),
decimal.Decimal('1234.439'),
]
})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_64_from_pandas(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731'),
]
})
converted = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', pa.decimal128(12, 6))
schema = pa.schema([field])
assert converted.schema.equals(schema)
def test_decimal_64_to_pandas(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('-129934.123331'),
decimal.Decimal('129534.123731'),
]
})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
def test_decimal_128_from_pandas(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
converted = pa.Table.from_pandas(expected, preserve_index=False)
field = pa.field('decimals', pa.decimal128(26, 11))
schema = pa.schema([field])
assert converted.schema.equals(schema)
def test_decimal_128_to_pandas(self):
expected = pd.DataFrame({
'decimals': [
decimal.Decimal('394092382910493.12341234678'),
-decimal.Decimal('314292388910493.12343437128'),
]
})
converted = pa.Table.from_pandas(expected)
df = converted.to_pandas()
tm.assert_frame_equal(df, expected)
class TestListTypes(object):
"""
Conversion tests for list<> types.
"""
def test_column_of_arrays(self):
df, schema = dataframe_with_arrays()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
assert table.schema.equals(schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_arrays_to_py(self):
# Test regression in ARROW-1199 not caught in above test
dtype = 'i1'
arr = np.array([
np.arange(10, dtype=dtype),
np.arange(5, dtype=dtype),
None,
np.arange(1, dtype=dtype)
])
type_ = pa.list_(pa.int8())
parr = pa.array(arr, type=type_)
assert parr[0].as_py() == list(range(10))
assert parr[1].as_py() == list(range(5))
assert parr[2].as_py() is None
assert parr[3].as_py() == [0]
def test_column_of_lists(self):
df, schema = dataframe_with_lists()
_check_pandas_roundtrip(df, schema=schema, expected_schema=schema)
table = pa.Table.from_pandas(df, schema=schema, preserve_index=False)
assert table.schema.equals(schema)
for column in df.columns:
field = schema.field_by_name(column)
_check_array_roundtrip(df[column], type=field.type)
def test_column_of_lists_first_empty(self):
# ARROW-2124
num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]]
series = pd.Series([np.array(s, dtype=float) for s in num_lists])
arr = pa.array(series)
result = pd.Series(arr.to_pandas())
tm.assert_series_equal(result, series)
def test_column_of_lists_chunked(self):
# ARROW-1357
df = pd.DataFrame({
'lists': np.array([
[1, 2],
None,
[2, 3],
[4, 5],
[6, 7],
[8, 9]
], dtype=object)
})
schema = pa.schema([
pa.field('lists', pa.list_(pa.int64()))
])
t1 = pa.Table.from_pandas(df[:2], schema=schema)
t2 = pa.Table.from_pandas(df[2:], schema=schema)
table = pa.concat_tables([t1, t2])
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_column_of_lists_chunked2(self):
data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11],
[12, 13], [14, 15], [16, 17]]
data2 = [[8, 9], [18, 19]]
a1 = pa.array(data1)
a2 = pa.array(data2)
t1 = pa.Table.from_arrays([a1], names=['a'])
t2 = pa.Table.from_arrays([a2], names=['a'])
concatenated = pa.concat_tables([t1, t2])
result = concatenated.to_pandas()
expected = pd.DataFrame({'a': data1 + data2})
tm.assert_frame_equal(result, expected)
def test_column_of_lists_strided(self):
df, schema = dataframe_with_lists()
df = pd.concat([df] * 6, ignore_index=True)
arr = df['int64'].values[::3]
assert arr.strides[0] != 8
_check_array_roundtrip(arr)
def test_nested_lists_all_none(self):
data = np.array([[None, None], None], dtype=object)
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
data2 = np.array([None, None, [None, None],
np.array([None, None], dtype=object)],
dtype=object)
arr = pa.array(data2)
expected = pa.array([None, None, [None, None], [None, None]])
assert arr.equals(expected)
def test_nested_lists_all_empty(self):
# ARROW-2128
data = pd.Series([[], [], []])
arr = pa.array(data)
expected = pa.array(list(data))
assert arr.equals(expected)
assert arr.type == pa.list_(pa.null())
def test_infer_lists(self):
data = OrderedDict([
('nan_ints', [[None, 1], [2, 3]]),
('ints', [[0, 1], [2, 3]]),
('strs', [[None, u'b'], [u'c', u'd']]),
('nested_strs', [[[None, u'b'], [u'c', u'd']], None])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('nan_ints', pa.list_(pa.int64())),
pa.field('ints', pa.list_(pa.int64())),
pa.field('strs', pa.list_(pa.string())),
pa.field('nested_strs', pa.list_(pa.list_(pa.string())))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
def test_infer_numpy_array(self):
data = OrderedDict([
('ints', [
np.array([0, 1], dtype=np.int64),
np.array([2, 3], dtype=np.int64)
])
])
df = pd.DataFrame(data)
expected_schema = pa.schema([
pa.field('ints', pa.list_(pa.int64()))
])
_check_pandas_roundtrip(df, expected_schema=expected_schema)
@pytest.mark.parametrize('t,data,expected', [
(
pa.int64,
[[1, 2], [3], None],
[None, [3], None]
),
(
pa.string,
[[u'aaa', u'bb'], [u'c'], None],
[None, [u'c'], None]
),
(
pa.null,
[[None, None], [None], None],
[None, [None], None]
)
])
def test_array_from_pandas_typed_array_with_mask(self, t, data, expected):
m = np.array([True, False, True])
s = pd.Series(data)
result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t()))
assert pa.Array.from_pandas(expected,
type=pa.list_(t())).equals(result)
def test_empty_list_roundtrip(self):
empty_list_array = np.empty((3,), dtype=object)
empty_list_array.fill([])
df = pd.DataFrame({'a': np.array(['1', '2', '3']),
'b': empty_list_array})
tbl = pa.Table.from_pandas(df)
result = tbl.to_pandas()
tm.assert_frame_equal(result, df)
def test_array_from_nested_arrays(self):
df, schema = dataframe_with_arrays()
for field in schema:
arr = df[field.name].values
expected = pa.array(list(arr), type=field.type)
result = pa.array(arr)
assert result.type == field.type # == list<scalar>
assert result.equals(expected)
class TestConvertStructTypes(object):
"""
Conversion tests for struct types.
"""
def test_structarray(self):
ints = pa.array([None, 2, 3], type=pa.int64())
strs = pa.array([u'a', None, u'c'], type=pa.string())
bools = pa.array([True, False, None], type=pa.bool_())
arr = pa.StructArray.from_arrays(
[ints, strs, bools],
['ints', 'strs', 'bools'])
expected = pd.Series([
{'ints': None, 'strs': u'a', 'bools': True},
{'ints': 2, 'strs': None, 'bools': False},
{'ints': 3, 'strs': u'c', 'bools': None},
])
series = pd.Series(arr.to_pandas())
tm.assert_series_equal(series, expected)
class TestZeroCopyConversion(object):
"""
Tests that zero-copy conversion works with some types.
"""
def test_zero_copy_success(self):
result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True)
npt.assert_array_equal(result, [0, 1, 2])
def test_zero_copy_dictionaries(self):
arr = pa.DictionaryArray.from_arrays(
np.array([0, 0]),
np.array([5]))
result = arr.to_pandas(zero_copy_only=True)
values = pd.Categorical([5, 5])
tm.assert_series_equal(pd.Series(result), pd.Series(values),
check_names=False)
def test_zero_copy_failure_on_object_types(self):
with pytest.raises(pa.ArrowException):
pa.array(['A', 'B', 'C']).to_pandas(zero_copy_only=True)
def test_zero_copy_failure_with_int_when_nulls(self):
with pytest.raises(pa.ArrowException):
pa.array([0, 1, None]).to_pandas(zero_copy_only=True)
def test_zero_copy_failure_with_float_when_nulls(self):
with pytest.raises(pa.ArrowException):
pa.array([0.0, 1.0, None]).to_pandas(zero_copy_only=True)
def test_zero_copy_failure_on_bool_types(self):
with pytest.raises(pa.ArrowException):
pa.array([True, False]).to_pandas(zero_copy_only=True)
def test_zero_copy_failure_on_list_types(self):
arr = np.array([[1, 2], [8, 9]], dtype=object)
with pytest.raises(pa.ArrowException):
pa.array(arr).to_pandas(zero_copy_only=True)
def test_zero_copy_failure_on_timestamp_types(self):
arr = np.array(['2007-07-13'], dtype='datetime64[ns]')
with pytest.raises(pa.ArrowException):
pa.array(arr).to_pandas(zero_copy_only=True)
class TestConvertMisc(object):
"""
Miscellaneous conversion tests.
"""
type_pairs = [
(np.int8, pa.int8()),
(np.int16, pa.int16()),
(np.int32, pa.int32()),
(np.int64, pa.int64()),
(np.uint8, pa.uint8()),
(np.uint16, pa.uint16()),
(np.uint32, pa.uint32()),
(np.uint64, pa.uint64()),
# (np.float16, pa.float16()), # XXX unsupported
(np.float32, pa.float32()),
(np.float64, pa.float64()),
# XXX unsupported
# (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])),
(np.object, pa.string()),
# (np.object, pa.binary()), # XXX unsupported
(np.object, pa.binary(10)),
(np.object, pa.list_(pa.int64())),
]
def test_all_none_objects(self):
df = pd.DataFrame({'a': [None, None, None]})
_check_pandas_roundtrip(df)
def test_all_none_category(self):
df = pd.DataFrame({'a': [None, None, None]})
df['a'] = df['a'].astype('category')
_check_pandas_roundtrip(df)
def test_empty_arrays(self):
for dtype, pa_type in self.type_pairs:
arr = np.array([], dtype=dtype)
_check_array_roundtrip(arr, type=pa_type)
def test_threaded_conversion(self):
df = _alltypes_example()
_check_pandas_roundtrip(df, nthreads=2)
_check_pandas_roundtrip(df, nthreads=2, as_batch=True)
def test_category(self):
repeats = 5
v1 = ['foo', None, 'bar', 'qux', np.nan]
v2 = [4, 5, 6, 7, 8]
v3 = [b'foo', None, b'bar', b'qux', np.nan]
df = pd.DataFrame({'cat_strings': pd.Categorical(v1 * repeats),
'cat_ints': pd.Categorical(v2 * repeats),
'cat_binary': pd.Categorical(v3 * repeats),
'cat_strings_ordered': pd.Categorical(
v1 * repeats, categories=['bar', 'qux', 'foo'],
ordered=True),
'ints': v2 * repeats,
'ints2': v2 * repeats,
'strings': v1 * repeats,
'strings2': v1 * repeats,
'strings3': v3 * repeats})
_check_pandas_roundtrip(df)
arrays = [
pd.Categorical(v1 * repeats),
pd.Categorical(v2 * repeats),
pd.Categorical(v3 * repeats)
]
for values in arrays:
_check_array_roundtrip(values)
def test_mixed_types_fails(self):
data = pd.DataFrame({'a': ['a', 1, 2.0]})
with pytest.raises(pa.ArrowException):
pa.Table.from_pandas(data)
data = pd.DataFrame({'a': [1, True]})
with pytest.raises(pa.ArrowException):
pa.Table.from_pandas(data)
def test_strided_data_import(self):
cases = []
columns = ['a', 'b', 'c']
N, K = 100, 3
random_numbers = np.random.randn(N, K).copy() * 100
numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8',
'f4', 'f8']
for type_name in numeric_dtypes:
cases.append(random_numbers.astype(type_name))
# strings
cases.append(np.array([tm.rands(10) for i in range(N * K)],
dtype=object)
.reshape(N, K).copy())
# booleans
boolean_objects = (np.array([True, False, True] * N, dtype=object)
.reshape(N, K).copy())
# add some nulls, so dtype comes back as objects
boolean_objects[5] = None
cases.append(boolean_objects)
cases.append(np.arange("2016-01-01T00:00:00.001", N * K,
dtype='datetime64[ms]')
.reshape(N, K).copy())
strided_mask = (random_numbers > 0).astype(bool)[:, 0]
for case in cases:
df = pd.DataFrame(case, columns=columns)
col = df['a']
_check_pandas_roundtrip(df)
_check_array_roundtrip(col)
_check_array_roundtrip(col, mask=strided_mask)
def test_all_nones(self):
def _check_series(s):
converted = pa.array(s)
assert isinstance(converted, pa.NullArray)
assert len(converted) == 3
assert converted.null_count == 3
assert converted[0] is pa.NA
_check_series(pd.Series([None] * 3, dtype=object))
_check_series(pd.Series([np.nan] * 3, dtype=object))
_check_series(pd.Series([np.sqrt(-1)] * 3, dtype=object))
def test_partial_schema(self):
data = OrderedDict([
('a', [0, 1, 2, 3, 4]),
('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)),
('c', [-10, -5, 0, 5, 10])
])
df = pd.DataFrame(data)
partial_schema = pa.schema([
pa.field('a', pa.int64()),
pa.field('b', pa.int32())
])
expected_schema = pa.schema([
pa.field('a', pa.int64()),
pa.field('b', pa.int32()),
pa.field('c', pa.int64())
])
_check_pandas_roundtrip(df, schema=partial_schema,
expected_schema=expected_schema)
def test_table_batch_empty_dataframe(self):
df = pd.DataFrame({})
_check_pandas_roundtrip(df)
_check_pandas_roundtrip(df, as_batch=True)
df2 = pd.DataFrame({}, index=[0, 1, 2])
_check_pandas_roundtrip(df2, preserve_index=True)
_check_pandas_roundtrip(df2, as_batch=True, preserve_index=True)
def test_convert_empty_table(self):
arr = pa.array([], type=pa.int64())
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=np.int64))
arr = pa.array([], type=pa.string())
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
arr = pa.array([], type=pa.list_(pa.int64()))
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())]))
tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object))
def _fully_loaded_dataframe_example():
from distutils.version import LooseVersion
index = pd.MultiIndex.from_arrays([
pd.date_range('2000-01-01', periods=5).repeat(2),
np.tile(np.array(['foo', 'bar'], dtype=object), 5)
])
c1 = pd.date_range('2000-01-01', periods=10)
data = {
0: c1,
1: c1.tz_localize('utc'),
2: c1.tz_localize('US/Eastern'),
3: c1[::2].tz_localize('utc').repeat(2).astype('category'),
4: ['foo', 'bar'] * 5,
5: pd.Series(['foo', 'bar'] * 5).astype('category').values,
6: [True, False] * 5,
7: np.random.randn(10),
8: np.random.randint(0, 100, size=10),
9: pd.period_range('2013', periods=10, freq='M')
}
if LooseVersion(pd.__version__) >= '0.21':
# There is an issue with pickling IntervalIndex in pandas 0.20.x
data[10] = pd.interval_range(start=1, freq=1, periods=10)
return pd.DataFrame(data, index=index)
def _check_serialize_components_roundtrip(df):
ctx = pa.default_serialization_context()
components = ctx.serialize(df).to_components()
deserialized = ctx.deserialize_components(components)
tm.assert_frame_equal(df, deserialized)
def test_serialize_deserialize_pandas():
# ARROW-1784, serialize and deserialize DataFrame by decomposing
# BlockManager
df = _fully_loaded_dataframe_example()
_check_serialize_components_roundtrip(df)
def _pytime_from_micros(val):
microseconds = val % 1000000
val //= 1000000
seconds = val % 60
val //= 60
minutes = val % 60
hours = val // 60
return time(hours, minutes, seconds, microseconds)
def _pytime_to_micros(pytime):
return (pytime.hour * 3600000000 +
pytime.minute * 60000000 +
pytime.second * 1000000 +
pytime.microsecond)
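# Example (illustrative): _pytime_to_micros(time(1, 2, 3, 1356)) == 3723001356,
# and _pytime_from_micros(3723001356) recovers time(1, 2, 3, 1356).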
| apache-2.0 |
AlexRobson/scikit-learn | sklearn/setup.py | 225 | 2856 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('svm')
config.add_subpackage('datasets')
config.add_subpackage('datasets/tests')
config.add_subpackage('feature_extraction')
config.add_subpackage('feature_extraction/tests')
config.add_subpackage('cluster')
config.add_subpackage('cluster/tests')
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('decomposition')
config.add_subpackage('decomposition/tests')
config.add_subpackage("ensemble")
config.add_subpackage("ensemble/tests")
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_subpackage('externals')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('neighbors')
config.add_subpackage('neural_network')
config.add_subpackage('preprocessing')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('semi_supervised')
config.add_subpackage("tree")
config.add_subpackage("tree/tests")
config.add_subpackage('metrics/tests')
config.add_subpackage('metrics/cluster')
config.add_subpackage('metrics/cluster/tests')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
    # some libs need cblas; fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
DinoCow/airflow | docs/conf.py | 5 | 18135 | # flake8: noqa
# Disable Flake8 because of all the sphinx imports
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Airflow documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 9 20:50:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
"""Configuration of Airflow Docs"""
import glob
import os
import sys
from typing import Any, Dict, List, Optional
import yaml
import airflow
from airflow.configuration import default_config_yaml
from docs.exts.docs_build.third_party_inventories import ( # pylint: disable=no-name-in-module,wrong-import-order
THIRD_PARTY_INDEXES,
)
sys.path.append(os.path.join(os.path.dirname(__file__), 'exts'))
CONF_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
INVENTORY_CACHE_DIR = os.path.join(CONF_DIR, '_inventory_cache')
ROOT_DIR = os.path.abspath(os.path.join(CONF_DIR, os.pardir))
FOR_PRODUCTION = os.environ.get('AIRFLOW_FOR_PRODUCTION', 'false') == 'true'
# By default (e.g. on RTD), build docs for `airflow` package
PACKAGE_NAME = os.environ.get('AIRFLOW_PACKAGE_NAME', 'apache-airflow')
PACKAGE_DIR: Optional[str]
if PACKAGE_NAME == 'apache-airflow':
PACKAGE_DIR = os.path.join(ROOT_DIR, 'airflow')
PACKAGE_VERSION = airflow.__version__
elif PACKAGE_NAME.startswith('apache-airflow-providers-'):
from provider_yaml_utils import load_package_data # pylint: disable=no-name-in-module
ALL_PROVIDER_YAMLS = load_package_data()
try:
CURRENT_PROVIDER = next(
provider_yaml
for provider_yaml in ALL_PROVIDER_YAMLS
if provider_yaml['package-name'] == PACKAGE_NAME
)
except StopIteration:
raise Exception(f"Could not find provider.yaml file for package: {PACKAGE_NAME}")
PACKAGE_DIR = CURRENT_PROVIDER['package-dir']
PACKAGE_VERSION = 'master'
else:
PACKAGE_DIR = None
PACKAGE_VERSION = 'master'
# Adds to environment variables for easy access from other plugins like airflow_intersphinx.
os.environ['AIRFLOW_PACKAGE_NAME'] = PACKAGE_NAME
if PACKAGE_DIR:
os.environ['AIRFLOW_PACKAGE_DIR'] = PACKAGE_DIR
os.environ['AIRFLOW_PACKAGE_VERSION'] = PACKAGE_VERSION
# Hack to allow a piece of the code to behave differently while the docs are
# being built. The main objective was to alter the behavior of
# utils.apply_default, which was hiding function headers
os.environ['BUILDING_AIRFLOW_DOCS'] = 'TRUE'
# == Sphinx configuration ======================================================
# -- Project information -------------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
# General information about the project.
project = PACKAGE_NAME
# The version info for the project you're documenting
version = PACKAGE_VERSION
# The full version, including alpha/beta/rc tags.
release = PACKAGE_VERSION
# -- General configuration -----------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'provider_init_hack',
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinxarg.ext',
'sphinx.ext.intersphinx',
'exampleinclude',
'docroles',
'removemarktransform',
'sphinx_copybutton',
'airflow_intersphinx',
"sphinxcontrib.spelling",
'sphinx_airflow_theme',
'redirects',
]
if PACKAGE_NAME == 'apache-airflow':
extensions.extend(
[
'sphinxcontrib.jinja',
'sphinx.ext.graphviz',
'sphinxcontrib.httpdomain',
'sphinxcontrib.httpdomain',
# First, generate redoc
'sphinxcontrib.redoc',
# Second, update redoc script
"sphinx_script_update",
]
)
if PACKAGE_NAME == "apache-airflow-providers":
extensions.extend(
[
'operators_and_hooks_ref',
'providers_packages_ref',
]
)
else:
extensions.append('autoapi.extension')
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns: List[str]
if PACKAGE_NAME == 'apache-airflow':
exclude_patterns = [
# We only link to selected subpackages.
'_api/airflow/index.rst',
'README.rst',
]
elif PACKAGE_NAME.startswith('apache-airflow-providers-'):
exclude_patterns = [
'operators/_partials',
]
else:
exclude_patterns = []
def _get_rst_filepath_from_path(filepath: str):
if os.path.isdir(filepath):
result = filepath
elif os.path.isfile(filepath) and filepath.endswith('/__init__.py'):
result = filepath.rpartition("/")[0]
else:
result = filepath.rpartition(".")[0]
result += "/index.rst"
result = f"_api/{os.path.relpath(result, ROOT_DIR)}"
return result
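# Example (illustrative) of the mapping above: a file such as
# "<ROOT_DIR>/airflow/providers/foo/example_dags/bar.py" becomes
# "_api/airflow/providers/foo/example_dags/bar/index.rst".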
if PACKAGE_NAME == 'apache-airflow':
# Exclude top-level packages
# do not exclude these top-level modules from the doc build:
_allowed_top_level = ("exceptions.py",)
for path in glob.glob(f"{ROOT_DIR}/airflow/*"):
name = os.path.basename(path)
if os.path.isfile(path) and not path.endswith(_allowed_top_level):
exclude_patterns.append(f"_api/airflow/{name.rpartition('.')[0]}")
browsable_packages = ["operators", "hooks", "sensors", "providers", "executors", "models", "secrets"]
if os.path.isdir(path) and name not in browsable_packages:
exclude_patterns.append(f"_api/airflow/{name}")
else:
exclude_patterns.extend(
_get_rst_filepath_from_path(f) for f in glob.glob(f"{PACKAGE_DIR}/**/example_dags/**/*.py")
)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# -- Options for HTML output ---------------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_airflow_theme'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
if PACKAGE_NAME == 'apache-airflow':
html_title = "Airflow Documentation"
else:
html_title = f"{PACKAGE_NAME} Documentation"
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = ""
# If given, this must be the name of an image file (path relative to the
# configuration directory) that is the favicon of the docs. Modern browsers
# use this as the icon for tabs, windows and bookmarks. It should be a
# Windows-style icon file (.ico), which is 16x16 or 32x32 pixels large.
html_favicon = "../airflow/www/static/pin_32.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
if PACKAGE_NAME == 'apache-airflow':
html_static_path = ['apache-airflow/static']
else:
html_static_path = []
# A list of JavaScript filenames. The entry must be a filename string or a
# tuple containing the filename string and the attributes dictionary. The
# filename must be relative to the html_static_path, or a full URI with
# scheme like http://example.org/script.js.
if PACKAGE_NAME == 'apache-airflow':
html_js_files = ['jira-links.js']
else:
html_js_files = []
# -- Theme configuration -------------------------------------------------------
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'version-selector.html',
'searchbox.html',
'globaltoc.html',
]
if FOR_PRODUCTION
else [
'searchbox.html',
'globaltoc.html',
]
}
# If false, no index is generated.
html_use_index = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# Theme configuration
html_theme_options: Dict[str, Any] = {
'hide_website_buttons': True,
}
if FOR_PRODUCTION:
html_theme_options['navbar_links'] = [
{'href': '/community/', 'text': 'Community'},
{'href': '/meetups/', 'text': 'Meetups'},
{'href': '/docs/', 'text': 'Documentation'},
{'href': '/use-cases/', 'text': 'Use-cases'},
{'href': '/announcements/', 'text': 'Announcements'},
{'href': '/blog/', 'text': 'Blog'},
{'href': '/ecosystem/', 'text': 'Ecosystem'},
]
# A dictionary of values to pass into the template engine’s context for all pages.
html_context = {
# Google Analytics ID.
# For more information look at:
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/layout.html#L222-L232
'theme_analytics_id': 'UA-140539454-1',
# Variables used to build a button for editing the source code
#
# The path is created according to the following template:
#
# https://{{ github_host|default("github.com") }}/{{ github_user }}/{{ github_repo }}/
# {{ theme_vcs_pageview_mode|default("blob") }}/{{ github_version }}{{ conf_py_path }}
# {{ pagename }}{{ suffix }}
#
# More information:
# https://github.com/readthedocs/readthedocs.org/blob/master/readthedocs/doc_builder/templates/doc_builder/conf.py.tmpl#L100-L103
# https://github.com/readthedocs/sphinx_rtd_theme/blob/master/sphinx_rtd_theme/breadcrumbs.html#L45
# https://github.com/apache/airflow-site/blob/91f760c/sphinx_airflow_theme/sphinx_airflow_theme/suggest_change_button.html#L36-L40
#
'theme_vcs_pageview_mode': 'edit',
'conf_py_path': f'/docs/{PACKAGE_NAME}/',
'github_user': 'apache',
'github_repo': 'airflow',
'github_version': 'master',
'display_github': 'master',
'suffix': '.rst',
}
# == Extensions configuration ==================================================
# -- Options for sphinxcontrib.jinjac ------------------------------------------
# See: https://github.com/tardyp/sphinx-jinja
# Jinja context
if PACKAGE_NAME == 'apache-airflow':
jinja_contexts = {'config_ctx': {"configs": default_config_yaml()}}
elif PACKAGE_NAME.startswith('apache-airflow-providers-'):
def _load_config():
templates_dir = os.path.join(PACKAGE_DIR, 'config_templates')
file_path = os.path.join(templates_dir, "config.yml")
if not os.path.exists(file_path):
return {}
with open(file_path) as config_file:
return yaml.safe_load(config_file)
config = _load_config()
if config:
jinja_contexts = {'config_ctx': {"configs": config}}
extensions.append('sphinxcontrib.jinja')
# -- Options for sphinx.ext.autodoc --------------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
# This value contains a list of modules to be mocked up. This is useful when some external dependencies
# are not met at build time and break the building process.
autodoc_mock_imports = [
'MySQLdb',
'adal',
'analytics',
'azure',
'azure.cosmos',
'azure.datalake',
'azure.kusto',
'azure.mgmt',
'boto3',
'botocore',
'bson',
'cassandra',
'celery',
'cloudant',
'cryptography',
'cx_Oracle',
'datadog',
'distributed',
'docker',
'google',
'google_auth_httplib2',
'googleapiclient',
'grpc',
'hdfs',
'httplib2',
'jaydebeapi',
'jenkins',
'jira',
'kubernetes',
'msrestazure',
'pandas',
'pandas_gbq',
'paramiko',
'pinotdb',
'psycopg2',
'pydruid',
'pyhive',
'pyhive',
'pymongo',
'pymssql',
'pysftp',
'qds_sdk',
'redis',
'simple_salesforce',
'slackclient',
'smbclient',
'snowflake',
'sshtunnel',
'telegram',
'tenacity',
'vertica_python',
'winrm',
'zdesk',
]
# The default options for autodoc directives. They are applied to all autodoc directives automatically.
autodoc_default_options = {'show-inheritance': True, 'members': True}
# -- Options for sphinx.ext.intersphinx ----------------------------------------
# See: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
# This config value contains names of other projects that should
# be linked to in this documentation.
# Inventories are only downloaded once by docs/exts/docs_build/fetch_inventories.py.
intersphinx_mapping = {
pkg_name: (f"{THIRD_PARTY_INDEXES[pkg_name]}/", (f'{INVENTORY_CACHE_DIR}/{pkg_name}/objects.inv',))
for pkg_name in [
'boto3',
'celery',
'hdfs',
'jinja2',
'mongodb',
'pandas',
'python',
'requests',
'sqlalchemy',
]
}
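# Each entry maps a package name to its docs base URL plus the locally cached
# inventory, e.g. (illustrative) 'pandas' ->
# (f'{THIRD_PARTY_INDEXES["pandas"]}/', (f'{INVENTORY_CACHE_DIR}/pandas/objects.inv',)).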
if PACKAGE_NAME in ('apache-airflow-providers-google', 'apache-airflow'):
intersphinx_mapping.update(
{
pkg_name: (
f"{THIRD_PARTY_INDEXES[pkg_name]}/",
(f'{INVENTORY_CACHE_DIR}/{pkg_name}/objects.inv',),
)
for pkg_name in [
'google-api-core',
'google-cloud-automl',
'google-cloud-bigquery',
'google-cloud-bigquery-datatransfer',
'google-cloud-bigquery-storage',
'google-cloud-bigtable',
'google-cloud-container',
'google-cloud-core',
'google-cloud-datacatalog',
'google-cloud-datastore',
'google-cloud-dlp',
'google-cloud-kms',
'google-cloud-language',
'google-cloud-monitoring',
'google-cloud-pubsub',
'google-cloud-redis',
'google-cloud-spanner',
'google-cloud-speech',
'google-cloud-storage',
'google-cloud-tasks',
'google-cloud-texttospeech',
'google-cloud-translate',
'google-cloud-videointelligence',
'google-cloud-vision',
]
}
)
# -- Options for sphinx.ext.viewcode -------------------------------------------
# See: https://www.sphinx-doc.org/es/master/usage/extensions/viewcode.html
# If this is True, viewcode extension will emit viewcode-follow-imported event to resolve the name of
# the module by other extensions. The default is True.
viewcode_follow_imported_members = True
# -- Options for sphinx-autoapi ------------------------------------------------
# See: https://sphinx-autoapi.readthedocs.io/en/latest/config.html
# Paths (relative or absolute) to the source code that you wish to generate
# your API documentation from.
autoapi_dirs = [
PACKAGE_DIR,
]
# A directory that has user-defined templates to override our default templates.
if PACKAGE_NAME == 'apache-airflow':
autoapi_template_dir = 'autoapi_templates'
# A list of patterns to ignore when finding files
autoapi_ignore = [
'airflow/configuration/',
'*/example_dags/*',
'*/_internal*',
'*/node_modules/*',
'*/migrations/*',
'*/contrib/*',
]
if PACKAGE_NAME == 'apache-airflow':
autoapi_ignore.append('*/airflow/providers/*')
# Keep the AutoAPI generated files on the filesystem after the run.
# Useful for debugging.
autoapi_keep_files = True
# Relative path to output the AutoAPI files into. This can also be used to place the generated documentation
# anywhere in your documentation hierarchy.
autoapi_root = f'{PACKAGE_NAME}/_api'
# Whether to insert the generated documentation into the TOC tree. If this is False, the default AutoAPI
# index page is not generated and you will need to include the generated documentation in a
# TOC tree entry yourself.
autoapi_add_toctree_entry = False
# -- Options for ext.exampleinclude --------------------------------------------
exampleinclude_sourceroot = os.path.abspath('..')
# -- Options for ext.redirects -------------------------------------------------
redirects_file = 'redirects.txt'
# -- Options for sphinxcontrib-spelling ----------------------------------------
spelling_word_list_filename = [os.path.join(CONF_DIR, 'spelling_wordlist.txt')]
# -- Options for sphinxcontrib.redoc -------------------------------------------
# See: https://sphinxcontrib-redoc.readthedocs.io/en/stable/
if PACKAGE_NAME == 'apache-airflow':
OPENAPI_FILE = os.path.join(
os.path.dirname(__file__), "..", "airflow", "api_connexion", "openapi", "v1.yaml"
)
redoc = [
{
'name': 'Airflow REST API',
'page': 'stable-rest-api-ref',
'spec': OPENAPI_FILE,
'opts': {
'hide-hostname': True,
'no-auto-auth': True,
},
},
]
# Options for script updater
redoc_script_url = "https://cdn.jsdelivr.net/npm/[email protected]/bundles/redoc.standalone.js"
| apache-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/base.py | 1 | 12334 | from __future__ import absolute_import, division, print_function
from functools import partial
from hashlib import md5
from operator import attrgetter
import pickle
import os
import uuid
from toolz import merge, groupby, curry, identity
from toolz.functoolz import Compose
from .compatibility import bind_method, unicode
from .context import _globals
from .utils import Dispatch, ignoring
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', format=None, optimize_graph=False,
**kwargs):
"""
Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` to be installed.
Parameters
----------
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
        result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.base.visualize
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
return visualize(self, filename=filename, format=format,
optimize_graph=optimize_graph, **kwargs)
def compute(self, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default
is to check the global settings first, and then fall back to
the collection defaults.
optimize_graph : bool, optional
If True [default], the graph is optimized before computation.
Otherwise the graph is run as is. This can be useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
"""
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
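        # Scheduler resolution order: the explicit `get` argument, then the
        # global 'get' setting, then this collection's default scheduler; the
        # graph is optimized for the requested keys before execution.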
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys, **kwargs)
return get(dsk2, keys, **kwargs)
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg', 'pos'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne', 'getitem'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Parameters
----------
args : object
Any number of objects. If the object is a dask collection, it's
computed and the result is returned. Otherwise it's passed through
unchanged.
get : callable, optional
A scheduler ``get`` function to use. If not provided, the default is
to check the global settings first, and then fall back to defaults for
the collections.
optimize_graph : bool, optional
If True [default], the optimizations for each collection are applied
before computation. Otherwise the graph is run as is. This can be
useful for debugging.
kwargs
Extra keywords to forward to the scheduler ``get`` function.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
variables = [a for a in args if isinstance(a, Base)]
if not variables:
return args
get = kwargs.pop('get', None) or _globals['get']
optimizations = (kwargs.pop('optimizations', None) or
_globals.get('optimizations', []))
if not get:
get = variables[0]._default_get
if not all(a._default_get == get for a in variables):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
if kwargs.get('optimize_graph', True):
groups = groupby(attrgetter('_optimize'), variables)
groups = {opt: [merge([v.dask for v in val]),
[v._keys() for v in val]]
for opt, val in groups.items()}
for opt in optimizations:
groups = {k: [opt(dsk, keys), keys]
for k, (dsk, keys) in groups.items()}
dsk = merge([opt(dsk, keys, **kwargs)
for opt, (dsk, keys) in groups.items()])
else:
dsk = merge(var.dask for var in variables)
keys = [var._keys() for var in variables]
results = get(dsk, keys, **kwargs)
results_iter = iter(results)
return tuple(a if not isinstance(a, Base)
else a._finalize(next(results_iter))
for a in args)
def visualize(*args, **kwargs):
"""
Visualize several dask graphs at once.
Requires ``graphviz`` to be installed. All options that are not the dask
graph(s) should be passed as keyword arguments.
Parameters
----------
dsk : dict(s) or collection(s)
The dask graph(s) to visualize.
filename : str or None, optional
The name (without an extension) of the file to write to disk. If
`filename` is None, no file will be written, and we communicate
with dot using only pipes.
format : {'png', 'pdf', 'dot', 'svg', 'jpeg', 'jpg'}, optional
Format in which to write output file. Default is 'png'.
optimize_graph : bool, optional
If True, the graph is optimized before rendering. Otherwise,
the graph is displayed as is. Default is False.
**kwargs
Additional keyword arguments to forward to ``to_graphviz``.
Returns
-------
    result : IPython.display.Image, IPython.display.SVG, or None
See dask.dot.dot_graph for more information.
See also
--------
dask.dot.dot_graph
Notes
-----
For more information on optimization see here:
http://dask.pydata.org/en/latest/optimize.html
"""
dsks = [arg for arg in args if isinstance(arg, dict)]
args = [arg for arg in args if isinstance(arg, Base)]
filename = kwargs.pop('filename', 'mydask')
optimize_graph = kwargs.pop('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks.extend([arg._optimize(arg.dask, arg._keys()) for arg in args])
else:
dsks.extend([arg.dask for arg in args])
dsk = merge(dsks)
return dot_graph(dsk, filename=filename, **kwargs)
def normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b'__main__' not in result: # abort on dynamic functions
return result
except:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except:
return str(func)
normalize_token = Dispatch()
normalize_token.register((int, float, str, unicode, bytes, type(None), type,
slice),
identity)
@partial(normalize_token.register, dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@partial(normalize_token.register, (tuple, list, set))
def normalize_seq(seq):
return type(seq).__name__, list(map(normalize_token, seq))
@partial(normalize_token.register, object)
def normalize_object(o):
if callable(o):
return normalize_function(o)
else:
return uuid.uuid4().hex
@partial(normalize_token.register, Base)
def normalize_base(b):
return type(b).__name__, b.key
with ignoring(ImportError):
import pandas as pd
@partial(normalize_token.register, pd.Index)
def normalize_index(ind):
return [ind.name, normalize_token(ind.values)]
@partial(normalize_token.register, pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes),
normalize_token(cat.categories),
cat.ordered]
@partial(normalize_token.register, pd.Series)
def normalize_series(s):
return [s.name, s.dtype,
normalize_token(s._data.blocks[0].values),
normalize_token(s.index)]
@partial(normalize_token.register, pd.DataFrame)
def normalize_dataframe(df):
data = [block.values for block in df._data.blocks]
data += [df.columns, df.index]
return list(map(normalize_token, data))
with ignoring(ImportError):
import numpy as np
@partial(normalize_token.register, np.ndarray)
def normalize_array(x):
if not x.shape:
return (str(x), x.dtype)
if hasattr(x, 'mode') and getattr(x, 'filename', None):
if hasattr(x.base, 'ctypes'):
offset = (x.ctypes.get_as_parameter().value -
x.base.ctypes.get_as_parameter().value)
else:
                offset = 0  # root memmaps have the mmap object as base
return (x.filename, os.path.getmtime(x.filename), x.dtype,
x.shape, x.strides, offset)
if x.dtype.hasobject:
try:
data = md5('-'.join(x.flat).encode('utf-8')).hexdigest()
except TypeError:
data = md5(b'-'.join([str(item).encode() for item in x.flat])).hexdigest()
else:
try:
data = md5(x.ravel().view('i1').data).hexdigest()
except (BufferError, AttributeError, ValueError):
data = md5(x.copy().ravel().view('i1').data).hexdigest()
return (data, x.dtype, x.shape, x.strides)
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
with ignoring(ImportError):
from collections import OrderedDict
@partial(normalize_token.register, OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
def tokenize(*args, **kwargs):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'7d6a880cd9ec03506eee6973ff551339'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs,)
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
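# Illustrative note (added; not part of the original dask source): keyword
# arguments are folded into a single dict and dicts are normalized via their
# sorted items, so the token does not depend on keyword order, e.g.
#
#     tokenize(1, a=3, b=2) == tokenize(1, b=2, a=3)   # True
#     tokenize(1, 2) == tokenize(2, 1)                 # False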
| mit |
RPGOne/scikit-learn | sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
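# Worked example (added for clarity; not part of the original test suite): the
# "balanced" heuristic weights each class as n_samples / (n_classes * count).
# For y = [2, 2, 2, 3, 3, 4] that is 6 / (3 * [3, 2, 1]) = [0.667, 1.0, 2.0],
# which is why np.dot(cw, class_counts) recovers n_samples in the test above.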
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
pprett/statsmodels | statsmodels/sandbox/examples/example_crossval.py | 1 | 2199 |
import numpy as np
from statsmodels.sandbox.tools import cross_val
if __name__ == '__main__':
#A: josef-pktd
import statsmodels.api as sm
from statsmodels.api import OLS
#from statsmodels.datasets.longley import load
from statsmodels.datasets.stackloss import load
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt,
default_latex_fmt, default_html_fmt)
import numpy as np
data = load()
data.exog = sm.tools.add_constant(data.exog)
resols = sm.OLS(data.endog, data.exog).fit()
    print('\n OLS leave 1 out')
for inidx, outidx in cross_val.LeaveOneOut(len(data.endog)):
res = sm.OLS(data.endog[inidx], data.exog[inidx,:]).fit()
        print(data.endog[outidx], res.model.predict(res.params, data.exog[outidx,:]), end=' ')
        print(data.endog[outidx] - res.model.predict(res.params, data.exog[outidx,:]))
    print('\n OLS leave 2 out')
resparams = []
for inidx, outidx in cross_val.LeavePOut(len(data.endog), 2):
res = sm.OLS(data.endog[inidx], data.exog[inidx,:]).fit()
#print data.endog[outidx], res.model.predict(data.exog[outidx,:]),
#print ((data.endog[outidx] - res.model.predict(data.exog[outidx,:]))**2).sum()
resparams.append(res.params)
resparams = np.array(resparams)
    print(resparams)
doplots = 1
if doplots:
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
plt.figure()
figtitle = 'Leave2out parameter estimates'
t = plt.gcf().text(0.5,
0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
for i in range(resparams.shape[1]):
plt.subplot(4, 2, i+1)
plt.hist(resparams[:,i], bins = 10)
#plt.title("Leave2out parameter estimates")
plt.show()
for inidx, outidx in cross_val.KStepAhead(20,2):
#note the following were broken because KStepAhead returns now a slice by default
        print(inidx)
        print(np.ones(20)[inidx].sum(), np.arange(20)[inidx][-4:])
        print(outidx)
        print(np.nonzero(np.ones(20)[outidx])[0][()]) | bsd-3-clause |
priyanshsaxena/techmeet | backup_stuff/text_classification/stocks.py | 2 | 1176 | import urllib,time,datetime,csv
import matplotlib.pyplot as plt
def getPrices(string_symbol):
num_days = 2
url_string = "http://chartapi.finance.yahoo.com/instrument/1.0/{0}/chartdata;type=quote;range={1}d/csv".format(string_symbol,num_days)
csv = urllib.urlopen(url_string).readlines()
# csv.reverse()
x = []
for bar in xrange(0,len(csv)-1):
if(csv[bar].split(",")[0].isdigit()):
csv_tmp,_ = csv[bar].split("\n")
item = map(float,csv_tmp.split(","))
_timestamp = datetime.datetime.fromtimestamp(item[0]) - datetime.timedelta(minutes=570) # Converting IST to EDT
time = datetime.datetime.now() - datetime.timedelta(hours=9,minutes=30) #US time
if(_timestamp.day == time.day):
break
# print _timestamp
_timestamp = str(_timestamp.hour)+":"+str(_timestamp.minute)+":"+str(_timestamp.second)
_close,_high,_low,_open,_volume = item[1:]
x.append([_timestamp,item[0],_close,_high,_low,_open,_volume])
return x
if __name__ == '__main__':
q = getPrices("jpm") # Stock ticker symbol, num_days
with open("output.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(q) | gpl-3.0 |
sserkez/ocelot | demos/optics/ex6.py | 2 | 2247 | '''
free space propagation -- wave
UNDER DEVELOPMENT!
'''
import matplotlib.pyplot as plt
import scipy.integrate as integrate
from ocelot.common.math_op import *
from ocelot.optics.elements import *
from ocelot.optics.wave import *
from ocelot.optics.ray import Ray, trace as trace_ray
from ocelot.gui.optics import *
from numpy import *
m = 1.0
cm = 1.e-2
mm = 1.e-3
mum = 1.e-6
def plot_field(of, title=None):
x = np.linspace(-of.size_x, of.size_x, of.nx) / 1.e-3
y = np.linspace(-of.size_y, of.size_y, of.ny) / 1.e-3
#mu1, mu2, sig1, sig2, _ = fit_gauss_2d(x,y, np.abs(of.mesh.points))
s2 = fwhm(x,np.abs(of.mesh.points[:,of.ny/2]))
print ('beam size:', s2, ' mm [fwhm]')
fig = plt.figure(title)
nx, ny = of.mesh.points.shape
ax = fig.add_subplot(211)
plt.plot(x,np.abs(of[:,ny/2]), lw=3)
ax = fig.add_subplot(212)
plt.plot(x,np.angle(of[:,ny/2]), lw=3)
'''
aperture functions
'''
def f0(x,y, a=2):
sig = 1.0
r = sqrt(x**2 + y**2)
return 1. / (sqrt(2*pi) * sig**2) * exp(-r**a / (2*sig**2))
def f1(x,y):
nsig_cut = 10.0
sig = 1.0e-5 * m
if (x**2 + y**2) > (nsig_cut)**2 * sig**2: return 0.0
return f0(x/sig,y/sig, a=2)
print('initializing field...')
of = ParaxialFieldSlice(lam=5e-9*m, nx=151, ny=151, size_x=0.15*mm, size_y =0.15*mm)
of.init_field(f1)
x = np.linspace(-of.size_x, of.size_x, of.nx)
s2 = fwhm(x,np.abs(of.mesh.points[:,of.ny/2]))
print('w=', of.w , ' s^-1')
print('k=', of.k)
print('lam=', of.lam , ' m')
print('size=',s2 / 1.e-3, ' mm [fwhm]')
print('div~=',of.lam / s2 / 1.e-3, ' mrad [fwhm]')
plot_field(of, title="start")
s = []
z = []
for i in range(10):
dz = 1.0 * m
propagate_fourier(of, obj=None, dz=dz)
x = np.linspace(-of.size_x, of.size_x, of.nx)
y = np.linspace(-of.size_y, of.size_y, of.ny)
s2 = fwhm(x,np.abs(of.mesh.points[:,of.ny/2]))
z.append(dz*i)
s.append(s2)
print('beam size:', s2 / 1.e-3, ' mm [fwhm]')
if s2>of.size_x:
print ('warning: need rescaling', s2, of.size_x)
rescale(of)
#plot_field(of, title=str(i*dz) + 'm')
plot_field(of, title="end")
plt.figure()
plt.plot(z,s)
plt.show()
| gpl-3.0 |
Nacturne/CoreNLP_copy | python_tools/Commands/scoreDistn.py | 2 | 2306 | import config
import argparse
from Core import TreeClass
import pandas as pd
def distnOfScore(dataFrame, combined, index): # The length filter is applied outside this function so that it can also be used for the 'total' case
score0 = dataFrame[dataFrame['score'] == 0].shape[0]
score1 = dataFrame[dataFrame['score'] == 1].shape[0]
score2 = dataFrame[dataFrame['score'] == 2].shape[0]
score3 = dataFrame[dataFrame['score'] == 3].shape[0]
score4 = dataFrame[dataFrame['score'] == 4].shape[0]
if combined:
print('{0:8}\t{1:8}\t{2:8}\t{3:8}'.format(index, score0 + score1, score2, score3 + score4))
else:
print('{0:6}\t{1:6}\t{2:6}\t{3:6}\t{4:6}\t{5:6}'.format(index,score0, score1, score2, score3, score4))
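# Illustrative usage of the helper above (hypothetical data, kept commented out
# so the script's behaviour is unchanged):
#
#     demo = pd.DataFrame([[0, 0], [2, 1], [4, 3]],
#                         columns=['score', 'num_phrases'])
#     distnOfScore(demo, False, 'demo')   # one row of per-score counts
#     distnOfScore(demo, True, 'demo')    # neg / neutral / pos counts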
parser = argparse.ArgumentParser()
parser.add_argument('I', metavar='input_path', type=str, help="The path of the input file")
parser.add_argument('-c', '--combined',dest='combined', action='store_true', help='If specified, 0+1=neg, 2=neutral, 3+4=pos')
parser.add_argument('-l', '--length',dest='length', type=int, help='Report score distn on different lengths of phrase, up to the number you specified')
args = parser.parse_args()
if args.length and args.length <= 0:
raise ValueError("the value of length should be an integer greater than 0")
file_in = open(config.ROOT_DIR + '/' + args.I, 'r')
temp = []
for s in file_in:
tree = TreeClass.ScoreTree(s)
for node in tree.allNodes():
temp.append([int(node.label), node.num_phrases()])
file_in.close()
stats = pd.DataFrame(temp, columns=['score', 'num_phrases'])
print("---------------------------------------------------------")
if args.combined:
print("{0:8}\t{1:8}\t{2:8}\t{3:8}".format('length','neg','neutral', 'pos'))
else:
print('{0:6}\t{1:6}\t{2:6}\t{3:6}\t{4:6}\t{5:6}'.format('length','score0', 'score1', 'score2', 'score3', 'score4'))
print("---------------------------------------------------------")
distnOfScore(stats,args.combined,'total')
phraseOfI = stats[stats['num_phrases'] == 0]
distnOfScore(phraseOfI,args.combined,'words')
if args.length:
for i in range(1,args.length+1):
phraseOfI = stats[stats['num_phrases'] == i]
distnOfScore(phraseOfI,args.combined,i)
print("---------------------------------------------------------")
| gpl-2.0 |
maximdanilchenko/fusionBasedRecSys | final_recommender.py | 1 | 2506 | from metrics import *
from itembased_recommender_system import *
import shelve
import matplotlib.pyplot as plt
import time
genData('base','u2.base')
print('base data ready')
genData('test','u2.test')
print('test data ready')
base = shelve.open('base')
test = shelve.open('test')
print('data opened %d'%len(base))
tr = transform(base)
print('transformed')
t1 = time.clock()
SupMatrix = multipleSupportMatrix(tr,[PCC,CPCC,SPCC,Jaccard,MSD,JMSD,COS,ACOS],'result')
print('time for supmatrix is %f'%(time.clock()-t1))
print('support matrix calculated!!!')
SM = shelve.open('result')
SupMatrix = {}
for i in SM:
SupMatrix[i] = SM[i]
print('SM opened with size%d'%len(SupMatrix))
##minmae = 1
metrix = [JMSD,PCC,CPCC,SPCC,Jaccard,MSD,COS,ACOS]
maes = []
n = 300
# testing all avr sim values (metrics) for all sim and disim values
##for metric in metrix:
## for i in reversed(range(10)):
## for j in range(i+1):
## originalRes = {}
## testRes = {}
## itMS = itemMatrixSup3(tr,n,SupMatrix,j,i,1,metric,0)
## #print('calculating test recommendations..')
## for user in test:
## testRes[user] = {}
## originalRes[user] = {}
## for item in test[user]:
## rec = recommendOne(base,tr,itMS,item,user)
## if (rec != 200):
## testRes[user][item] = rec
## originalRes[user][item] = test[user][item]
## mae = MAE(originalRes,testRes)
## maes.append((mae,j,i,metric.__name__))
## if (mae < minmae):
## print('min MAE is %f for borders (%d,%d) and metric %s'%(mae,j,i,metric.__name__))
## minmae = mae
##
#testing best result
originalRes = {}
testRes = {}
t1 = time.clock()
itMS = itemMatrixSup3(tr,n,SupMatrix,7,9,1,COS,0)
print('time for itemMatrix is %f'%(time.clock()-t1))
print('calculating test recommendations..')
t1 = time.clock()
for user in test:
testRes[user] = {}
originalRes[user] = {}
for item in test[user]:
rec = recommendOne(base,tr,itMS,item,user)
if (rec != 200):
testRes[user][item] = rec
originalRes[user][item] = test[user][item]
print('time is %f for %d recommendations'%((time.clock()-t1),sum(len(test[i]) for i in test)))
mae = MAE(originalRes,testRes)
print('MAE is %f'%mae)
| mit |
casimp/edi12 | bin/scrape_tools.py | 3 | 1691 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 22 20:02:41 2016
@author: casimp
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import pandas as pd
def spec_scrape(folder, save=False):
"""
Runs through a .spc file (located in folder with associated .edf files)
and extracts load, position, and slit size information.
"""
spec_file = sorted([x for x in os.listdir(folder) if x[-4:] == '.spc'])
error = 'Either zero or multiple .spc files have been found.'
assert len(spec_file) == 1, error
spec_file = spec_file[0]
scan = spec_file[:-4]
data_store = []
with open(os.path.join(folder, spec_file), 'r') as f:
lines = [line.rstrip('\n') for line in f][1:]
search = r'-?\ *[0-9]+\.?[0-9]*(?:[Ee]\ *-?\ *[0-9]+)?'
for idx, line in enumerate(lines):
if scan in line:
x, y = [float(i) for i in re.findall(search, lines[idx + 11])[2:4]]
slit_x = [float(i) for i in re.findall(search, lines[idx + 12])][7]
slit_y = [float(i) for i in re.findall(search, lines[idx + 13])][1]
scan_num = [float(i) for i in re.findall(search, lines[idx])][-1]
load = [float(i) for i in re.findall(search, lines[idx + 23])][-3]
data_store.append([int(scan_num), load, x, y, slit_x, slit_y])
df = pd.DataFrame(data_store, columns=('Scan Number', 'Load (kN)',
'x (mm)', 'y (mm)', 'slit_x (mm)', 'slit_y (mm)'))
if save:
pd.to_pickle(df, os.path.join(folder, '%s.pkl' % scan))
return df
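# Illustrative usage (the folder path below is hypothetical):
#
#     df = spec_scrape('/path/to/edf_folder', save=True)
#     df.head()   # Scan Number, Load (kN), x (mm), y (mm), slit_x (mm), slit_y (mm)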
| mit |
erdc-cm/air-water-vv | 2d/benchmarks/quiescent_water_probe_benchmark/velocityPlot.py | 1 | 2110 | from numpy import *
from scipy import *
from pylab import *
import collections as cll
import csv
# Put relative path below
filename='combined_gauge.csv'
# Reading file
with open (filename, 'rb') as csvfile:
data=csv.reader(csvfile, delimiter=",")
a=[]
time=[]
probes=[]
nRows=0
for row in data:
# Time steps
if nRows!=0:
time.append(float(row[0]))
# Probes location ######################
if nRows==0: #
for i in row: #
if i!= ' time': #
i=float(i[14:24]) #
probes.append(i) #
########################################
row2=[]
for j in row:
if j!= ' time' and nRows>0.:
j=float(j)
row2.append(j)
a.append(row2)
nRows+=1
#####################################################################################
# Choose which probes to plot
x1 = 1
x2 = 2
u=[]
v=[]
for k in range(1,nRows):
u.append(a[k][x1])
v.append(a[k][x2])
# Plot velocity in time
import matplotlib.pyplot as plt
plt.plot(time,u,'r',label='u')
plt.plot(time,v,'b',label='v')
plt.legend( loc='upper left', numpoints = 1 )
plt.xlabel('time [sec]')
plt.ylabel('velocity [m/s]')
plt.suptitle('Velocity against time at (x,y)=(0.5,0.5)')
plt.xlim((0,1))
plt.ylim((-0.04,0.04))
plt.grid(True)
savefig('Velocity_in_time.png')
plt.show()
#####################################################################################
# Print an output file
info = open('probes.txt','w')
string1=str(probes)
string2=string1.replace('[',' ')
string3=string2.replace(']',' ')
string4=string3.replace(',','\t')
info.write(str('x')+'\t'+string4+'\n')
for j in range(1,nRows):
string5=str(a[j])
string6=string5.replace('[',' ')
string7=string6.replace(']',' ')
string8=string7.replace(',','\t')
info.write(string8+'\n')
info.close()
| mit |
tectronics/dicom-sr-qi | unported scripts/simulate_data2.py | 2 | 2169 | import os, sys
#allow imports of standard srqi modules
srqi_containing_dir = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(srqi_containing_dir)
from srqi.core import my_utils
import numpy as np
import math
from datetime import date
import csv
SIMULATION = 'july'
#SIMULATION = 'improvement'
JULY_EFFECT = .2
WINDOW_SIZE = 400
def add_new_fellow_factor(proc):
    start_date = proc.get_start_date()
def main():
# get all the Syngo objects
procs, extra_procs = my_utils.get_procs_from_files(["C:\\Users\\mcstrother\\Documents\\Duncan Research\\srqi\\Data\\BJH\\NEW_____Combined months_IR_Syngo_KAR4_All-Exams.xls"])
for p in procs:
if p.has_syngo():
extra_procs.append(p.get_syngo())
syngo_procs = [p for p in extra_procs if not p.fluoro is None]
syngo_procs.sort(key = lambda p:p.get_start_date())
fake_fluoros = np.random.lognormal(math.log(5), .5, (len(syngo_procs)))
for i,p in enumerate(syngo_procs):
p.fluoro = fake_fluoros[i]
for i,p in enumerate(syngo_procs):
if p.rad1 == 'SAAD, N.':
p.rad1 = 'Simulated'
orig_fluoro =p.fluoro
if SIMULATION == 'improvement':
#physician improves 20% over the course of the time period
p.fluoro = p.fluoro * (1.0 - .2*i/len(syngo_procs))
if SIMULATION == 'july':
july = date(p.get_start_date().year, 7,1)
days = (p.get_start_date() - july).days #days since july 1
if days > 0:
p.fluoro = p.fluoro * (1 + JULY_EFFECT * (1-days/182.0))
from srqi.inquiries.operator_improvement import Operator_Improvement
import matplotlib.pyplot as plt
oi_cls = Operator_Improvement
oi_cls.PROCS_PER_WINDOW.set_value(WINDOW_SIZE)
oi_cls.MIN_REPS.set_value(100)
oi = oi_cls([], [], syngo_procs)
# write tables
writer = csv.writer(open('sim_out_w'+str(WINDOW_SIZE)+'j'+str(int(JULY_EFFECT*100))+'.csv', 'wb'))
for t in oi.get_tables():
writer.writerows(t)
oi.get_figures()
#plt.show()
if __name__ == '__main__':
main()
| bsd-2-clause |
jseabold/scikit-learn | examples/calibration/plot_calibration_curve.py | 17 | 5902 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
bloyl/mne-python | mne/io/fieldtrip/utils.py | 3 | 12205 | # -*- coding: UTF-8 -*-
# Authors: Thomas Hartmann <[email protected]>
# Dirk Gütlin <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from .._digitization import DigPoint
from ..constants import FIFF
from ..meas_info import create_info
from ..pick import pick_info
from ...transforms import rotation3d_align_z_axis
from ...utils import warn, _check_pandas_installed
_supported_megs = ['neuromag306']
_unit_dict = {'m': 1,
'cm': 1e-2,
'mm': 1e-3,
'V': 1,
'mV': 1e-3,
'uV': 1e-6,
'T': 1,
'T/m': 1,
'T/cm': 1e2}
NOINFO_WARNING = 'Importing FieldTrip data without an info dict from the ' \
'original file. Channel locations, orientations and types ' \
'will be incorrect. The imported data cannot be used for ' \
'source analysis, channel interpolation etc.'
def _validate_ft_struct(ft_struct):
"""Run validation checks on the ft_structure."""
if isinstance(ft_struct, list):
raise RuntimeError('Loading of data in cell arrays is not supported')
def _create_info(ft_struct, raw_info):
"""Create MNE info structure from a FieldTrip structure."""
if raw_info is None:
warn(NOINFO_WARNING)
sfreq = _set_sfreq(ft_struct)
ch_names = ft_struct['label']
if raw_info:
info = raw_info.copy()
missing_channels = set(ch_names) - set(info['ch_names'])
if missing_channels:
warn('The following channels are present in the FieldTrip data '
'but cannot be found in the provided info: %s.\n'
'These channels will be removed from the resulting data!'
% (str(missing_channels), ))
missing_chan_idx = [ch_names.index(ch) for ch in missing_channels]
new_chs = [ch for ch in ch_names if ch not in missing_channels]
ch_names = new_chs
ft_struct['label'] = ch_names
if 'trial' in ft_struct:
ft_struct['trial'] = _remove_missing_channels_from_trial(
ft_struct['trial'],
missing_chan_idx
)
if 'avg' in ft_struct:
if ft_struct['avg'].ndim == 2:
ft_struct['avg'] = np.delete(ft_struct['avg'],
missing_chan_idx,
axis=0)
info['sfreq'] = sfreq
ch_idx = [info['ch_names'].index(ch) for ch in ch_names]
pick_info(info, ch_idx, copy=False)
else:
info = create_info(ch_names, sfreq)
chs, dig = _create_info_chs_dig(ft_struct)
info.update(chs=chs, dig=dig)
info._update_redundant()
return info
def _remove_missing_channels_from_trial(trial, missing_chan_idx):
if isinstance(trial, list):
for idx_trial in range(len(trial)):
trial[idx_trial] = _remove_missing_channels_from_trial(
trial[idx_trial], missing_chan_idx
)
elif isinstance(trial, np.ndarray):
if trial.ndim == 2:
trial = np.delete(trial,
missing_chan_idx,
axis=0)
else:
raise ValueError('"trial" field of the FieldTrip structure '
'has an unknown format.')
return trial
def _create_info_chs_dig(ft_struct):
"""Create the chs info field from the FieldTrip structure."""
all_channels = ft_struct['label']
ch_defaults = dict(coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
cal=1.0,
range=1.0,
unit_mul=FIFF.FIFF_UNITM_NONE,
loc=np.array([0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1]),
unit=FIFF.FIFF_UNIT_V)
try:
elec = ft_struct['elec']
except KeyError:
elec = None
try:
grad = ft_struct['grad']
except KeyError:
grad = None
if elec is None and grad is None:
warn('The supplied FieldTrip structure does not have an elec or grad '
             'field. No channel locations will be extracted and the kind of '
'channel might be inaccurate.')
if 'chanpos' not in (elec or grad or {'chanpos': None}):
raise RuntimeError(
'This file was created with an old version of FieldTrip. You can '
'convert the data to the new version by loading it into FieldTrip '
'and applying ft_selectdata with an empty cfg structure on it. '
'Otherwise you can supply the Info field.')
chs = list()
dig = list()
counter = 0
for idx_chan, cur_channel_label in enumerate(all_channels):
cur_ch = ch_defaults.copy()
cur_ch['ch_name'] = cur_channel_label
cur_ch['logno'] = idx_chan + 1
cur_ch['scanno'] = idx_chan + 1
if elec and cur_channel_label in elec['label']:
cur_ch = _process_channel_eeg(cur_ch, elec)
# Ref gets ident=0 and we don't have it, so start at 1
counter += 1
d = DigPoint(
r=cur_ch['loc'][:3], coord_frame=FIFF.FIFFV_COORD_HEAD,
kind=FIFF.FIFFV_POINT_EEG, ident=counter)
dig.append(d)
elif grad and cur_channel_label in grad['label']:
cur_ch = _process_channel_meg(cur_ch, grad)
else:
if cur_channel_label.startswith('EOG'):
cur_ch['kind'] = FIFF.FIFFV_EOG_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG
elif cur_channel_label.startswith('ECG'):
cur_ch['kind'] = FIFF.FIFFV_ECG_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG_BIPOLAR
elif cur_channel_label.startswith('STI'):
cur_ch['kind'] = FIFF.FIFFV_STIM_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE
else:
warn('Cannot guess the correct type of channel %s. Making '
'it a MISC channel.' % (cur_channel_label,))
cur_ch['kind'] = FIFF.FIFFV_MISC_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_NONE
chs.append(cur_ch)
return chs, dig
def _set_sfreq(ft_struct):
"""Set the sample frequency."""
try:
sfreq = ft_struct['fsample']
except KeyError:
try:
time = ft_struct['time']
except KeyError:
raise ValueError('No Source for sfreq found')
else:
t1, t2 = float(time[0]), float(time[1])
sfreq = 1 / (t2 - t1)
try:
sfreq = float(sfreq)
except TypeError:
warn('FieldTrip structure contained multiple sample rates, trying the '
f'first of:\n{sfreq} Hz')
sfreq = float(sfreq.ravel()[0])
return sfreq
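# E.g. (worked sketch): without an 'fsample' field, a time axis starting
# [0.0, 0.004, 0.008, ...] yields sfreq = 1 / (0.004 - 0.0) = 250.0 Hz.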
def _set_tmin(ft_struct):
"""Set the start time before the event in evoked data if possible."""
times = ft_struct['time']
time_check = all(times[i][0] == times[i - 1][0]
for i, x in enumerate(times))
if time_check:
tmin = times[0][0]
else:
raise RuntimeError('Loading data with non-uniform '
'times per epoch is not supported')
return tmin
def _create_events(ft_struct, trialinfo_column):
"""Create an event matrix from the FieldTrip structure."""
if 'trialinfo' not in ft_struct:
return None
event_type = ft_struct['trialinfo']
event_number = range(len(event_type))
if trialinfo_column < 0:
raise ValueError('trialinfo_column must be positive')
available_ti_cols = 1
if event_type.ndim == 2:
available_ti_cols = event_type.shape[1]
if trialinfo_column > (available_ti_cols - 1):
raise ValueError('trialinfo_column is higher than the amount of'
'columns in trialinfo.')
event_trans_val = np.zeros(len(event_type))
if event_type.ndim == 2:
event_type = event_type[:, trialinfo_column]
events = np.vstack([np.array(event_number), event_trans_val,
event_type]).astype('int').T
return events
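# Illustrative sketch (hypothetical trialinfo): for ft_struct['trialinfo'] of
# [3, 3, 7] the returned matrix is
#
#     [[0, 0, 3],
#      [1, 0, 3],
#      [2, 0, 7]]
#
# i.e. trial index, a zero "previous value" column, and the event code.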
def _create_event_metadata(ft_struct):
"""Create event metadata from trialinfo."""
pandas = _check_pandas_installed(strict=False)
if not pandas:
warn('The Pandas library is not installed. Not returning the original '
'trialinfo matrix as metadata.')
return None
metadata = pandas.DataFrame(ft_struct['trialinfo'])
return metadata
def _process_channel_eeg(cur_ch, elec):
"""Convert EEG channel from FieldTrip to MNE.
Parameters
----------
cur_ch: dict
Channel specific dictionary to populate.
elec: dict
elec dict as loaded from the FieldTrip structure
Returns
-------
cur_ch: dict
The original dict (cur_ch) with the added information
"""
all_labels = np.asanyarray(elec['label'])
chan_idx_in_elec = np.where(all_labels == cur_ch['ch_name'])[0][0]
position = np.squeeze(elec['chanpos'][chan_idx_in_elec, :])
# chanunit = elec['chanunit'][chan_idx_in_elec] # not used/needed yet
position_unit = elec['unit']
position = position * _unit_dict[position_unit]
cur_ch['loc'] = np.hstack((position, np.zeros((9,))))
cur_ch['unit'] = FIFF.FIFF_UNIT_V
cur_ch['kind'] = FIFF.FIFFV_EEG_CH
cur_ch['coil_type'] = FIFF.FIFFV_COIL_EEG
cur_ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD
return cur_ch
def _process_channel_meg(cur_ch, grad):
"""Convert MEG channel from FieldTrip to MNE.
Parameters
----------
cur_ch: dict
Channel specific dictionary to populate.
grad: dict
grad dict as loaded from the FieldTrip structure
Returns
-------
dict: The original dict (cur_ch) with the added information
"""
all_labels = np.asanyarray(grad['label'])
chan_idx_in_grad = np.where(all_labels == cur_ch['ch_name'])[0][0]
gradtype = grad['type']
chantype = grad['chantype'][chan_idx_in_grad]
position_unit = grad['unit']
position = np.squeeze(grad['chanpos'][chan_idx_in_grad, :])
position = position * _unit_dict[position_unit]
if gradtype == 'neuromag306' and 'tra' in grad and 'coilpos' in grad:
# Try to regenerate original channel pos.
idx_in_coilpos = np.where(grad['tra'][chan_idx_in_grad, :] != 0)[0]
cur_coilpos = grad['coilpos'][idx_in_coilpos, :]
cur_coilpos = cur_coilpos * _unit_dict[position_unit]
cur_coilori = grad['coilori'][idx_in_coilpos, :]
if chantype == 'megmag':
position = cur_coilpos[0] - 0.0003 * cur_coilori[0]
if chantype == 'megplanar':
tmp_pos = cur_coilpos - 0.0003 * cur_coilori
position = np.average(tmp_pos, axis=0)
original_orientation = np.squeeze(grad['chanori'][chan_idx_in_grad, :])
try:
orientation = rotation3d_align_z_axis(original_orientation).T
except AssertionError:
orientation = np.eye(3)
assert orientation.shape == (3, 3)
orientation = orientation.flatten()
# chanunit = grad['chanunit'][chan_idx_in_grad] # not used/needed yet
cur_ch['loc'] = np.hstack((position, orientation))
cur_ch['kind'] = FIFF.FIFFV_MEG_CH
if chantype == 'megmag':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER
cur_ch['unit'] = FIFF.FIFF_UNIT_T
elif chantype == 'megplanar':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_VV_PLANAR_T1
cur_ch['unit'] = FIFF.FIFF_UNIT_T_M
elif chantype == 'refmag':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_MAG
cur_ch['unit'] = FIFF.FIFF_UNIT_T
elif chantype == 'refgrad':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_MAGNES_REF_GRAD
cur_ch['unit'] = FIFF.FIFF_UNIT_T
elif chantype == 'meggrad':
cur_ch['coil_type'] = FIFF.FIFFV_COIL_AXIAL_GRAD_5CM
cur_ch['unit'] = FIFF.FIFF_UNIT_T
else:
raise RuntimeError('Unexpected coil type: %s.' % (
chantype,))
cur_ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD
return cur_ch
| bsd-3-clause |
daniaki/pyPPI | pyppi/data_mining/generic.py | 1 | 11289 | #!/usr/bin/env python
"""
Author: Daniel Esposito
Date: 27/12/2015
Purpose: Generic parsing functions for various file formats and the
functionality to create data frames from the parsing results.
"""
import itertools
import numpy as np
from collections import OrderedDict
from ..base.io import generic_io
from ..base.utilities import remove_duplicates, is_null
from ..base.constants import PUBMED, EXPERIMENT_TYPE
from .tools import process_interactions, make_interaction_frame
INVALID_ACCESSIONS = ['', ' ', '-', 'unknown']
def validate_accession(accession):
"""Return None if an accession is invalid, else strip and uppercase it."""
if accession.strip().lower() in INVALID_ACCESSIONS:
return None
else:
return accession.strip().upper()
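# For example (follows directly from the definition above):
#
#     validate_accession(' p04637 ')        # -> 'P04637'
#     validate_accession('-')               # -> None
#     validate_accession('unknown')         # -> None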
def edgelist_func(fp):
"""
    Parsing function for a generic edgelist file.
fp : :class:`io.TextIOWrapper`
Open file handle containing the file to parse.
Returns
-------
`tuple[str, str, None]`
Source, target and label lists. Label is always a list of `None`
values.
"""
source_idx = 0
target_idx = 1
sources = []
targets = []
labels = []
# Remove header
fp.readline()
for line in fp:
xs = line.strip().split('\t')
source = validate_accession(xs[source_idx].strip().upper())
target = validate_accession(xs[target_idx].strip().upper())
sources.append(source)
targets.append(target)
labels.append(None)
return sources, targets, labels
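# Minimal sketch of the expected input (illustrative only; the accession values
# are arbitrary examples):
#
#     import io
#     handle = io.StringIO("source\ttarget\nP04637\tQ00987\n")
#     edgelist_func(handle)   # -> (['P04637'], ['Q00987'], [None])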
def bioplex_func(fp):
"""
Parsing function for bioplex tsv format.
fp : :class:`io.TextIOWrapper`
Open file handle containing the file to parse.
Returns
-------
`tuple[str, str, None]`
Source, target and label lists. Label is always a list of `None`
values.
"""
source_idx = 2
target_idx = 3
sources = []
targets = []
labels = []
# Remove header
fp.readline()
for line in fp:
xs = line.strip().split('\t')
source = validate_accession(xs[source_idx].strip().upper())
target = validate_accession(xs[target_idx].strip().upper())
sources.append(source)
targets.append(target)
labels.append(None)
return sources, targets, labels
def pina_sif_func(fp):
"""
    Parsing function for PINA SIF format files.
fp : :class:`io.TextIOWrapper`
Open file handle containing the file to parse.
Returns
-------
`tuple[str, str, None]`
Source, target and label lists. Label is always a list of `None`
values.
"""
source_idx = 0
target_idx = 2
sources = []
targets = []
labels = []
for line in fp:
xs = line.strip().split(' ')
source = validate_accession(xs[source_idx].strip().upper())
target = validate_accession(xs[target_idx].strip().upper())
sources.append(source)
targets.append(target)
labels.append(None)
return sources, targets, labels
def innate_mitab_func(fp):
"""
Parsing function for psimitab format files issued by `InnateDB`.
    Parameters
    ----------
    fp : :class:`io.TextIOWrapper`
Open file handle containing the file to parse.
Returns
-------
`tuple[str, str, None, str, str]`
Source, target, label, pubmed and psimi lists. Label is always a list
of `None` values. The other entries may be `None` if invalid values
        are encountered.
"""
uniprot_source_idx = 4
uniprot_target_idx = 5
source_idx = 2
target_idx = 3
d_method_idx = 6 # detection method
pmid_idx = 8
sources = []
targets = []
labels = []
pmids = []
experiment_types = []
# Remove header
fp.readline()
for line in fp:
xs = line.strip().split('\t')
ensembl_source = xs[source_idx].strip()
ensembl_target = xs[target_idx].strip()
if ('ENSG' not in ensembl_source) or ('ENSG' not in ensembl_target):
continue
        # These formats might contain multiple uniprot interactors in a
        # single line, or none. Skip the line (via `continue`) if none are
        # present.
source_ls = [
elem.split(':')[1] for elem in xs[uniprot_source_idx].split('|')
if ('uniprotkb' in elem and not '_' in elem)
]
target_ls = [
elem.split(':')[1] for elem in xs[uniprot_target_idx].split('|')
if ('uniprotkb' in elem) and (not '_' in elem)
]
if len(source_ls) < 1 or len(target_ls) < 1:
continue
d_method_line = xs[d_method_idx].strip()
d_psimi = None
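        # The detection-method field is assumed to look roughly like
        # 'psi-mi:"MI:0018"(two hybrid)'; the code below pulls the quoted
        # MI identifier (here 'MI:0018') out as the experiment type.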
if not is_null(d_method_line):
_, d_method_text = d_method_line.strip().split("psi-mi:")
_, d_psimi, _ = d_method_text.split('"')
if is_null(d_psimi):
d_psimi = None
else:
                d_psimi = d_psimi.strip().upper()
pmid_line = xs[pmid_idx].strip()
pmid = None
if not is_null(pmid_line):
pmid = pmid_line.split(':')[-1]
if is_null(pmid):
pmid = None
else:
                pmid = pmid.strip().upper()
# Iterate through the list of tuples, each tuple being a
# list of accessions found within a line for each of the two proteins.
for source, target in itertools.product(source_ls, target_ls):
source = validate_accession(source)
target = validate_accession(target)
if source is None or target is None:
continue
else:
label = None
sources.append(source)
targets.append(target)
labels.append(label)
pmids.append(pmid)
experiment_types.append(d_psimi)
return sources, targets, labels, pmids, experiment_types
def pina_mitab_func(fp):
"""
Parsing function for psimitab format files from `PINA2`.
    Parameters
    ----------
    fp : :class:`io.TextIOWrapper`
Open file handle containing the file to parse.
Returns
-------
`tuple[str, str, None, str, str]`
Source, target, label, pubmed and psimi lists. Label is always a list
of `None` values. The other entries may be `None` if invalid values
        are encountered.
"""
uniprot_source_idx = 0
uniprot_target_idx = 1
d_method_idx = 6 # detection method
pmid_idx = 8
sources = []
targets = []
labels = []
pubmed_ids = []
experiment_types = []
# Remove header
fp.readline()
for line in fp:
xs = line.strip().split('\t')
source = xs[uniprot_source_idx].split(':')[-1].strip().upper()
target = xs[uniprot_target_idx].split(':')[-1].strip().upper()
if is_null(target) or is_null(source):
continue
pmids = [x.split(':')[-1] for x in xs[pmid_idx].strip().split('|')]
psimis = [x.split('(')[0] for x in xs[d_method_idx].strip().split('|')]
assert len(psimis) == len(pmids)
annotations = OrderedDict()
for pmid, psimi in zip(pmids, psimis):
if is_null(pmid):
continue
pmid = pmid.strip().upper()
if not pmid in annotations:
annotations[pmid] = set()
if not is_null(psimi):
psimi = psimi.strip().upper()
annotations[pmid].add(psimi)
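        # Illustrative grouping (hypothetical values): if annotations ends up
        # as {'1234': {'MI:0018'}, '5678': set()}, then pmid_group becomes
        # '1234,5678' and psimi_groups becomes 'MI:0018,None' below.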
pmid_group = ','.join(annotations.keys()) or None
if pmid_group is not None:
for pmid, psimi_group in annotations.items():
annotations[pmid] = '|'.join(sorted(psimi_group)) or str(None)
psimi_groups = ','.join(annotations.values())
else:
psimi_groups = None
label = None
sources.append(source)
targets.append(target)
labels.append(label)
pubmed_ids.append(pmid_group)
experiment_types.append(psimi_groups)
return sources, targets, labels, pubmed_ids, experiment_types
def generic_to_dataframe(f_input, parsing_func, drop_nan=None,
allow_self_edges=False, allow_duplicates=False,
min_label_count=None, merge=False,
exclude_labels=None):
"""
Generic function to parse an interaction file using the supplied parsing
function into a dataframe object.
Parameters
----------
f_input : str
Path to file or a file handle
parsing_func : callable
function that accepts a file pointer object.
drop_nan : bool, str or list, default: None
Drop entries containing null values in any column. If 'default'
rows are dropped if null values occur in the `source`, `target` or
`label` columns. If a list of column names are supplied, then
rows are dropped if null values occur in either of those columns. If
False or None then no rows will be dropped. If True, rows with
a null value in any column are dropped.
    allow_self_edges : bool, default: False
        If False, removes rows for which `source` is equal to `target`.
    allow_duplicates : bool, default: False
        If False, removes rows for which `source`, `target` and `label` are
        the same. If different annotations are seen in the `pubmed` and
        `experiment_type` columns, then these are merged so they are not lost.
min_label_count : int, optional
First computes the counts of labels over all rows, then removes those
rows with labels that have less than the threshold count.
merge : bool, default: False
If True, merges entries with identical source and target columns. If
different annotations are seen in the `pubmed` and `experiment_type`
columns, then these are also merged so they are not lost.
exclude_labels : list, optional
List of labels to remove from the dataframe. All rows with label equal
to any in the supplied labels are removed.
Returns
-------
:class:`pandas.DataFrame`
With 'source', 'target' and 'label', 'pubmed' and 'experiment_type'
columns.
"""
lines = f_input
if isinstance(f_input, str):
lines = generic_io(f_input)
if parsing_func in (pina_mitab_func, innate_mitab_func):
sources, targets, labels, pmids, e_types = parsing_func(lines)
interactions = make_interaction_frame(
sources, targets, labels, pmids, e_types
)
else:
sources, targets, labels = parsing_func(lines)
interactions = make_interaction_frame(sources, targets, labels)
interactions = process_interactions(
interactions=interactions,
drop_nan=drop_nan,
allow_self_edges=allow_self_edges,
allow_duplicates=allow_duplicates,
exclude_labels=exclude_labels,
min_counts=min_label_count,
merge=merge
)
return interactions
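# A minimal usage sketch (file name and options are hypothetical):
#
#   df = generic_to_dataframe("interactions.sif", pina_sif_func,
#                             drop_nan='default', merge=True)
#
# would yield a dataframe with 'source', 'target', 'label', 'pubmed' and
# 'experiment_type' columns as described above.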
| mit |
sirfoga/pygce | setup.py | 1 | 1037 | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
DESCRIPTION = \
'pygce\n\n\
A tool to export, save, and analyze your Garmin Connect data.\n\
\n\
Install\n\n\
    - $ python3 setup.py install (install from source)\n\
\n\
Questions and issues\n\n\
The github issue tracker is only for bug reports and feature requests.'
VERSION = open('VERSION').readlines()[0]
VERSION_NUMBER = VERSION.split(' ')[0]
setup(
name='pygce',
version=VERSION_NUMBER,
author='sirfoga',
author_email='[email protected]',
description='pygce is an unofficial Garmin Connect data exporter.',
long_description=DESCRIPTION,
keywords='garmin data parser',
url='https://github.com/sirfoga/pygce',
packages=find_packages(),
entry_points={
'console_scripts': [
'pygce = pygce.cli:main'
]
},
install_requires=[
'bs4',
'pyhal',
'lxml',
'numpy',
'sklearn',
'selenium'
]
)
| mit |
siavooshpayandehazad/SoCDep2 | src/main/python/RoutingAlgorithms/RoutingGraph_Reports.py | 2 | 4357 | # Copyright (C) 2015 Siavoosh Payandeh Azad
from re import search
from ArchGraphUtilities.AG_Functions import return_node_location
import matplotlib.pyplot as plt
from networkx import draw
from ConfigAndPackages import Config
import matplotlib.patches as patches
def report_turn_model(turn_model):
"""
    Prints the turn model for a 2D network in the console.
    Only usable if there is a uniform turn model over the network.
:param turn_model: set of allowed turns in a 2D network
:return: None
"""
print("\tUSING TURN MODEL: ", turn_model)
return None
def draw_rg(rg):
"""
draws routing graph in GraphDrawings/RG.png
:param rg: routing graph
:return: None
"""
print("===========================================")
print("GENERATING ROUTING GRAPH VISUALIZATION...")
line_width = 2
pos = {}
color_list = []
plt.figure(figsize=(10*Config.ag.x_size, 10*Config.ag.y_size))
distance = 100*Config.ag.z_size
step = (distance*0.8)/Config.ag.z_size
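    # Layout note: routers that share the same (x, y) position but sit on
    # different z-layers are drawn shifted by `step` along both axes, so the
    # stacked 3D layers remain distinguishable in the flat 2D figure.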
for node in rg.nodes():
chosen_node = int(search(r'\d+', node).group())
location = return_node_location(chosen_node)
circle1 = plt.Circle((location[0]*distance+step*location[2], location[1]*distance+step*location[2]),
radius=35, color='#8ABDFF', fill=False, lw=line_width)
plt.gca().add_patch(circle1)
circle2 = plt.Circle((location[0]*distance+step*location[2]+45, location[1]*distance+step*location[2]-50),
radius=10, color='#FF878B', fill=False, lw=line_width)
plt.gca().add_patch(circle2)
plt.text(location[0]*distance+step*location[2]-30, location[1]*distance+step*location[2]+30,
str(chosen_node), fontsize=45)
offset_x = 0
offset_y = 0
if 'N' in node:
offset_y += 30
if 'I'in node:
color_list.append('#CFECFF')
offset_x += 12
else:
color_list.append('#FF878B')
offset_x -= 12
elif 'S' in node:
offset_y -= 30
if 'I'in node:
color_list.append('#CFECFF')
offset_x -= 12
else:
color_list.append('#FF878B')
offset_x += 12
elif 'W' in node:
offset_x -= 30
if 'I'in node:
color_list.append('#CFECFF')
offset_y += 12
else:
color_list.append('#FF878B')
offset_y -= 12
elif 'E' in node:
offset_x += 30
if 'I'in node:
color_list.append('#CFECFF')
offset_y -= 12
else:
color_list.append('#FF878B')
offset_y += 12
if 'L' in node:
if 'I'in node:
color_list.append('#CFECFF')
offset_x += 44
offset_y -= 56
else:
color_list.append('#FF878B')
offset_x += 48
offset_y -= 48
if 'U' in node:
offset_y = 16
if 'I'in node:
color_list.append('#CFECFF')
offset_x -= 15
else:
color_list.append('#FF878B')
offset_x += 15
if 'D' in node:
offset_y = -16
if 'I'in node:
color_list.append('#CFECFF')
offset_x -= 15
else:
color_list.append('#FF878B')
offset_x += 15
pos[node] = [location[0]*distance+offset_x+step*location[2], location[1]*distance+offset_y+step*location[2]]
draw(rg, pos, with_labels=False, arrows=False, node_size=140, node_color=color_list,
linewidths=line_width, width=line_width)
plt.text(0, -100, 'X', fontsize=15)
plt.text(-100, 0, 'Y', fontsize=15)
plt.text(-45, -45, 'Z', fontsize=15)
plt.gca().add_patch(patches.Arrow(-100, -100, 100, 0, width=10))
plt.gca().add_patch(patches.Arrow(-100, -100, 50, 50, width=10))
plt.gca().add_patch(patches.Arrow(-100, -100, 0, 100, width=10))
plt.savefig("GraphDrawings/RG.png", dpi=100)
plt.clf()
print("\033[35m* VIZ::\033[0mROUTING GRAPH DRAWING CREATED AT: GraphDrawings/RG.png")
return None
| gpl-2.0 |
cathyyul/sumo-0.18 | tools/net/visum_mapDistricts.py | 2 | 13960 | #!/usr/bin/env python
"""
@file visum_mapDistricts.py
@author Daniel Krajzewicz
@author Michael Behrisch
@date 2007-10-25
@version $Id: visum_mapDistricts.py 14425 2013-08-16 20:11:47Z behrisch $
This script reads two SUMO networks and lists of matching nodes,
maps the district (TAZ) nodes of the VISUM network onto the other
network and writes the resulting nodes, edges, connections and
district definitions as plain-XML files.
SUMO, Simulation of Urban MObility; see http://sumo-sim.org/
Copyright (C) 2008-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os, string, sys, StringIO
import math
from optparse import OptionParser
from matplotlib.collections import LineCollection
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import sumolib.net
import netshiftadaptor
def computeDistance(n1, n2):
xd = n1._coord[0]-n2._coord[0]
yd = n1._coord[1]-n2._coord[1]
return math.sqrt(xd*xd + yd*yd)
def relAngle(angle1, angle2):
angle2 -= angle1;
if angle2>180:
angle2 = (360. - angle2) * -1.;
while angle2<-180:
angle2 = 360 + angle2;
return angle2;
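# Worked example for the helper above: relAngle(350, 10) returns 20 and
# relAngle(10, 350) returns -20, i.e. the signed difference is normalised
# into the (-180, 180] range.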
# initialise
optParser = OptionParser()
optParser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="tell me what you are doing")
# i/o
optParser.add_option("-1", "--net1", dest="net1",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-2", "--net2", dest="net2",
help="SUMO network to use (mandatory)", metavar="FILE")
optParser.add_option("-a", "--nodes1", dest="nodes1",
help="The first matching nodes", metavar="NODELIST")
optParser.add_option("-b", "--nodes2", dest="nodes2",
help="The second matching nodes", metavar="NODELIST")
# parse options
(options, args) = optParser.parse_args()
# read networks
if options.verbose:
print "Reading net#1..."
net1 = sumolib.net.readNet(options.net1)
if options.verbose:
print "Reading net#2..."
net2 = sumolib.net.readNet(options.net2)
# reproject the visum net onto the navteq net
adaptor = netshiftadaptor.NetShiftAdaptor(net1, net2, options.nodes1.split(","), options.nodes2.split(","))
adaptor.reproject(options.verbose)
# build a speed-up grid
xmin = 100000
xmax = -100000
ymin = 100000
ymax = -100000
for n in net1._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
for n in net2._nodes:
xmin = min(xmin, n._coord[0])
xmax = max(xmax, n._coord[0])
ymin = min(ymin, n._coord[1])
ymax = max(ymax, n._coord[1])
xmin = xmin - .1
xmax = xmax + .1
ymin = ymin - .1
ymax = ymax + .1
CELLSIZE = 100
arr1 = []
arr2 = []
for y in range(0, CELLSIZE):
arr1.append([])
arr2.append([])
for x in range(0, CELLSIZE):
arr1[-1].append([])
arr2[-1].append([])
cw = (xmax-xmin)/float(CELLSIZE)
ch = (ymax-ymin)/float(CELLSIZE)
for n in net2._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr1[int(cy)][int(cx)].append(n)
for n in net1._nodes:
cx = (n._coord[0] - xmin) / cw
cy = (n._coord[1] - ymin) / ch
arr2[int(cy)][int(cx)].append(n)
# map
nmap1to2 = {}
nmap2to1 = {}
nodes1 = net2._nodes
nodes2 = net1._nodes
highwayNodes2 = set()
highwaySinks2 = set()
highwaySources2 = set()
urbanNodes2 = set()
for n2 in nodes2:
noIncoming = 0
noOutgoing = 0
for e in n2._outgoing:
if e.getSpeed()>80./3.6 and e.getSpeed()<99:
highwayNodes2.add(n2)
if e.getSpeed()<99:
noOutgoing = noOutgoing + 1
for e in n2._incoming:
if e.getSpeed()>80./3.6 and e.getSpeed()<99:
highwayNodes2.add(n2)
if e.getSpeed()<99:
noIncoming = noIncoming + 1
if n2 in highwayNodes2:
if noOutgoing==0:
highwaySinks2.add(n2)
if noIncoming==0:
highwaySources2.add(n2)
else:
urbanNodes2.add(n2)
print "Found " + str(len(highwaySinks2)) + " highway sinks in net2"
cont = ""
for n in highwaySinks2:
cont = cont + n._id + ", "
print cont
cont = ""
print "Found " + str(len(highwaySources2)) + " highway sources in net2"
for n in highwaySources2:
cont = cont + n._id + ", "
print cont
fdd = open("dconns.con.xml", "w")
fdd.write("<connections>\n");
highwaySinks1 = set()
highwaySources1 = set()
origDistrictNodes = {}
nnn = {}
for n1 in nodes1:
if n1._id.find('-', 1)<0:
continue
# if n1._id.find("38208387")<0:
# continue
un1 = None
for e in n1._outgoing:
un1 = e._to
for e in n1._incoming:
un1 = e._from
d = n1._id[:n1._id.find('-', 1)]
if d[0]=='-':
d = d[1:]
if d not in origDistrictNodes:
origDistrictNodes[d] = []
if options.verbose:
print "District: " + d
isHighwayNode = False
isHighwaySink = False
isHighwaySource = False
noIncoming = 0
noOutgoing = 0
noInConns = 0
noOutConns = 0
for e in un1._outgoing:
if e.getSpeed()>80./3.6 and e.getSpeed()<99:
isHighwayNode = True
if e.getSpeed()<99:
noOutgoing = noOutgoing + 1
if e.getSpeed()>99:
noOutConns = noOutConns + 1
for e in un1._incoming:
if e.getSpeed()>80./3.6 and e.getSpeed()<99:
isHighwayNode = True
if e.getSpeed()<99:
noIncoming = noIncoming + 1
if e.getSpeed()>99:
noInConns = noInConns + 1
if options.verbose:
print "Check", un1._id, noOutgoing, noIncoming
if isHighwayNode:
if noOutgoing==0:
highwaySinks1.add(n1)
isHighwaySink = True
if noIncoming==0:
highwaySources1.add(n1)
isHighwaySource = True
# the next is a hack for bad visum-networks
if noIncoming==1 and noOutgoing==1 and noInConns==1 and noOutConns==1:
highwaySinks1.add(n1)
isHighwaySink = True
highwaySources1.add(n1)
isHighwaySource = True
best = None
bestDist = -1
check = urbanNodes2
if n1 in highwaySinks1:
check = highwaySinks2
elif n1 in highwaySources1:
check = highwaySources2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if bestDist==-1 or bestDist>dist:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
if options.verbose:
print "a: " + d + "<->" + best._id
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
preBest = best
best = None
bestDist = -1
check = []
if n1 in highwaySinks1 or preBest in highwaySinks2:
check = highwaySources2
elif n1 in highwaySources1 or preBest in highwaySources2:
check = highwaySinks2
elif isHighwayNode:
check = highwayNodes2
for n2 in check:
dist = computeDistance(un1, n2)
if (bestDist==-1 or bestDist>dist) and n2!=preBest:
best = n2
bestDist = dist
if best:
nnn[best] = n1
if d not in nmap1to2:
nmap1to2[d] = []
if best not in nmap1to2[d]:
nmap1to2[d].append(best)
if best not in nmap2to1:
nmap2to1[best] = []
if n1 not in nmap2to1[best]:
nmap2to1[best].append(n1)
print "b: " + d + "<->" + best._id
if best not in origDistrictNodes[d]:
origDistrictNodes[d].append(best)
if options.verbose:
print "Found " + str(len(highwaySinks1)) + " highway sinks in net1"
for n in highwaySinks1:
print n._id
print "Found " + str(len(highwaySources1)) + " highway sources in net1"
for n in highwaySources1:
print n._id
connectedNodesConnections = {}
for d in nmap1to2:
for n2 in nmap1to2[d]:
if n2 in connectedNodesConnections:
continue
n1i = net1.addNode("i" + n2._id, nnn[n2]._coord)
n1o = net1.addNode("o" + n2._id, nnn[n2]._coord)
haveIncoming = False
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0]!="i" and e._id[0]!="o":
haveIncoming = True
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
haveOutgoing = False
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0]!="i" and e._id[0]!="o":
haveOutgoing = True
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
if haveIncoming:
e1 = net1.addEdge("o" + n2._id, n2._id, n1o._id, -2)
if haveOutgoing:
net1.addLane(e1, 20, 100.)
else:
for i in range(0, incomingLaneNo):
net1.addLane(e1, 20, 100.)
if len(n2._incoming)==1:
fdd.write(' <connection from="' + n2._incoming[0]._id + '" to="' + e1._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
if haveOutgoing:
if options.verbose:
print "has outgoing"
e2 = net1.addEdge("i" + n2._id, n1i._id, n2._id, -2)
if haveIncoming:
net1.addLane(e2, 20, 100.)
else:
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 20, 100.)
if len(n2._outgoing)==1:
fdd.write(' <connection from="' + e2._id + '" to="' + n2._outgoing[0]._id + '" lane="' + str(i) + ':' + str(i) + '"/>\n')
connectedNodesConnections[n2] = [ n1i, n1o ]
newDistricts = {}
districtSources = {}
districtSinks = {}
mappedDistrictNodes = {}
connNodes = {}
dRemap = {}
for d in nmap1to2:
newDistricts[d] = []
if len(nmap1to2[d])==1:
n = nmap1to2[d][0]
if n in dRemap:
districtSources[d] = districtSources[dRemap[n]]
districtSinks[d] = districtSinks[dRemap[n]]
newDistricts[d] = []
newDistricts[d].append(n._id)
continue
else:
dRemap[n] = d
[ ni, no ] = connectedNodesConnections[n]
if len(ni._outgoing)>0:
districtSources[d] = ni._outgoing[0]._id
if len(no._incoming)>0:
districtSinks[d] = no._incoming[0]._id
fdd.write(' <connection from="' + no._incoming[0]._id + '"/>\n')
else:
incomingLaneNoG = 0
outgoingLaneNoG = 0
for n in nmap1to2[d]:
for e in n._incoming:
if e._id[0]!="i" and e._id[0]!="o":
incomingLaneNoG = incomingLaneNoG + e.getLaneNumber()
for e in n._outgoing:
if e._id[0]!="i" and e._id[0]!="o":
outgoingLaneNoG = outgoingLaneNoG + e.getLaneNumber()
p1 = [ 0, 0 ]
p11 = [ 0, 0 ]
p12 = [ 0, 0 ]
p2 = [ 0, 0 ]
for n in nmap1to2[d]:
p1[0] = p1[0] + n._coord[0]
p1[1] = p1[1] + n._coord[1]
p2[0] = p2[0] + nnn[n]._coord[0]
p2[1] = p2[1] + nnn[n]._coord[1]
p2[0] = (p1[0] + p2[0]) / float(len(origDistrictNodes[d])*2)
p2[1] = (p1[1] + p2[1]) / float(len(origDistrictNodes[d])*2)
dn2i = net1.addNode("cci" + d, p2)
dn2o = net1.addNode("cci" + d, p2)
p11[0] = p1[0] / float(len(origDistrictNodes[d]))
p11[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1o = net1.addNode("co" + d, p11)
e1 = net1.addEdge("co" + d, dn1o._id, dn2o._id, -2)
for i in range(0, incomingLaneNoG):
net1.addLane(e1, 22, 100.)
districtSinks[d] = e1._id
p12[0] = p1[0] / float(len(origDistrictNodes[d]))
p12[1] = p1[1] / float(len(origDistrictNodes[d]))
dn1i = net1.addNode("ci" + d, p12)
e2 = net1.addEdge("ci" + d, dn2i._id, dn1i._id, -2)
for i in range(0, outgoingLaneNoG):
net1.addLane(e2, 21, 100.)
districtSources[d] = e2._id
runningOutLaneNumber = 0
runningInLaneNumber = 0
for n2 in nmap1to2[d]:
[ ni, no ] = connectedNodesConnections[n2]
print "In: " + ni._id + " " + str(len(ni._incoming)) + " " + str(len(ni._outgoing))
print "Out: " + no._id+ " " + str(len(no._incoming)) + " " + str(len(no._outgoing))
if len(no._incoming)>0:
incomingLaneNo = 0
for e in n2._incoming:
if e._id[0]!="i" and e._id[0]!="o":
incomingLaneNo = incomingLaneNo + e.getLaneNumber()
e1 = net1.addEdge("o" + d + "#" + n2._id, no._id, dn1o._id, -2)
for i in range(0, incomingLaneNo):
net1.addLane(e1, 19, 100.)
fdd.write(' <connection from="' + "o" + d + "#" + n2._id + '" to="' + dn1o._outgoing[0]._id + '" lane="' + str(i) + ':' + str(runningOutLaneNumber) + '"/>\n')
runningOutLaneNumber = runningOutLaneNumber + 1
fdd.write(' <connection from="' + dn1o._outgoing[0]._id + '"/>\n')
if incomingLaneNo==0:
net1.addLane(e1, 19, 100.)
runningOutLaneNumber = runningOutLaneNumber + 1
if len(ni._outgoing)>0:
outgoingLaneNo = 0
for e in n2._outgoing:
if e._id[0]!="i" and e._id[0]!="o":
outgoingLaneNo = outgoingLaneNo + e.getLaneNumber()
e2 = net1.addEdge("i" + d + "#" + n2._id, dn1i._id, ni._id, -2)
for i in range(0, outgoingLaneNo):
net1.addLane(e2, 18, 100.)
fdd.write(' <connection from="' + dn1i._incoming[0]._id + '" to="' + "i" + d + "#" + n2._id + '" lane="' + str(runningInLaneNumber) + ':' + str(i) + '"/>\n')
runningInLaneNumber = runningInLaneNumber + 1
if outgoingLaneNo==0:
net1.addLane(e2, 18, 100.)
runningInLaneNumber = runningInLaneNumber + 1
fd = open("districts.xml", "w")
fd.write("<tazs>\n")
for d in newDistricts:
fd.write(' <taz id="' + d + '">\n')
if d in districtSources:
fd.write(' <tazSource id="' + districtSources[d]+ '" weight="1"/>\n')
if d in districtSinks:
fd.write(' <tazSink id="' + districtSinks[d] + '" weight="1"/>\n')
fd.write(' </taz>\n')
fd.write("</tazs>\n")
fd.close()
def writeNode(fd, node):
fd.write(" <node id=\"" + node._id + "\" x=\"" + str(node._coord[0]) + "\" y=\"" + str(node._coord[1]) + "\"/>\n")
def writeEdge(fd, edge, withGeom=True):
fd.write(" <edge id=\"" + edge._id + "\" fromNode=\"" + edge._from._id + "\" toNode=\"" + edge._to._id)
fd.write("\" speed=\"" + str(edge._speed))
fd.write("\" priority=\"" + str(edge._priority))
if withGeom:
fd.write("\" spreadType=\"center")
fd.write("\" numLanes=\"" + str(len(edge._lanes)) + "\"")
shape = edge.getShape()
if withGeom:
fd.write(" shape=\"")
for i,c in enumerate(shape):
if i!=0:
fd.write(" ")
fd.write(str(c[0]) + "," + str(c[1]))
fd.write("\"")
fd.write("/>\n")
def writeNodes(net):
fd = open("nodes.xml", "w")
fd.write("<nodes>\n")
for node in net._nodes:
writeNode(fd, node)
fd.write("</nodes>\n")
fd.close()
def writeEdges(net):
fd = open("edges.xml", "w")
fd.write("<edges>\n")
for edge in net._edges:
if edge._id.find("#")>0 or edge._id.find("c")>=0 or edge._id.find("i")>=0:
writeEdge(fd, edge, False)
else:
writeEdge(fd, edge)
fd.write("</edges>\n")
fd.close()
fdd.write("</connections>\n");
writeNodes(net1)
writeEdges(net1)
| gpl-3.0 |
rvraghav93/scikit-learn | examples/linear_model/plot_lasso_dense_vs_sparse_data.py | 54 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
# #############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
# #############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
hachikuji/kafka | system_test/utils/metrics.py | 89 | 13937 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# file: metrics.py
# ===================================
import inspect
import json
import logging
import os
import signal
import subprocess
import sys
import traceback
import csv
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy
from pyh import *
import kafka_system_test_utils
import system_test_utils
logger = logging.getLogger("namedLogger")
thisClassName = '(metrics)'
d = {'name_of_class': thisClassName}
attributeNameToNameInReportedFileMap = {
'Min': 'min',
'Max': 'max',
'Mean': 'mean',
'50thPercentile': 'median',
'StdDev': 'stddev',
'95thPercentile': '95%',
'99thPercentile': '99%',
'999thPercentile': '99.9%',
'Count': 'count',
'OneMinuteRate': '1 min rate',
'MeanRate': 'mean rate',
'FiveMinuteRate': '5 min rate',
'FifteenMinuteRate': '15 min rate',
'Value': 'value'
}
def getCSVFileNameFromMetricsMbeanName(mbeanName):
return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv"
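# For example, a bean name such as
# 'kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec' (hypothetical)
# maps to 'kafka.server.BrokerTopicMetrics.MessagesInPerSec.csv'.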
def read_metrics_definition(metricsFile):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
allGraphs = []
for dashboard in allDashboards:
dashboardName = dashboard['name']
graphs = dashboard['graphs']
for graph in graphs:
bean = graph['bean_name']
allGraphs.append(graph)
attributes = graph['attributes']
#print "Filtering on attributes " + attributes
return allGraphs
def get_dashboard_definition(metricsFile, role):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
dashboardsForRole = []
for dashboard in allDashboards:
if dashboard['role'] == role:
dashboardsForRole.append(dashboard)
return dashboardsForRole
def ensure_valid_headers(headers, attributes):
if headers[0] != "# time":
raise Exception("First column should be time")
for header in headers:
logger.debug(header, extra=d)
# there should be exactly one column with a name that matches attributes
try:
attributeColumnIndex = headers.index(attributes)
return attributeColumnIndex
except ValueError as ve:
#print "#### attributes : ", attributes
#print "#### headers : ", headers
raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) +
" headers: {0}".format(",".join(headers)))
def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile):
if not inputCsvFiles: return
# create empty plot
fig=plt.figure()
fig.subplots_adjust(bottom=0.2)
ax=fig.add_subplot(111)
labelx = -0.3 # axes coords
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.grid()
#ax.yaxis.set_label_coords(labelx, 0.5)
Coordinates = namedtuple("Coordinates", 'x y')
plots = []
coordinates = []
# read data for all files, organize by label in a dict
for fileAndLabel in zip(inputCsvFiles, labels):
inputCsvFile = fileAndLabel[0]
label = fileAndLabel[1]
csv_reader = list(csv.reader(open(inputCsvFile, "rb")))
x,y = [],[]
xticks_labels = []
try:
# read first line as the headers
headers = csv_reader.pop(0)
attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute])
logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d)
start_time = (int)(os.path.getctime(inputCsvFile) * 1000)
int(csv_reader[0][0])
for line in csv_reader:
if(len(line) == 0):
continue
yVal = float(line[attributeColumnIndex])
xVal = int(line[0])
y.append(yVal)
epoch= start_time + int(line[0])
x.append(xVal)
xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch)))
coordinates.append(Coordinates(xVal, yVal))
p1 = ax.plot(x,y)
plots.append(p1)
except Exception as e:
logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d)
traceback.print_exc()
# find xmin, xmax, ymin, ymax from all csv files
xmin = min(map(lambda coord: coord.x, coordinates))
xmax = max(map(lambda coord: coord.x, coordinates))
ymin = min(map(lambda coord: coord.y, coordinates))
ymax = max(map(lambda coord: coord.y, coordinates))
# set x and y axes limits
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# set ticks accordingly
xticks = numpy.arange(xmin, xmax, 0.2*xmax)
# yticks = numpy.arange(ymin, ymax)
plt.xticks(xticks,xticks_labels,rotation=17)
# plt.yticks(yticks)
plt.legend(plots,labels, loc=2)
plt.title(title)
plt.savefig(outputGraphFile)
def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig):
# go through each role and plot graphs for the role's metrics
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
dashboards = get_dashboard_definition(metricsDescriptionFile, role)
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
for dashboard in dashboards:
graphs = dashboard['graphs']
# draw each graph for all entities
draw_graph_for_role(graphs, entities, role, testcaseEnv)
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
for graph in graphs:
graphName = graph['graph_name']
yLabel = graph['y_label']
inputCsvFiles = []
graphLegendLabels = []
for entity in entities:
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
if(not os.path.exists(entityMetricCsvFile)):
logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
else:
inputCsvFiles.append(entityMetricCsvFile)
graphLegendLabels.append(role + "-" + entity['entity_id'])
# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
try:
# plot one graph per mbean attribute
labels = graph['y_label'].split(',')
fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
attributes = graph['attributes'].split(',')
for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes):
outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg"
plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2],
"time", labelAndAttribute[0], labelAndAttribute[2], outputGraphFile)
# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
except Exception as e:
logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
traceback.print_exc()
def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig):
metricsHtmlFile = testcaseDashboardsDir + "/metrics.html"
centralDashboard = PyH('Kafka Metrics Dashboard')
centralDashboard << h1('Kafka Metrics Dashboard', cl='center')
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role,
entities, testcaseDashboardsDir)
centralDashboard << a(role, href = dashboardPagePath)
centralDashboard << br()
centralDashboard.printOut(metricsHtmlFile)
def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir):
# build all dashboards for the input entity's based on its role. It can be one of kafka, zookeeper, producer
# consumer
dashboards = get_dashboard_definition(metricsDefinitionFile, role)
entityDashboard = PyH('Kafka Metrics Dashboard for ' + role)
entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center')
entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html"
for dashboard in dashboards:
# place the graph svg files in this dashboard
allGraphs = dashboard['graphs']
for graph in allGraphs:
attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
for attribute in attributes:
graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg"
entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml")
entityDashboard.printOut(entityDashboardHtml)
return entityDashboardHtml
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
clusterConfig = systemTestEnv.clusterEntityConfigDictList
metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
mbeansForRole = get_mbeans_for_role(dashboardsForRole)
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")
for mbean in mbeansForRole:
outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
startMetricsCmdList = ["ssh " + jmxHost,
"'JAVA_HOME=" + javaHome,
"JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
"--jmx-url " + jmxUrl,
"--object-name " + mbean + " 1> ",
outputCsvFile + " & echo pid:$! > ",
entityMetricsDir + "/entity_pid'"]
startMetricsCommand = " ".join(startMetricsCmdList)
logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
system_test_utils.async_sys_call(startMetricsCommand)
time.sleep(1)
pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
# testcaseEnv.entityJmxParentPidDict:
# key: entity_id
# val: list of JMX ppid associated to that entity_id
# { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.debug("line: [" + line + "]", extra=d)
if line.startswith("pid"):
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
thisPid = tokens[1]
if entityId not in testcaseEnv.entityJmxParentPidDict:
testcaseEnv.entityJmxParentPidDict[entityId] = []
testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid)
#print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n"
def stop_metrics_collection(jmxHost, jmxPort):
logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d)
system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9")
def get_mbeans_for_role(dashboardsForRole):
graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole))
return set(map(lambda metric: metric['bean_name'], graphs))
| apache-2.0 |
Averroes/statsmodels | statsmodels/examples/l1_demo/short_demo.py | 33 | 3737 | """
You can fit your LikelihoodModel using l1 regularization by changing
the method argument and adding an argument alpha. See code for
details.
The Story
---------
The maximum likelihood (ML) solution works well when the number of data
points is large and the noise is small. When the ML solution starts
"breaking", the regularized solution should do better.
The l1 Solvers
--------------
The standard l1 solver is fmin_slsqp and is included with scipy. It
sometimes has trouble verifying convergence when the data size is
large.
The l1_cvxopt_cp solver is part of CVXOPT and this package needs to be
installed separately. It works well even for larger data sizes.
"""
from __future__ import print_function
from statsmodels.compat.python import range
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import pdb # pdb.set_trace()
## Load the data from Spector and Mazzeo (1980)
spector_data = sm.datasets.spector.load()
spector_data.exog = sm.add_constant(spector_data.exog)
N = len(spector_data.endog)
K = spector_data.exog.shape[1]
### Logit Model
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
## Standard logistic regression
logit_res = logit_mod.fit()
## Regularized regression
# Set the reularization parameter to something reasonable
alpha = 0.05 * N * np.ones(K)
# Use l1, which solves via a built-in (scipy.optimize) solver
logit_l1_res = logit_mod.fit_regularized(method='l1', alpha=alpha, acc=1e-6)
# Use l1_cvxopt_cp, which solves with a CVXOPT solver
logit_l1_cvxopt_res = logit_mod.fit_regularized(
method='l1_cvxopt_cp', alpha=alpha)
## Print results
print("============ Results for Logit =================")
print("ML results")
print(logit_res.summary())
print("l1 results")
print(logit_l1_res.summary())
print(logit_l1_cvxopt_res.summary())
### Multinomial Logit Example using American National Election Studies Data
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
mlogit_res = mlogit_mod.fit()
## Set the regularization parameter.
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
# Don't regularize the constant
alpha[-1,:] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(method='l1', alpha=alpha)
print(mlogit_l1_res.params)
#mlogit_l1_res = mlogit_mod.fit_regularized(
# method='l1_cvxopt_cp', alpha=alpha, abstol=1e-10, trim_tol=1e-6)
#print mlogit_l1_res.params
## Print results
print("============ Results for MNLogit =================")
print("ML results")
print(mlogit_res.summary())
print("l1 results")
print(mlogit_l1_res.summary())
#
#
#### Logit example with many params, sweeping alpha
spector_data = sm.datasets.spector.load()
X = spector_data.exog
Y = spector_data.endog
## Fit
N = 50 # number of points to solve at
K = X.shape[1]
logit_mod = sm.Logit(Y, X)
coeff = np.zeros((N, K)) # Holds the coefficients
alphas = 1 / np.logspace(-0.5, 2, N)
## Sweep alpha and store the coefficients
# QC check doesn't always pass with the default options.
# Use the options QC_verbose=True and disp=True
# to to see what is happening. It just barely doesn't pass, so I decreased
# acc and increased QC_tol to make it pass
for n, alpha in enumerate(alphas):
logit_res = logit_mod.fit_regularized(
method='l1', alpha=alpha, trim_mode='off', QC_tol=0.1, disp=False,
QC_verbose=True, acc=1e-15)
coeff[n,:] = logit_res.params
## Plot
plt.figure(1);plt.clf();plt.grid()
plt.title('Regularization Path');
plt.xlabel('alpha');
plt.ylabel('Parameter value');
for i in range(K):
plt.plot(alphas, coeff[:,i], label='X'+str(i), lw=3)
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
ltiao/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <[email protected]>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
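        # Note: the surface is evaluated on a fixed grid covering the plot
        # bounds (x_min..x_max, y_min..y_max) with a step of `delta` = 1.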
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
trankmichael/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
"""Implements feature hashing, aka the hashing trick.
This class turns sequences of symbolic feature names (strings) into
scipy.sparse matrices, using a hash function to compute the matrix column
corresponding to a name. The hash function employed is the signed 32-bit
version of Murmurhash3.
Feature names of type byte string are used as-is. Unicode strings are
converted to UTF-8 first, but no Unicode normalization is done.
Feature values must be (finite) numbers.
This class is a low-memory alternative to DictVectorizer and
CountVectorizer, intended for large-scale (online) learning and situations
where memory is tight, e.g. when running prediction code on embedded
devices.
Read more in the :ref:`User Guide <feature_hashing>`.
Parameters
----------
n_features : integer, optional
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
dtype : numpy type, optional
The type of feature values. Passed to scipy.sparse matrix constructors
as the dtype argument. Do not set this to bool, np.boolean or any
unsigned integer type.
input_type : string, optional
Either "dict" (the default) to accept dictionaries over
(feature_name, value); "pair" to accept pairs of (feature_name, value);
or "string" to accept single strings.
feature_name should be a string, while value should be a number.
In the case of "string", a value of 1 is implied.
The feature_name is hashed to find the appropriate column for the
feature. The value's sign might be flipped in the output (but see
non_negative, below).
    non_negative : boolean, optional, default False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
Examples
--------
>>> from sklearn.feature_extraction import FeatureHasher
>>> h = FeatureHasher(n_features=10)
>>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
>>> f = h.transform(D)
>>> f.toarray()
array([[ 0., 0., -4., -1., 0., 0., 0., 0., 0., 2.],
[ 0., 0., 0., -2., -5., 0., 0., 0., 0., 0.]])
See also
--------
DictVectorizer : vectorizes string-valued features using a hash table.
sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
encoded as columns of integers.
"""
def __init__(self, n_features=(2 ** 20), input_type="dict",
dtype=np.float64, non_negative=False):
self._validate_params(n_features, input_type)
self.dtype = dtype
self.input_type = input_type
self.n_features = n_features
self.non_negative = non_negative
@staticmethod
def _validate_params(n_features, input_type):
# strangely, np.int16 instances are not instances of Integral,
# while np.int64 instances are...
if not isinstance(n_features, (numbers.Integral, np.integer)):
raise TypeError("n_features must be integral, got %r (%s)."
% (n_features, type(n_features)))
elif n_features < 1 or n_features >= 2 ** 31:
raise ValueError("Invalid number of features (%d)." % n_features)
if input_type not in ("dict", "pair", "string"):
raise ValueError("input_type must be 'dict', 'pair' or 'string',"
" got %r." % input_type)
def fit(self, X=None, y=None):
"""No-op.
This method doesn't do anything. It exists purely for compatibility
with the scikit-learn transformer API.
Returns
-------
self : FeatureHasher
"""
# repeat input validation for grid search (which calls set_params)
self._validate_params(self.n_features, self.input_type)
return self
def transform(self, raw_X, y=None):
"""Transform a sequence of instances to a scipy.sparse matrix.
Parameters
----------
raw_X : iterable over iterable over raw features, length = n_samples
Samples. Each sample must be iterable an (e.g., a list or tuple)
containing/generating feature names (and optionally values, see
the input_type constructor argument) which will be hashed.
raw_X need not support the len function, so it can be the result
of a generator; n_samples is determined on the fly.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Feature matrix, for use with estimators or further transformers.
"""
raw_X = iter(raw_X)
if self.input_type == "dict":
raw_X = (_iteritems(d) for d in raw_X)
elif self.input_type == "string":
raw_X = (((f, 1) for f in x) for x in raw_X)
indices, indptr, values = \
_hashing.transform(raw_X, self.n_features, self.dtype)
n_samples = indptr.shape[0] - 1
if n_samples == 0:
raise ValueError("Cannot vectorize empty sequence.")
X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
shape=(n_samples, self.n_features))
X.sum_duplicates() # also sorts the indices
if self.non_negative:
np.abs(X.data, X.data)
return X
| bsd-3-clause |
cauchycui/scikit-learn | sklearn/neighbors/approximate.py | 128 | 22351 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
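# In other words, for each query hash the candidates whose most significant
# bits (as selected by left_mask/right_mask) match it occupy the contiguous
# slice tree[left_index:right_index] of the sorted hash array.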
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
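def _prefix_match_example():
    # Illustrative sketch (not part of the original module): shows how the
    # left/right masks used above select, in a sorted array of hashes, the
    # contiguous range of entries sharing a query's most significant bits.
    # The hash values below are made up for the example.
    tri_size = MAX_HASH_SIZE + 1
    left_bits = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
    left_masks = np.packbits(left_bits).view(dtype=HASH_DTYPE)
    right_masks = np.packbits(left_bits[::-1, ::-1]).view(dtype=HASH_DTYPE)
    tree = np.array([0x10000000, 0x1F000000, 0x20000000, 0xF0000000],
                    dtype=HASH_DTYPE)
    query = np.array([0x1A000000], dtype=HASH_DTYPE)
    # Entries 0 and 1 share the query's top 4 bits, so the half-open range
    # returned here is (0, 2).
    return _find_matching_indices(tree, query, left_masks[4], right_masks[4])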
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
        float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest()
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=None)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[i], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[i], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
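# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal end-to-end
# use of the estimator above.  Data sizes and parameters are made up.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_index = rng.randn(200, 10)
    X_query = rng.randn(3, 10)
    lshf = LSHForest(n_estimators=5, n_candidates=20, random_state=0)
    lshf.fit(X_index)
    dist, ind = lshf.kneighbors(X_query, n_neighbors=3)
    # Approximate cosine distances and indices of the 3 nearest neighbours.
    print(dist.shape)
    # New points can be appended without refitting from scratch; additions
    # should be batched because the cost grows with the total index size.
    lshf.partial_fit(rng.randn(50, 10))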
| bsd-3-clause |
arabenjamin/pybrain | examples/rl/valuebased/nfq.py | 25 | 1973 | #!/usr/bin/env python
from __future__ import print_function
__author__ = 'Thomas Rueckstiess, [email protected]'
from pybrain.rl.environments.cartpole import CartPoleEnvironment, DiscreteBalanceTask, CartPoleRenderer
from pybrain.rl.agents import LearningAgent
from pybrain.rl.experiments import EpisodicExperiment
from pybrain.rl.learners.valuebased import NFQ, ActionValueNetwork
from pybrain.rl.explorers import BoltzmannExplorer
from numpy import array, arange, meshgrid, pi, zeros, mean
from matplotlib import pyplot as plt
# switch this to True if you want to see the cart balancing the pole (slower)
render = False
plt.ion()
env = CartPoleEnvironment()
if render:
renderer = CartPoleRenderer()
env.setRenderer(renderer)
renderer.start()
module = ActionValueNetwork(4, 3)
task = DiscreteBalanceTask(env, 100)
learner = NFQ()
learner.explorer.epsilon = 0.4
agent = LearningAgent(module, learner)
testagent = LearningAgent(module, None)
experiment = EpisodicExperiment(task, agent)
def plotPerformance(values, fig):
plt.figure(fig.number)
plt.clf()
plt.plot(values, 'o-')
plt.gcf().canvas.draw()
# Without the next line, the pyplot plot won't actually show up.
plt.pause(0.001)
performance = []
if not render:
pf_fig = plt.figure()
while(True):
# one learning step after one episode of world-interaction
experiment.doEpisodes(1)
agent.learn(1)
# test performance (these real-world experiences are not used for training)
if render:
env.delay = True
experiment.agent = testagent
r = mean([sum(x) for x in experiment.doEpisodes(5)])
env.delay = False
testagent.reset()
experiment.agent = agent
performance.append(r)
if not render:
plotPerformance(performance, pf_fig)
print("reward avg", r)
print("explorer epsilon", learner.explorer.epsilon)
print("num episodes", agent.history.getNumSequences())
print("update step", len(performance))
| bsd-3-clause |
lewislone/mStocks | gadget/sdhub/tushare/datayes/market.py | 17 | 15547 | # -*- coding:utf-8 -*-
"""
DataYes (通联数据) open data API
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: [email protected]
"""
from pandas.compat import StringIO
import pandas as pd
from tushare.util import vars as vs
from tushare.util.common import Client
from tushare.util import upass as up
class Market():
def __init__(self, client=None):
if client is None:
self.client = Client(up.get_token())
else:
self.client = client
def TickRTSnapshot(self, securityID='', field=''):
"""
        High-frequency data: get the latest Level 1 quote for one or more
        securities. Pass one or more security codes, e.g. 000001.XSHG (SSE
        Composite Index) or 000001.XSHE (Ping An Bank), plus the desired
        fields, to get the latest trading snapshot. Securities can be stocks,
        indices, some bonds or funds.
"""
code, result = self.client.getData(vs.TICKRTSNAPSHOT%(securityID, field))
return _ret_data(code, result)
def TickRTSnapshotIndex(self, securityID='', field=''):
"""
        High-frequency data: get the latest Level 1 quotes for the
        constituents of an index. Pass an index code, e.g. 000001.XSHG (SSE
        Composite Index) or 000300.XSHG (CSI 300), plus the desired fields,
        to get the latest trading snapshot of the index constituents.
"""
code, result = self.client.getData(vs.TICKRTSNAPSHOTINDEX%(securityID, field))
return _ret_data(code, result)
def FutureTickRTSnapshot(self, instrumentID='', field=''):
"""
        High-frequency data: get the latest market snapshot for one or more
        futures contracts.
"""
code, result = self.client.getData(vs.FUTURETICKRTSNAPSHOT%(instrumentID, field))
return _ret_data(code, result)
def TickRTIntraDay(self, securityID='', endTime='', startTime='', field=''):
"""
        High-frequency data: get intraday Level 1 data for one security over
        a time window of the current day. Securities can be stocks, indices,
        some bonds or funds.
"""
code, result = self.client.getData(vs.TICKRTINTRADAY%(securityID, endTime, startTime, field))
return _ret_data(code, result)
def BarRTIntraDay(self, securityID='', endTime='', startTime='', field=''):
"""
        High-frequency data: get the current day's minute bars for one
        security. Pass a security code, e.g. 000001.XSHE (Ping An Bank), to
        get its minute bars for the day. Securities currently include stocks,
        indices, funds and some bonds. Valid minute-bar data runs from 09:30
        to 11:30 in the morning and from 13:01 to 15:00 in the afternoon.
"""
code, result = self.client.getData(vs.BARRTINTRADAY%(securityID, endTime, startTime, field))
return _ret_data(code, result)
def BarHistIntraDay(self, securityID='', date='', endTime='', startTime='', field=''):
"""
        High-frequency data: get historical minute bars for one security.
        Pass a security code, e.g. 000001.XSHE (Ping An Bank), to get its
        minute bars for the given day. Securities currently include stocks,
        indices, funds and some bonds. Valid minute-bar data runs from 09:30
        to 11:30 in the morning and from 13:01 to 15:00 in the afternoon.
"""
code, result = self.client.getData(vs.BARHISTONEDAY%(securityID, date, endTime, startTime, field))
return _ret_data(code, result)
def BarHistDayRange(self, securityID='', startDate='', endDate='', field=''):
code, result = self.client.getData(vs.BARHISTDAYRANGE%(securityID, startDate, endDate, field))
return _ret_data(code, result)
def FutureTickRTIntraDay(self, instrumentID='', endTime='', startTime='', field=''):
"""
        High-frequency data: get quotes for one futures contract over a time
        window within the current clearing day.
"""
code, result = self.client.getData(vs.FUTURETICKRTINTRADAY%(instrumentID, endTime, startTime, field))
return _ret_data(code, result)
def FutureBarsOneDay(self, instrumentID='', date='', field=''):
code, result = self.client.getData(vs.FUTUREBARINDAY%(instrumentID, date, field))
return _ret_data(code, result)
def FutureBarsDayRange(self, instrumentID='', startDate='', endDate='', field=''):
code, result = self.client.getData(vs.FUTUREBARDATERANGE%(instrumentID, startDate, endDate, field))
return _ret_data(code, result)
def StockFactorsOneDay(self, tradeDate='', secID='', ticker='', field=''):
"""
        High-frequency data: get factor data for multiple stocks on a given
        historical day.
"""
code, result = self.client.getData(vs.STOCKFACTORSONEDAY%(tradeDate, secID, ticker, field))
return _ret_data(code, result)
def StockFactorsDateRange(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        High-frequency data: get factor data for one stock over a historical
        date range.
"""
code, result = self.client.getData(vs.STOCKFACTORSDATERANGE%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def SecTips(self, tipsTypeCD='', field=''):
"""
        Today's list of stocks suspended from or resuming trading on the
        Shanghai and Shenzhen stock exchanges. Updated daily.
"""
code, result = self.client.getData(vs.SECTIPS%(tipsTypeCD, field))
return _ret_data(code, result)
def BarRTIntraDayOneMinute(self, time='', field=''):
"""
        Get the minute bar of a given minute for all stocks.
"""
code, result = self.client.getData(vs.BARRTINTRADAYONEMINUTE%(time, field))
return _ret_data(code, result)
def EquRTRank(self, desc='', exchangeCD='', field=''):
"""
        Get the ranking of Shanghai/Shenzhen stocks by price change.
"""
code, result = self.client.getData(vs.EQURTRANK%(desc, exchangeCD, field))
return _ret_data(code, result)
def MktMFutd(self, contractMark='', contractObject='', mainCon='', tradeDate='', endDate='',
startDate='', field=''):
"""
        Get quotes for the dominant contracts of the four major futures
        exchanges, Shanghai Gold Exchange gold (T+D) and silver (T+D), and
        continuous contracts of major overseas futures. History goes back to
        2006; updated daily at 16:00.
"""
code, result = self.client.getData(vs.MKTMFUTD%(contractMark, contractObject, mainCon,
tradeDate, endDate, startDate, field))
return _ret_data(code, result)
def OptionTickRTSnapshot(self, optionId='', field=''):
"""
        High-frequency data: get the latest market snapshot for options.
"""
code, result = self.client.getData(vs.OPTIONTICKRTSNAPSHOT%(optionId, field))
return _ret_data(code, result)
def FutureBarRTIntraDay(self, instrumentID='', endTime='', startTime='', field=''):
"""
        High-frequency data: get the current day's futures minute bars.
"""
code, result = self.client.getData(vs.FUTUREBARRTINTRADAY%(instrumentID, endTime, startTime, field))
return _ret_data(code, result)
def IndustryTickRTSnapshot(self, securityID='', field=''):
"""
        Get money flow by industry (CSRC industry classification), including
        the traded value of small, medium, large and extra-large orders as
        well as the total value of the current trades.
"""
code, result = self.client.getData(vs.INDUSTRYTICKRTSNAPSHOT%(securityID, field))
return _ret_data(code, result)
def MktEqudLately(self, field=''):
"""
        Get the most recent daily quote for individual Shanghai/Shenzhen
        stocks; the default date range is the past year. Fields include
        previous close, open, high, low, close, volume and turnover.
        Updated daily at 15:30.
"""
code, result = self.client.getData(vs.MKTEQUDLATELY%(field))
return _ret_data(code, result)
def MktEqud(self, secID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Get daily quotes for Shanghai/Shenzhen A and B shares; the default
        date range is the past year. Fields include previous close, open,
        high, low, close, volume and turnover. Updated daily at 15:30.
"""
code, result = self.client.getData(vs.MKTEQUD%(secID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktHKEqud(self, secID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Get daily open, close, high, low and volume data for stocks listed on
        the Hong Kong exchange. Updated before 17:00 each day.
"""
code, result = self.client.getData(vs.MKTHKEQUD%(secID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktBondd(self, secID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Get daily open, close, high, low and volume data for bond trading.
        Updated before 16:00 each day.
"""
code, result = self.client.getData(vs.MKTBONDD%(secID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktRepod(self, secID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Get daily open, close, high, low and volume data for bond repo
        trading. Updated before 16:00 each day.
"""
code, result = self.client.getData(vs.MKTREPOD%(secID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktFundd(self, secID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Get daily open, close, high, low and volume data for fund trading.
        Updated before 16:00 each day.
"""
code, result = self.client.getData(vs.MKTFUNDD%(secID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktFutd(self, secID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Get quotes for futures contracts of the four major futures exchanges,
        Shanghai Gold Exchange gold (T+D) and silver (T+D), and continuous
        contracts of major overseas futures. The default date range is the
        past year. Daily data is updated once right after the trading session
        (values may contain errors if the connection is unstable) and again
        at 18:00; the dominant contract is the one with the largest open
        interest over three consecutive trading days.
"""
code, result = self.client.getData(vs.MKTFUTD%(secID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktFutMTR(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get futures members' trading volume, volume ranking and volume
        changes for futures contracts on each trading day. Updated before
        16:00 each day.
"""
code, result = self.client.getData(vs.MKTFUTMTR%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def MktFutMSR(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get futures members' short positions, rankings and short-position
        changes for futures contracts on each trading day. Updated before
        16:00 each day.
"""
code, result = self.client.getData(vs.MKTFUTMSR%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def MktFutMLR(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get futures members' long positions, rankings and long-position
        changes for futures contracts on each trading day. Updated before
        16:00 each day.
"""
code, result = self.client.getData(vs.MKTFUTMLR%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def MktIdxd(self, indexID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Get daily index quotes, including previous close, open, high, low,
        close, volume and turnover. The default date range is the past year;
        Shanghai/Shenzhen index quotes are updated daily at 15:30.
"""
code, result = self.client.getData(vs.MKTIDXD%(indexID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktBlockd(self, secID='', ticker='', tradeDate='', assetClass='', beginDate='', endDate='', field=''):
"""
        Get block-trade prices, volumes and related data for trading days on
        the Shanghai and Shenzhen exchanges.
"""
code, result = self.client.getData(vs.MKTBLOCKD%(secID, ticker, tradeDate, assetClass, beginDate, endDate, field))
return _ret_data(code, result)
def MktOptd(self, optID='', secID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Records SSE option quotes, including previous settlement, previous
        close, open, high, low, close, settlement price, volume, turnover and
        open interest. Updated before 16:00 each day.
"""
code, result = self.client.getData(vs.MKTOPTD%(optID, secID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktEqudAdj(self, secID='', ticker='', tradeDate='', beginDate='', endDate='', field=''):
"""
        Get forward-adjusted daily quotes for Shanghai/Shenzhen A and B
        shares, including adjusted previous close, open, high, low and close
        prices. Updated before the market opens each day.
"""
code, result = self.client.getData(vs.MKTEQUDADJ%(secID, ticker, tradeDate, beginDate, endDate, field))
return _ret_data(code, result)
def MktAdjf(self, secID='', ticker='', field=''):
"""
        Get the forward-adjustment factors used to adjust Shanghai/Shenzhen
        A- and B-share quotes, including the ex-dividend/ex-rights date,
        details of the corporate action, the current adjustment factor, the
        cumulative adjustment factor and the cut-off date of the adjustment.
        The factor is used to adjust historical quotes, is not intended for
        forecasting, and is recalculated on the ex-dividend/ex-rights date.
"""
code, result = self.client.getData(vs.MKTADJF%(secID, ticker, field))
return _ret_data(code, result)
def MktFutdVol(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Get quotes for futures contracts of the four major futures exchanges.
        The default date range is the past year. Daily data is updated once
        right after the trading session (values may contain errors if the
        connection is unstable) and again at 18:00; the dominant contract is
        the one with the largest trading volume.
"""
code, result = self.client.getData(vs.MKTFUTDVOL%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def MktLimit(self, secID='', ticker='', tradeDate='', field=''):
"""
        Records the pre-market daily limit-up and limit-down prices of
        individual stocks and funds. Updated daily at 9:00.
"""
code, result = self.client.getData(vs.MKTLIMIT%(secID, ticker, tradeDate, field))
return _ret_data(code, result)
def MktFunddAdjAf(self, secID='', ticker='', beginDate='', endDate='', field=''):
"""
        Records funds' daily backward-adjusted quotes, including open, high,
        low, close, volume and traded prices.
"""
code, result = self.client.getData(vs.MKTFUNDDADJAF%(secID, ticker, beginDate, endDate, field))
return _ret_data(code, result)
def _ret_data(code, result):
if code==200:
result = result.decode('utf-8') if vs.PY3 else result
df = pd.read_csv(StringIO(result))
return df
else:
print(result)
return None
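# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal use of the
# Market wrapper above.  It assumes a valid DataYes token has already been
# stored via tushare.util.upass; the ticker and field names are made up for
# the illustration.
if __name__ == "__main__":
    mkt = Market()
    df = mkt.MktEqud(ticker='000001', beginDate='20150101',
                     endDate='20150301',
                     field='ticker,tradeDate,closePrice')
    # _ret_data returns a pandas DataFrame on success and None on failure.
    print(df.head() if df is not None else 'request failed')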
| mit |
pdamodaran/yellowbrick | tests/test_features/test_base.py | 1 | 1571 | # tests.test_features.test_base
# Tests for the feature selection and analysis base classes
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Fri Oct 07 13:43:55 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_base.py [2e898a6] [email protected] $
"""
Tests for the feature selection and analysis base classes
"""
##########################################################################
## Imports
##########################################################################
from yellowbrick.base import Visualizer
from yellowbrick.features.base import FeatureVisualizer
from tests.base import VisualTestCase
from sklearn.base import BaseEstimator, TransformerMixin
##########################################################################
## FeatureVisualizer Base Tests
##########################################################################
class FeatureVisualizerBaseTests(VisualTestCase):
def test_subclass(self):
"""
Assert the feature visualizer is in its rightful place
"""
visualizer = FeatureVisualizer()
self.assertIsInstance(visualizer, TransformerMixin)
self.assertIsInstance(visualizer, BaseEstimator)
self.assertIsInstance(visualizer, Visualizer)
# def test_interface(self):
# """
# Test the feature visualizer interface
# """
#
# visualizer = FeatureVisualizer()
# with self.assertRaises(NotImplementedError):
# visualizer.poof()
| apache-2.0 |
simon-pepin/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to be in Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedures (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
    -----
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
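###############################################################################
# Illustrative sketch (not part of the original module): both estimators above
# fitted on a tiny synthetic problem with sparse true coefficients, to show
# that ARD drives irrelevant weights towards zero.  Sizes are made up.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)
    true_coef = np.zeros(10)
    true_coef[:3] = [1., 2., -1.]
    y = np.dot(X, true_coef) + 0.1 * rng.randn(100)
    for model in (BayesianRidge(compute_score=True), ARDRegression()):
        model.fit(X, y)
        print(model.__class__.__name__, np.round(model.coef_, 2))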
| bsd-3-clause |
daniel-muthukrishna/EmissionLineAnalysis | setup.py | 1 | 3479 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='FitELP',
version='0.1.0',
      description='A Python package for fitting spectral emission lines with multiple Gaussian components in echelle or long-slit data.',
      long_description=long_description,
url='https://github.com/daniel-muthukrishna/EmissionLineAnalysis',
author='Daniel Muthukrishna',
author_email='[email protected]',
license='MIT',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
keywords='spectra emission spectral line fitting gaussian continuum echelle long-slit',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['numpy', 'matplotlib', 'uncertainties', 'lmfit==0.9.10', 'astropy'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['*.rst', '*.md'],
},
include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sample=run_analysis:main',
],
},
)
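# Illustrative usage note (not part of the original file): with the metadata
# above, a development install is typically done from the repository root with
#     pip install -e .
# and, once the commented-out extras_require block is enabled, the optional
# dependency groups can be pulled in with
#     pip install -e .[dev,test]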
| mit |
michaelpacer/pyhawkes | examples/inference/vb_demo.py | 2 | 5282 | import numpy as np
import os
import cPickle
import gzip
# np.seterr(all='raise')
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from pyhawkes.models import \
DiscreteTimeNetworkHawkesModelGammaMixture, \
DiscreteTimeStandardHawkesModel
init_with_map = True
do_plot = False
def demo(seed=None):
"""
    Fit a weakly sparse (weak spike-and-slab) network Hawkes model with
    mean-field variational inference.
    :return:
"""
if seed is None:
seed = np.random.randint(2**32)
print "Setting seed to ", seed
np.random.seed(seed)
###########################################################
# Load some example data.
# See data/synthetic/generate.py to create more.
###########################################################
data_path = os.path.join("data", "synthetic", "synthetic_K4_C1_T1000.pkl.gz")
with gzip.open(data_path, 'r') as f:
S, true_model = cPickle.load(f)
T = S.shape[0]
K = true_model.K
B = true_model.B
dt = true_model.dt
dt_max = true_model.dt_max
###########################################################
# Initialize with MAP estimation on a standard Hawkes model
###########################################################
if init_with_map:
init_len = T
print "Initializing with BFGS on first ", init_len, " time bins."
init_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=dt_max, B=B,
alpha=1.0, beta=1.0)
init_model.add_data(S[:init_len, :])
init_model.initialize_to_background_rate()
init_model.fit_with_bfgs()
else:
init_model = None
###########################################################
# Create a test weak spike-and-slab model
###########################################################
# Copy the network hypers.
# Give the test model p, but not c, v, or m
network_hypers = true_model.network_hypers.copy()
test_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, dt=dt, dt_max=dt_max, B=B,
basis_hypers=true_model.basis_hypers,
bkgd_hypers=true_model.bkgd_hypers,
impulse_hypers=true_model.impulse_hypers,
weight_hypers=true_model.weight_hypers,
network_hypers=network_hypers)
test_model.add_data(S)
# F_test = test_model.basis.convolve_with_basis(S_test)
# Initialize with the standard model parameters
if init_model is not None:
test_model.initialize_with_standard_model(init_model)
###########################################################
# Fit the test model with variational Bayesian inference
###########################################################
# VB coordinate descent
N_iters = 100
vlbs = []
samples = []
for itr in xrange(N_iters):
vlbs.append(test_model.meanfield_coordinate_descent_step())
print "VB Iter: ", itr, "\tVLB: ", vlbs[-1]
if itr > 0:
if (vlbs[-2] - vlbs[-1]) > 1e-1:
print "WARNING: VLB is not increasing!"
# Resample from variational distribution and plot
test_model.resample_from_mf()
samples.append(test_model.copy_sample())
###########################################################
# Analyze the samples
###########################################################
N_samples = len(samples)
# Compute sample statistics for second half of samples
A_samples = np.array([s.weight_model.A for s in samples])
W_samples = np.array([s.weight_model.W for s in samples])
g_samples = np.array([s.impulse_model.g for s in samples])
lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
vlbs = np.array(vlbs)
offset = N_samples // 2
A_mean = A_samples[offset:, ...].mean(axis=0)
W_mean = W_samples[offset:, ...].mean(axis=0)
g_mean = g_samples[offset:, ...].mean(axis=0)
lambda0_mean = lambda0_samples[offset:, ...].mean(axis=0)
# Plot the VLBs
plt.figure()
plt.plot(np.arange(N_samples), vlbs, 'k')
plt.xlabel("Iteration")
plt.ylabel("VLB")
plt.show()
# Compute the link prediction accuracy curves
auc_init = roc_auc_score(true_model.weight_model.A.ravel(),
init_model.W.ravel())
auc_A_mean = roc_auc_score(true_model.weight_model.A.ravel(),
A_mean.ravel())
auc_W_mean = roc_auc_score(true_model.weight_model.A.ravel(),
W_mean.ravel())
aucs = []
for A in A_samples:
aucs.append(roc_auc_score(true_model.weight_model.A.ravel(), A.ravel()))
plt.figure()
plt.plot(aucs, '-r')
plt.plot(auc_A_mean * np.ones_like(aucs), '--r')
plt.plot(auc_W_mean * np.ones_like(aucs), '--b')
plt.plot(auc_init * np.ones_like(aucs), '--k')
plt.xlabel("Iteration")
plt.ylabel("Link prediction AUC")
plt.show()
plt.ioff()
plt.show()
demo(11223344)
| mit |
hmendozap/auto-sklearn | autosklearn/pipeline/components/regression/extra_trees.py | 1 | 6869 | import numpy as np
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, \
UnParametrizedHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm
from autosklearn.pipeline.constants import *
class ExtraTreesRegressor(AutoSklearnRegressionAlgorithm):
def __init__(self, n_estimators, criterion, min_samples_leaf,
min_samples_split, max_features,
max_leaf_nodes_or_max_depth="max_depth",
bootstrap=False, max_leaf_nodes=None, max_depth="None",
oob_score=False, n_jobs=1, random_state=None, verbose=0):
self.n_estimators = int(n_estimators)
self.estimator_increment = 10
        if criterion not in ("mse",):
            raise ValueError("'criterion' is not in ('mse',): "
                             "%s" % criterion)
self.criterion = criterion
if max_leaf_nodes_or_max_depth == "max_depth":
self.max_leaf_nodes = None
if max_depth == "None":
self.max_depth = None
else:
self.max_depth = int(max_depth)
#if use_max_depth == "True":
# self.max_depth = int(max_depth)
#elif use_max_depth == "False":
# self.max_depth = None
else:
if max_leaf_nodes == "None":
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(max_leaf_nodes)
self.max_depth = None
self.min_samples_leaf = int(min_samples_leaf)
self.min_samples_split = int(min_samples_split)
self.max_features = float(max_features)
if bootstrap == "True":
self.bootstrap = True
elif bootstrap == "False":
self.bootstrap = False
self.oob_score = oob_score
self.n_jobs = int(n_jobs)
self.random_state = random_state
self.verbose = int(verbose)
self.estimator = None
def fit(self, X, y, refit=False):
if self.estimator is None or refit:
self.iterative_fit(X, y, n_iter=1, refit=refit)
while not self.configuration_fully_fitted():
self.iterative_fit(X, y, n_iter=1)
return self
def iterative_fit(self, X, y, n_iter=1, refit=False):
from sklearn.ensemble import ExtraTreesRegressor as ETR
if refit:
self.estimator = None
if self.estimator is None:
num_features = X.shape[1]
max_features = int(
float(self.max_features) * (np.log(num_features) + 1))
# Use at most half of the features
max_features = max(1, min(int(X.shape[1] / 2), max_features))
self.estimator = ETR(
n_estimators=0, criterion=self.criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
bootstrap=self.bootstrap,
max_features=max_features, max_leaf_nodes=self.max_leaf_nodes,
oob_score=self.oob_score, n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
warm_start=True
)
tmp = self.estimator # TODO copy ?
tmp.n_estimators += n_iter
tmp.fit(X, y,)
self.estimator = tmp
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
return not len(self.estimator.estimators_) < self.n_estimators
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'ET',
'name': 'Extra Trees Regressor',
'handles_regression': True,
'handles_classification': False,
'handles_multiclass': False,
'handles_multilabel': False,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
n_estimators = cs.add_hyperparameter(Constant("n_estimators", 100))
criterion = cs.add_hyperparameter(Constant("criterion", "mse"))
max_features = cs.add_hyperparameter(UniformFloatHyperparameter(
"max_features", 0.5, 5, default=1))
max_depth = cs.add_hyperparameter(
UnParametrizedHyperparameter(name="max_depth", value="None"))
min_samples_split = cs.add_hyperparameter(UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default=2))
min_samples_leaf = cs.add_hyperparameter(UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default=1))
# Unparametrized, we use min_samples as regularization
# max_leaf_nodes_or_max_depth = UnParametrizedHyperparameter(
# name="max_leaf_nodes_or_max_depth", value="max_depth")
# CategoricalHyperparameter("max_leaf_nodes_or_max_depth",
# choices=["max_leaf_nodes", "max_depth"], default="max_depth")
# min_weight_fraction_leaf = UniformFloatHyperparameter(
# "min_weight_fraction_leaf", 0.0, 0.1)
# max_leaf_nodes = UnParametrizedHyperparameter(name="max_leaf_nodes",
# value="None")
bootstrap = cs.add_hyperparameter(CategoricalHyperparameter(
"bootstrap", ["True", "False"], default="False"))
# Conditions
# Not applicable because max_leaf_nodes is no legal value of the parent
#cond_max_leaf_nodes_or_max_depth = \
# EqualsCondition(child=max_leaf_nodes,
# parent=max_leaf_nodes_or_max_depth,
# value="max_leaf_nodes")
#cond2_max_leaf_nodes_or_max_depth = \
# EqualsCondition(child=use_max_depth,
# parent=max_leaf_nodes_or_max_depth,
# value="max_depth")
#cond_max_depth = EqualsCondition(child=max_depth, parent=use_max_depth,
#value="True")
#cs.add_condition(cond_max_leaf_nodes_or_max_depth)
#cs.add_condition(cond2_max_leaf_nodes_or_max_depth)
#cs.add_condition(cond_max_depth)
return cs
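# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how this component
# can be driven outside of auto-sklearn's pipeline.  The hyperparameter values
# below mirror the defaults of the search space and the data is made up.
if __name__ == "__main__":
    X = np.random.RandomState(0).randn(200, 8)
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * np.random.RandomState(1).randn(200)
    reg = ExtraTreesRegressor(n_estimators=100, criterion="mse",
                              min_samples_leaf=1, min_samples_split=2,
                              max_features=1.0, bootstrap="False",
                              random_state=0)
    # fit() repeatedly calls iterative_fit() until all trees are grown.
    reg.fit(X, y)
    print(reg.predict(X[:5]))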
| bsd-3-clause |
jorge2703/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
voxlol/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 253 | 4158 | """
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <[email protected]>
# Peter Prettenhofer <[email protected]>
# Mathieu Blondel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
tjhei/burnman_old2 | example_optimize_slb2011.py | 2 | 2241 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
"""
This example is under construction.
requires:
teaches:
"""
import os, sys, numpy as np, matplotlib.pyplot as plt
import scipy.optimize as opt
#hack to allow scripts to be placed in subdirectories next to burnman:
#(must run before importing burnman, otherwise the import fails when run from a subdirectory)
if not os.path.exists('burnman') and os.path.exists('../burnman'):
    sys.path.insert(1,os.path.abspath('..'))
import burnman
from burnman import minerals
def calculate_forward_problem(frac, pressures):
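    # frac ~ [Mg# of the perovskite, anchor temperature (K) for the self-consistent geotherm,
    #         perovskite fraction of the rock (remainder periclase)] -- inferred from the usage below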
print frac
rock = burnman.composite( [ (minerals.SLB_2011.mg_perovskite(),frac[2]*frac[0] ),
(minerals.SLB_2011.fe_perovskite(), frac[2]*(1.0-frac[0]) ),
(minerals.SLB_2011.periclase(), (1.0-frac[2])) ,
(minerals.SLB_2011.wuestite(), 0.0*(1.0-frac[2])*(1.0-frac[0]) ) ] )
rock.set_method('slb3')
temperature = burnman.geotherm.self_consistent(pressures, frac[1], rock)
mat_rho, mat_vp, mat_vs, mat_vphi, mat_K, mat_G = burnman.velocities_from_rock(rock,pressures, temperature)
return mat_rho,mat_vs, mat_vphi
def error(guess, pressures, obs_rho, obs_vs, obs_vphi):
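    # relative least-squares misfit of density, vs and vphi against the observed (PREM) profiles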
[rho,vs,vphi] = calculate_forward_problem(guess, pressures )
vs_l2 = [ (vs[i] - obs_vs[i])*(vs[i] - obs_vs[i])/(obs_vs[i]*obs_vs[i]) for i in range(len(obs_vs)) ]
vphi_l2 = [ (vphi[i] - obs_vphi[i])*(vphi[i] - obs_vphi[i])/(obs_vphi[i]*obs_vphi[i]) for i in range(len(obs_vphi)) ]
rho_l2=[(rho[i] - obs_rho[i])*(rho[i]-obs_rho[i])/(obs_rho[i]*obs_rho[i]) for i in range(len(obs_rho)) ]
l2_error= sum(vphi_l2)+sum(vs_l2)+ sum(rho_l2)
print "current error:", l2_error
return l2_error
pressures=np.linspace(30e9,120e9,20)
prem = burnman.seismic.prem()
depths = map(prem.depth,pressures)
seis_p, prem_density, prem_vp, prem_vs, prem_vphi = prem.evaluate_all_at(depths)
#make the mineral to fit
guess = [0.95,2100,0.65]
lowerbounds=[0.5,0, 0,0,1800]
upperbounds=[1.0,0.2,0.5,0.2,2100]
#first, do the second-order fit
func = lambda x : error( x, pressures, prem_density, prem_vs, prem_vphi)
sol = opt.fmin(func, guess, xtol=0.8)
print sol
| gpl-2.0 |
adiIspas/Machine-Learning_A-Z | Machine Learning A-Z/Part 9 - Dimensionality Reduction/Section 44 - Linear Discriminant Analysis (LDA)/lda_me.py | 5 | 2836 | # LDA
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Wine.csv')
X = dataset.iloc[:, 0:13].values
y = dataset.iloc[:, 13].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Applying LDA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components = 2)
X_train = lda.fit_transform(X_train, y_train)
X_test = lda.transform(X_test)
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('LD1')
plt.ylabel('LD2')
plt.legend()
plt.show()
| mit |
rrozewsk/OurProject | Curves/PortfolioLoss/PortfolioLossCalculations.py | 1 | 18270 | from scipy.stats import norm, mvn #normal and bivariate normal
from numpy import sqrt
from numpy import exp
from datetime import date, timedelta
import datetime as dt
import pandas as pd
import numpy as np
from parameters import x0Vas
from Curves.Corporates.CorporateDaily import CorporateRates
from Products.Credit.CDS import CDS
from Scheduler.Scheduler import Scheduler
from scipy.integrate import quad
from Curves.PortfolioLoss.ExactFunc import ExactFunc
from parameters import xR,t_step, simNumber
from Boostrappers.CDSBootstrapper.CDSVasicekBootstrapper import BootstrapperCDSLadder
from MonteCarloSimulators.Vasicek.vasicekMCSim import MC_Vasicek_Sim
class PortfolioLossCalculation(object):
def __init__(self, K1, K2, Fs, Rs, betas,start_dates,end_dates,freqs,coupons,referenceDates,ratings,bootstrap):
self.bootstrap = bootstrap
self.K1 = K1
self.K2 = K2
self.Fs= Fs
self.Rs = Rs
self.betas = betas
self.referenceDates = referenceDates
self.freqs = freqs
self.coupons = coupons
self.start_dates = start_dates
self.end_dates = end_dates
self.ratings = ratings
self.R = Rs[0]
self.beta = betas[0]
self.referenceDate = referenceDates[0]
self.freq = freqs[0]
self.coupon = coupons[0]
self.start_date = start_dates[0]
self.end_date = end_dates[0]
self.rating = ratings[0]
self.maturity = end_dates[0]
self.myScheduler = Scheduler()
def setParameters(self,R,rating,coupon,beta,start_date,referenceDate,end_date,freq):
self.R = R
self.beta = beta
self.referenceDate = referenceDate
self.freq = freq
self.coupon = coupon
self.start_date = start_date
self.end_date = end_date
self.rating = rating
self.maturity = end_date
def getQ(self,start_date,referenceDate,end_date,freq,coupon,rating,R):
## Use CorporateDaily to get Q for referencedates ##
# print("GET Q")
# print(self.portfolioScheduleOfCF)
if self.bootstrap:
print("Q bootstrap")
CDSClass = CDS(start_date=start_date, end_date=end_date, freq=freq, coupon=coupon,
referenceDate=referenceDate,rating=rating, R=R)
myLad = BootstrapperCDSLadder(start=self.start_date, freq=[freq], CDSList=[CDSClass],
R=CDSClass.R).getXList(x0Vas)[freq]
self.Q1M = MC_Vasicek_Sim(x=myLad, t_step = 1 / 365,
datelist=[CDSClass.referenceDate, CDSClass.end_date],simNumber=simNumber).getLibor()[0]
print(self.Q1M)
else:
myQ = CorporateRates()
myQ.getCorporatesFred(trim_start=referenceDate, trim_end=end_date)
## Return the calculated Q(t,t_i) for bonds ranging over maturities for a given rating
daterange = pd.date_range(start=referenceDate, end=end_date).date
myQ = myQ.getCorporateQData(rating=rating, datelist=daterange, R=R)
Q1M = myQ[freq]
print(Q1M)
return(Q1M)
def Q_lhp(self,t, K1, K2, R, beta, Q):
"""Calculates the Tranche survival curve for the LHP model.
Args:
T (datetime.date): Should be given in "days" (no hours, minutes, etc.)
K1 (float): The starting value of the tranche. Its value should be
between 0 & 1.
K2 (float): The final value of the tranche.
beta (float): Correlation perameter. Its value should be between 0 and 1.
R (float): Recovery rate. Usually between 0 and 1.
Q (callable function): A function Q that takes in a dateime.date and
outputs a float from 0 to 1. It is assumed that Q(0) = 1 and Q is
decreasing. Represents survival curve of each credit.
"""
if Q(t) == 1:
return 1 # prevents infinity
def emin(K):
# Calculates E[min(L(T), K)] in LHP model
C = norm.ppf(1 - Q(t))
A = (1 / beta) * (C - sqrt(1 - beta * beta) * norm.ppf(K / (1 - R)))
return (1 - R) * mvn.mvndst(upper=[C, -1 * A],
lower=[0, 0],
infin=[0, 0], # set lower bounds = -infty
correl=-1 * beta)[1] + K * norm.cdf(A)
return 1 - (emin(K2) - emin(K1)) / (K2 - K1)
def Q_gauss(self,t, K1, K2, Fs, Rs, betas, Qs):
""" Calculate the tranche survival probability in the Gaussian heterogenous model.
Arguments:
            t (datetime.date): Time.
K1 (float): Starting tranche value. Between 0 and 1.
K2 (float): Ending tranche value. Between 0 and 1.
Fs (list): List of fractional face values for each credit. Each entry must be
between 0 and 1.
Rs (list): List of recovery rates for each credit. Each entry must be between
0 and 1.
betas (list): Correlation perameters for each credit. Each entry must be between
0 and 1.
Qs (list): Survival curves for each credit. Each entry must be a callable function
that takes in a datetime.date argument and returns a number from 0 to 1.
"""
Cs = [norm.ppf(1 - q(t)) for q in Qs]
N = len(Fs)
def f(K, z):
ps = [norm.cdf((C - beta * z) / sqrt(1 - beta ** 2)) for C, beta in zip(Cs, betas)]
mu = 1 / N * sum([p * F * (1 - R) for p, F, R in zip(ps, Fs, Rs)])
sigma_squared = 1 / N / N * sum([p * (1 - p) * F ** 2 * (1 - R) ** 2
for p, F, R in zip(ps, Fs, Rs)])
sigma = sqrt(sigma_squared)
return -1 * sigma * norm.pdf((mu - K) / sigma) - (mu - K) * norm.cdf((mu - K) / sigma)
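        # E[min(L(t), K)] is obtained by integrating the conditional value f(K, z) against
        # the standard normal density of the common factor z; the [-10, 10] limits cover
        # essentially all of the standard normal mass.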
emin = lambda K: quad(lambda z: norm.pdf(z) * f(K, z), -10, 10)[0]
return 1 - (emin(K2) - emin(K1)) / (K2 - K1)
def Q_adjbinom(self,t, K1, K2, Fs, Rs, betas, Qs):
""" Calculates the tranche survival probability under the adjusted
binomial model.
Arguments:
t (datetime.date): Time.
K1 (float): Starting tranche value (0 to 1).
K2 (float): Final tranche value (0 to 1).
Fs (list): List of fractional face values (floats) for each credit.
Rs (list): List of recovery rates (floats) for each credit.
betas (list): List of correlation perameters
Qs (list): List of survival probabilities. These are callable functions that
takes in a single datetime.date argument and returns a float.
Returns:
float: The value of the tranche survival curve.
"""
if Qs[0](t) == 1:
return 1.0 # initial value -- avoids weird nan return
N = len(Fs)
Cs = [norm.ppf(1 - Q(t)) for Q in Qs]
L = sum([(1 - R) * F for R, F in zip(Rs, Fs)]) / N
def choose(n, k): # Calculates binomial coeffecient: n choose k.
if k == 0 or k == n:
return 1
return choose(n - 1, k - 1) + choose(n - 1, k)
def g(k, z):
ps = [norm.cdf((C - beta * z) / sqrt(1 - beta * beta)) for C, beta in zip(Cs, betas)]
p_avg = sum([(1 - R) * F / L * p for R, F, p in zip(Rs, Fs, ps)]) / N
f = lambda k: choose(N, k) * p_avg ** k * (1 - p_avg) ** (N - k)
vA = p_avg * (1 - p_avg) / N
vE = 1 / N / N * sum([((1 - R) * F / L) ** 2 * p * (1 - p) for R, F, p in zip(Rs, Fs, ps)])
m = p_avg * N
l = int(m)
u = l + 1
o = (u - m) ** 2 + ((l - m) ** 2 - (u - m) ** 2) * (u - m)
alpha = (vE * N + o) / (vA * N + o)
if k == l:
return f(l) + (1 - alpha) * (u - m)
if k == u:
return f(u) - (1 - alpha) * (l - m)
return alpha * f(k)
I = lambda k: quad(lambda z: norm.pdf(z) * g(k, z), -10, 10)[0]
emin = lambda K: sum([I(k) * min(L * k, K) for k in range(0, N + 1)])
return 1 - (emin(K2) - emin(K1)) / (K2 - K1)
def gcd(self,a, b):
if a * b == 0:
return max(a, b)
if a < b:
return self.gcd(a, b % a)
return self.gcd(a % b, b)
def Q_exact(self,t, K1, K2, Fs, Rs, betas, Qs):
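        """ Tranche survival probability under the exact (recursive) heterogeneous model.
        Arguments mirror Q_adjbinom: time t (datetime.date), tranche bounds K1 and K2,
        and per-credit face values Fs, recovery rates Rs, correlations betas and
        survival curves Qs. Losses are expressed as integer multiples of a common
        unit g (an approximate gcd of the per-credit losses (1 - R) * F).
        """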
Cs = [norm.ppf(1 - Q(t)) for Q in Qs]
N = len(Fs)
g = round(3600 * Fs[0] * (1 - Rs[0]))
for j in range(1, N):
g = self.gcd(g, round(3600 * Fs[j] * (1 - Rs[j])))
g = g / 3600
ns = [round(F * (1 - R) / g) for F, R in zip(Fs, Rs)]
def f(j, k, z):
if (j, k) == (0, 0):
return 1.0
if j == 0:
return 0.0
ps = [norm.cdf((C - beta * z) / sqrt(1 - beta ** 2)) for C, beta in zip(Cs, betas)]
if k < ns[j - 1]:
return f(j - 1, k, z) * (1 - ps[j - 1])
return f(j - 1, k, z) * (1 - ps[j - 1]) + f(j - 1, k - ns[j - 1], z) * ps[j - 1]
I = [quad(lambda z: norm.pdf(z) * f(N, k, z), -12, 12)[0] for k in range(sum(ns))]
emin = lambda K: sum([I[k] * min(k * g, K) for k in range(sum(ns))])
return 1 - (emin(K2) - emin(K1)) / (K2 - K1)
def getZ_Vasicek(self):
### Get Z(t,t_i) for t_i in datelist #####
## Simulate Vasicek model with paramters given in workspace.parameters
# xR = [5.0, 0.05, 0.01, 0.05]
# kappa = x[0]
# theta = x[1]
# sigma = x[2]
# r0 = x[3]
vasicekMC = MC_Vasicek_Sim(datelist=[self.referenceDate, self.end_date], x=xR, simNumber=10, t_step=t_step)
self.myZ = vasicekMC.getLibor()
self.myZ = self.myZ.loc[:, 0]
def getScheduleComplete(self):
datelist = self.myScheduler.getSchedule(start=self.start_date, end=self.maturity, freq=self.freq,
referencedate=self.referenceDate)
ntimes = len(datelist)
fullset = list(sorted(list(set(datelist)
.union([self.referenceDate])
.union([self.start_date])
.union([self.maturity])
.union([self.referenceDate])
)))
return fullset, datelist
def getPremiumLegZ(self,myQ):
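        # Risky annuity of the premium leg: trapezoidal sum of
        # Z(t_i)/Z(t_ref) * (Q(t_{i-1}) + Q(t_i)) / 2 * dt_i over the payment schedule.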
Q1M = myQ
# Q1M = self.myQ["QTranche"]
fulllist, datelist = self.getScheduleComplete()
portfolioScheduleOfCF = fulllist
timed = portfolioScheduleOfCF[portfolioScheduleOfCF.index(self.referenceDate):]
Q1M = Q1M.loc[timed]
zbarPremLeg = self.myZ / self.myZ.loc[self.referenceDate]
zbarPremLeg = zbarPremLeg.loc[timed]
## Calculate Q(t_i) + Q(t_(i-1))
Qplus = []
out = 0
for i in range(1, len(Q1M)):
out = out + (Q1M[(i - 1)] + Q1M[i]) * float((timed[i] - timed[i - 1]).days / 365) * zbarPremLeg[i]
zbarPremLeg = pd.DataFrame(zbarPremLeg, index=timed)
# print("Premium LEg")
PVpremiumLeg = out * (1 / 2)
# print(PVpremiumLeg)
## Get coupon bond ###
print(PVpremiumLeg)
return PVpremiumLeg
# //////////////// Get Protection leg Z(t_i)( Q(t_(i-1)) - Q(t_i) )
def getProtectionLeg(self,myQ):
print("Protection Leg ")
Q1M = myQ
#Qminus = np.gradient(Q1M)
zbarProtectionLeg = self.myZ
out = 0
for i in range(1,zbarProtectionLeg.shape[0]):
#out = -Qminus[i] * zbarProtectionLeg.iloc[i]*float(1/365)
out = out -(Q1M.iloc[i]-Q1M.iloc[i-1]) * zbarProtectionLeg.iloc[i]
## Calculate the PV of the premium leg using the bond class
print(out)
return out
def CDOPortfolio(self):
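        ## Price the same tranche under all four loss models (LHP, Gaussian, adjusted
        ## binomial, exact) and print each model's fair spread in basis points.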
self.setParameters(R=self.Rs[0], rating = self.ratings[0], coupon=self.coupons[0],
beta = self.betas[0], start_date = start_dates[0],
referenceDate = referenceDates[0], end_date = end_dates[0], freq = self.freqs[0])
## price CDOs using LHP
Q_now1 = self.getQ(start_date = self.start_dates[0],referenceDate=self.referenceDates[0],
end_date = self.end_dates[0],freq = self.freqs[0],coupon=self.coupons[0],
rating = self.ratings[0],R = self.Rs[0])
## Estimate default probabilites from Qtranche list ###
## Assume that lambda is constant over a small period of time
def getApproxDefaultProbs(Qvals, freq, tvalues):
t_start = tvalues[0]
delay = self.myScheduler.extractDelay(freq)
delay_days = ((t_start + delay) - t_start).days
## Estimate constant lambda
lam = -(1 / delay_days) * np.log(Qvals[t_start])
Qvals = [((Qvals[t_start] * exp(-lam * (t - t_start).days)) / Qvals[t]) for t in tvalues]
return (Qvals)
########################################################
print(Q_now1)
### Create Q dataframe
tvalues = Q_now1.index.tolist()
Cs = pd.Series(Q_now1,index=tvalues)
for cds_num in range(1,len(Fs)):
Q_add = self.getQ(start_date = self.start_dates[cds_num],referenceDate=self.referenceDates[cds_num],
end_date = self.end_dates[cds_num],freq = self.freqs[cds_num],coupon=self.coupons[cds_num],
rating = self.ratings[cds_num],R = self.Rs[cds_num])
Q_add = pd.Series(Q_add,index = tvalues)
Cs = pd.concat([Cs,Q_add],axis = 1)
def expdecay(n):
            # .ix (label-based rows, positional columns) was removed from pandas;
            # the equivalent here is a positional column pick followed by a label lookup
            return lambda t: Cs.iloc[:, n].loc[t]
##
#Qs = [expdecay(0),expdecay(1)]
Qs = [expdecay(n) for n in range(0,Cs.shape[1])]
self.getZ_Vasicek()
###### LHP Method #####################################################################
Rs_mean = np.mean(self.Rs)
betas_mean = np.mean(betas)
lhpcurve = [self.Q_lhp(t, self.K1, self.K2, R = Rs[0], beta = betas[0], Q = Qs[0]) for t in tvalues]
lhpcurve = pd.Series(lhpcurve, index=tvalues)
lhpcurve = getApproxDefaultProbs(lhpcurve,freq=self.freq,tvalues=tvalues)
lhpcurve = pd.Series(lhpcurve, index=tvalues)
ProtectionLeg = self.getProtectionLeg(myQ = lhpcurve)
PremiumLeg = self.getPremiumLegZ(myQ = lhpcurve)
spreads = ProtectionLeg / PremiumLeg
print("The spread for LHP is: ", 10000 * spreads, ".")
########################################################################################
###### Gaussian Method #####################################################################
print('Gaussian progression: ', end="")
gaussiancurve = [self.Q_gauss(t, self.K1, self.K2, Fs = self.Fs, Rs =self.Rs, betas=self.betas, Qs=Qs) for t in tvalues]
gaussiancurve = pd.Series(gaussiancurve, index=tvalues)
gaussiancurve = getApproxDefaultProbs(gaussiancurve, freq=self.freq, tvalues=tvalues)
gaussiancurve = pd.Series(gaussiancurve, index=tvalues)
ProtectionLeg = self.getProtectionLeg(myQ=gaussiancurve)
PremiumLeg = self.getPremiumLegZ(myQ=gaussiancurve)
spreads = ProtectionLeg / PremiumLeg
print("The spread for Gaussian is: ", 10000 * spreads, ".")
########################################################################################
###### Adjusted Binomial Method #####################################################################
adjustedbinomialcurve = [self.Q_adjbinom(t, self.K1, self.K2, Fs = self.Fs, Rs = self.Rs, betas=self.betas, Qs=Qs) for t in tvalues]
adjustedbinomialcurve = pd.Series(adjustedbinomialcurve, index=tvalues)
adjustedbinomialcurve = getApproxDefaultProbs(adjustedbinomialcurve, freq=self.freq, tvalues=tvalues)
adjustedbinomialcurve = pd.Series(adjustedbinomialcurve, index=tvalues)
#adjustedbinomialcurve = adjustedbinomialcurve.to_frame(self.freqs[0])
ProtectionLeg = self.getProtectionLeg(myQ=adjustedbinomialcurve)
PremiumLeg = self.getPremiumLegZ(myQ=adjustedbinomialcurve)
spreads = ProtectionLeg / PremiumLeg
print("The spread for Ajusted Binomial is: ", 10000 * spreads, ".")
########################################################################################
###### Exact Method #####################################################################
exactcurve = [self.Q_exact(t, self.K1, self.K2, Fs =self.Fs, Rs = self.Rs, betas =self.betas, Qs =Qs) for t in tvalues]
exactcurve = pd.Series(exactcurve, index=tvalues)
exactcurve = getApproxDefaultProbs(exactcurve, freq=self.freq, tvalues=tvalues)
exactcurve = pd.Series(exactcurve, index=tvalues)
#exactcurve = exactcurve.to_frame(self.freqs[0])
ProtectionLeg = self.getProtectionLeg(myQ=exactcurve)
PremiumLeg = self.getPremiumLegZ(myQ=exactcurve)
spreads = ProtectionLeg / PremiumLeg
print("The spread for Exact is: ", 10000 * spreads, ".")
########################################################################################
K1 = 0.00001
K2 = 0.03
Fs = [0.3, 0.5,0.2]
Rs = [0.40, 0.40,0.40]
betas = [0.30, 0.30,0.30]
bootstrap = True
# Should assume same start,reference and end_dates
start_dates = [date(2012, 2, 28), date(2012, 2, 28),date(2012, 2, 28)]
referenceDates = [date(2013, 1, 20), date(2013, 1, 20), date(2013, 1, 20)]
end_dates = [date(2013, 12, 31), date(2013, 12, 31),date(2013, 12, 31)]
ratings = ["AAA", "AAA","AAA"]
freqs = ["3M", "3M","3M"]
coupons = [1,1,1]
test = PortfolioLossCalculation(K1 = K1, K2 = K2, Fs = Fs, Rs =Rs, betas = betas,
start_dates = start_dates,end_dates = end_dates,freqs=freqs,
coupons = coupons,referenceDates = referenceDates,ratings=ratings,bootstrap = False)
test.CDOPortfolio()
| mit |
nils-wisiol/pypuf | pypuf/studies/ipuf/variants_mlp.py | 1 | 26239 | """
This module describes a study that defines a set of experiments in order to examine the quality of Deep Learning based
modeling attacks on Interpose PUFs variants. Furthermore, some plots are defined to visualize the experiment's results.
Results are used in Wisiol et al., "Splitting the Interpose PUF: A Novel Modeling Attack Strategy", CHES 2020.
References:
[1] F. Rosenblatt, "The Perceptron: A Probabilistic Model for Information Storage and Organization in the
Brain.", Psychological Review, volume 65, pp. 386-408, 1958.
[2] D. Kingma and J. Ba, “Adam: A Method for Stochastic Optimization”, arXiv:1412.6980, 2014.
[3] F. Pedregosa et al., "Scikit-learn: Machine learning in Python", Journal of Machine Learning Research, volume 12,
pp. 2825-2830, 2011.
https://scikit-learn.org
"""
from os import getpid
from typing import NamedTuple, Iterable, List
from uuid import UUID
from uuid import uuid4
from numpy import concatenate, prod, sqrt, average, isinf, Inf
from numpy.core._multiarray_umath import ndarray
from numpy.random.mtrand import RandomState
from pandas import DataFrame
from pypuf import tools
from pypuf.experiments.experiment.base import Experiment
from pypuf.learner.neural_networks.mlp_skl import MultiLayerPerceptronScikitLearn
from pypuf.simulation.arbiter_based.arbiter_puf import XORArbiterPUF
from pypuf.simulation.arbiter_based.ltfarray import LTFArray
from pypuf.simulation.base import Simulation
from pypuf.studies.base import Study
from pypuf.studies.ipuf.split import SplitAttackStudy
class Interpose3PUF(Simulation):
"""
The Domino-iPUF.
"""
def __init__(self, n: int, k_up: int, k_middle: int, k_down: int, seed: int, noisiness: float = 0) -> None:
self.seed = seed
self.prng = RandomState(seed)
self.n = n
self.k = k_up
self.k_up, self.k_middle, self.k_down = k_up, k_middle, k_down
self.chains = k_up + k_middle + k_down
        self.xors = (k_up if k_up > 1 else 0) + (k_middle if k_middle > 1 else 0) + (k_down if k_down > 1 else 0)
self.interposings = 3
self.noisiness = noisiness
seeds = [self.prng.randint(0, 2 ** 32) for _ in range(6)]
self.up = XORArbiterPUF(n=n, k=k_up, seed=seeds[0], noisiness=noisiness, noise_seed=seeds[1])
        self.middle = XORArbiterPUF(n=n + 1, k=k_middle, seed=seeds[2], noisiness=noisiness, noise_seed=seeds[3])
        self.down = XORArbiterPUF(n=n + 1, k=k_down, seed=seeds[4], noisiness=noisiness, noise_seed=seeds[5])
self.interpose_pos = n // 2
def __repr__(self) -> str:
return f'Interpose3PUF, n={self.n}, k_up={self.k_up}, k_middle={self.k_middle}, k_down={self.k_down}, ' \
f'pos={self.interpose_pos}'
def challenge_length(self) -> int:
return self.up.challenge_length()
def response_length(self) -> int:
return self.down.response_length()
def _interpose(self, challenges, bits):
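        # Insert the upstream response bit into each challenge at the interpose position.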
pos = self.interpose_pos
return concatenate(
(challenges[:, :pos], bits.reshape(-1, 1), challenges[:, pos:]),
axis=1,
)
def eval(self, challenges: ndarray) -> ndarray:
return self.down.eval(self._interpose(
challenges=challenges,
bits=self.middle.eval(self._interpose(
challenges=challenges,
bits=self.up.eval(challenges)
))
))
class InterposeBinaryTree(Simulation):
"""
The Tree-iPUF.
"""
def __init__(self, n: int, ks: List[int], seed: int, noisiness: float = 0) -> None:
self.seed = seed
self.prng = RandomState(seed)
self.n = n
self.ks = ks
self.k = ks[0]
self.depth = len(ks) - 1
self.chains = sum([k * (2 ** i) for i, k in enumerate(ks)])
self.xors = sum([k * 2 ** i if k > 1 else 0 for i, k in enumerate(ks)])
self.interposings = 2 ** (self.depth + 1) - 2
self.noisiness = noisiness
self.layers = \
[
[
XORArbiterPUF(
n=n + 1 if i > 0 else n,
k=ks[i],
seed=self.prng.randint(0, 2 ** 32),
noisiness=noisiness,
noise_seed=self.prng.randint(0, 2 ** 32)
)
for _ in range(2 ** i)
]
for i in range(self.depth + 1)
]
self.interpose_pos = n // 2
def __repr__(self) -> str:
return f'InterposeBinaryTree, n={self.n}, k={self.k}, depth={self.depth}, pos={self.interpose_pos}'
def challenge_length(self) -> int:
return self.layers[0][0].challenge_length()
def response_length(self) -> int:
return 1
def _interpose(self, challenges, bits):
pos = self.interpose_pos
return concatenate(
(challenges[:, :pos], bits.reshape(-1, 1), challenges[:, pos:]),
axis=1,
)
def eval(self, challenges: ndarray) -> ndarray:
responses = [self.layers[0][0].eval(challenges=challenges)]
        for i in range(self.depth):
responses = [self.layers[i + 1][j].eval(
challenges=self._interpose(challenges=challenges, bits=responses[int(j / 2)])
) for j in range(len(self.layers[i + 1]))]
return prod(responses, axis=0)
class InterposeCascade(Simulation):
"""
The Cascade-iPUF.
"""
def __init__(self, n: int, ks: List[int], seed: int, noisiness: float = 0) -> None:
self.seed = seed
self.prng = RandomState(seed)
self.n = n
self.k = ks[0]
self.ks = ks
self.chains = sum(ks)
self.xors = self.chains
self.interposings = len(ks)
self.noisiness = noisiness
seeds = [self.prng.randint(0, 2 ** 32) for _ in range(2 * len(ks))]
self.layers = [
XORArbiterPUF(
n=n + 1 if i > 0 else n,
k=k,
seed=seeds[2 * i],
noisiness=noisiness,
noise_seed=seeds[2 * i + 1],
)
for i, k in enumerate(ks)
]
self.interpose_pos = n // 2
def __repr__(self) -> str:
return f'InterposeCascade, n={self.n}, ks={str(self.ks)}, pos={self.interpose_pos}'
def challenge_length(self) -> int:
return self.layers[0].challenge_length()
def response_length(self) -> int:
return 1
def _interpose(self, challenges, bits):
pos = self.interpose_pos
return concatenate(
(challenges[:, :pos], bits.reshape(-1, 1), challenges[:, pos:]),
axis=1,
)
def eval(self, challenges: ndarray) -> ndarray:
result = 1
for i, layer in enumerate(self.layers):
result = result * layer.eval(self._interpose(challenges=challenges, bits=result) if i > 0 else challenges)
return result
class XORInterposePUF(Simulation):
"""
The XOR-iPUF.
"""
def __init__(self, n: int, k: int, seed: int, noisiness: float = 0) -> None:
self.seed = seed
self.prng = RandomState(seed)
self.n = n
self.k = k
self.chains = 2 * k
self.xors = k
self.interposings = k
self.noisiness = noisiness
seeds = [self.prng.randint(0, 2 ** 32) for _ in range(4 * k)]
self.layers_up = [
XORArbiterPUF(n=n, k=1, seed=seeds[2 * i], noisiness=noisiness, noise_seed=seeds[2 * i + 1])
for i in range(k)
]
self.layers_down = [
XORArbiterPUF(n=n + 1, k=1, seed=seeds[2 * (i + k)], noisiness=noisiness, noise_seed=seeds[2 * (i + k) + 1])
for i in range(k)
]
self.interpose_pos = n // 2
def __repr__(self) -> str:
return f'XORInterposePUF, n={self.n}, k={self.k}, pos={self.interpose_pos}'
def challenge_length(self) -> int:
return self.layers_up[0].challenge_length()
def response_length(self) -> int:
return 1
def _interpose(self, challenges, bits):
pos = self.interpose_pos
return concatenate(
(challenges[:, :pos], bits.reshape(-1, 1), challenges[:, pos:]),
axis=1,
)
def eval(self, challenges: ndarray) -> ndarray:
return prod(
a=[self.layers_down[i].eval(self._interpose(
challenges=challenges,
bits=self.layers_up[i].eval(challenges)
)) for i in range(self.k)],
axis=0,
)
class XORInterpose3PUF(Simulation):
"""
The XOR-Domino-iPUF.
"""
def __init__(self, n: int, k: int, seed: int, noisiness: float = 0) -> None:
self.seed = seed
self.prng = RandomState(seed)
self.n = n
self.k = k
self.chains = 3 * k
self.xors = k
self.interposings = 2 * k
self.noisiness = noisiness
seeds = [self.prng.randint(0, 2 ** 32) for _ in range(6 * k)]
self.layers_up = [
XORArbiterPUF(n=n, k=1, seed=seeds[2 * i], noisiness=noisiness, noise_seed=seeds[2 * i + 1])
for i in range(k)
]
self.layers_middle = [
XORArbiterPUF(n=n + 1, k=1, seed=seeds[2 * (i + k)], noisiness=noisiness, noise_seed=seeds[2 * (i + k) + 1])
for i in range(k)
]
self.layers_down = [
XORArbiterPUF(n=n + 1, k=1, seed=seeds[2 * (i+2*k)], noisiness=noisiness, noise_seed=seeds[2 * (i+2*k) + 1])
for i in range(k)
]
self.interpose_pos = n // 2
def __repr__(self) -> str:
return f'XORInterpose3PUF, n={self.n}, k={self.k}, pos={self.interpose_pos}'
def challenge_length(self) -> int:
return self.layers_up[0].challenge_length()
def response_length(self) -> int:
return 1
def _interpose(self, challenges, bits):
pos = self.interpose_pos
return concatenate(
(challenges[:, :pos], bits.reshape(-1, 1), challenges[:, pos:]),
axis=1,
)
def eval(self, challenges: ndarray) -> ndarray:
return prod(
a=[self.layers_down[i].eval(self._interpose(
challenges=challenges,
bits=self.layers_middle[i].eval(self._interpose(
challenges=challenges,
bits=self.layers_up[i].eval(challenges)
))
)) for i in range(self.k)],
axis=0,
)
class Parameters(NamedTuple):
"""
Defines a iPUF-Variant to be modeled with MLP.
"""
simulation: Simulation
seed_simulation: int
noisiness: float
seed: int
N: int
validation_frac: float
preprocessing: str
layers: Iterable[int]
learning_rate: float
tolerance: float
patience: int
iteration_limit: int
batch_size: int
class Result(NamedTuple):
"""
Result of an attack on an iPUF-variant using MLP.
"""
name: str
n: int
first_k: int
num_chains: int
num_xors: int
num_interposings: int
experiment_id: UUID
pid: int
measured_time: float
iterations: int
accuracy: float
accuracy_relative: float
stability: float
reliability: float
loss_curve: Iterable[float]
accuracy_curve: Iterable[float]
max_memory: int
class ExperimentMLPScikitLearn(Experiment):
"""
Model an iPUF-variant using MLP.
"""
NAME = 'Multilayer Perceptron (scikit-learn)'
COMPRESSION = True
def __init__(self, progress_log_prefix, parameters):
self.id = uuid4()
progress_log_name = None if not progress_log_prefix else f'{progress_log_prefix}_{self.id}'
super().__init__(progress_log_name=progress_log_name, parameters=parameters)
self.simulation = parameters.simulation
self.stability = 1.0
self.reliability = 1.0
self.training_set = None
self.learner = None
self.model = None
def prepare(self):
self.stability = 1.0 - tools.approx_dist(
instance1=self.simulation,
instance2=self.simulation,
num=10 ** 4,
random_instance=RandomState(seed=self.parameters.seed),
)
self.stability = max(self.stability, 1 - self.stability)
self.reliability = (1 + sqrt(2 * self.stability - 1)) / 2 # estimation of non-noisy vs. noisy
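        # follows from stability = a**2 + (1 - a)**2, where a is the probability that a
        # noise-free model agrees with a single noisy evaluation; solving for a gives the root above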
self.progress_logger.debug(f'Gathering training set with {self.parameters.N} examples')
self.training_set = tools.TrainingSet(
instance=self.simulation,
N=self.parameters.N,
random_instance=RandomState(seed=self.parameters.seed),
)
self.progress_logger.debug('Setting up learner')
self.learner = MultiLayerPerceptronScikitLearn(
n=self.parameters.simulation.n,
k=self.parameters.simulation.k,
training_set=self.training_set,
validation_frac=self.parameters.validation_frac,
transformation=LTFArray.transform_atf,
preprocessing='short',
layers=self.parameters.layers,
learning_rate=self.parameters.learning_rate,
penalty=0.0002,
beta_1=0.9,
beta_2=0.999,
tolerance=self.parameters.tolerance,
patience=self.parameters.patience,
iteration_limit=self.parameters.iteration_limit,
batch_size=self.parameters.batch_size,
seed_model=self.parameters.seed,
print_learning=False,
logger=self.progress_logger.debug,
goal=0.95 * self.reliability,
)
self.learner.prepare()
def run(self):
if self.stability < 0.65:
self.progress_logger.debug(f'The stability of the target is too low: {self.stability}')
return
self.progress_logger.debug('Starting learner')
self.model = self.learner.learn()
def analyze(self):
self.progress_logger.debug('Analyzing result')
accuracy = -1 if not self.model else 1.0 - tools.approx_dist(
instance1=self.simulation,
instance2=self.model,
num=10 ** 4,
random_instance=RandomState(seed=self.parameters.seed),
)
return Result(
name=self.NAME,
n=self.parameters.simulation.n,
first_k=self.parameters.simulation.k,
num_chains=self.parameters.simulation.chains,
num_xors=self.parameters.simulation.xors,
num_interposings=self.parameters.simulation.interposings,
experiment_id=self.id,
pid=getpid(),
measured_time=self.measured_time,
iterations=-1 if not self.model else self.learner.nn.n_iter_,
accuracy=accuracy,
accuracy_relative=accuracy / self.reliability,
stability=self.stability,
reliability=self.reliability,
loss_curve=[-1] if not self.model else [round(loss, 3) for loss in self.learner.nn.loss_curve_],
accuracy_curve=[-1] if not self.model else [round(accuracy, 3) for accuracy in self.learner.accuracy_curve],
max_memory=self.max_memory(),
)
class InterposeMLPStudy(Study):
"""
A study containing a number of iPUF-variants for various parameterizations, conducting MLP-based modeling attacks.
"""
SHUFFLE = True
ITERATION_LIMIT = 400
PATIENCE = ITERATION_LIMIT
MAX_NUM_VAL = 10000
MIN_NUM_VAL = 200
PRINT_LEARNING = False
LENGTH = 64
SEED = 42
NOISINESS = 0.1
SAMPLES_PER_POINT = 100
BATCH_FRAC = [0.05]
def experiments(self):
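        # Each tuple below is (simulation, [number of CRPs], [hidden-layer size lists], [learning rates]);
        # the outer comprehension repeats every parameter point SAMPLES_PER_POINT times with fresh seeds.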
definitions = [
definition for i in range(self.SAMPLES_PER_POINT) for definition in
[
(Interpose3PUF(self.LENGTH, 2, 1, 1, (self.SEED + 1000 + i) % 2 ** 32, self.NOISINESS),
[400000], [[2 ** 4] * 3], [0.02]),
(Interpose3PUF(self.LENGTH, 2, 2, 2, (self.SEED + 2000 + i) % 2 ** 32, self.NOISINESS),
[400000], [[2 ** 4] * 3], [0.02]),
(Interpose3PUF(self.LENGTH, 3, 1, 1, (self.SEED + 3000 + i) % 2 ** 32, self.NOISINESS),
[2000000], [[2 ** 6] * 3], [0.01]),
(Interpose3PUF(self.LENGTH, 3, 3, 3, (self.SEED + 4000 + i) % 2 ** 32, self.NOISINESS),
[2000000], [[2 ** 6] * 3], [0.01]),
(Interpose3PUF(self.LENGTH, 4, 1, 1, (self.SEED + 5000 + i) % 2 ** 32, self.NOISINESS),
[20000000], [[2 ** 7] * 3], [0.0075]),
(Interpose3PUF(self.LENGTH, 4, 4, 4, (self.SEED + 6000 + i) % 2 ** 32, self.NOISINESS),
[20000000], [[2 ** 7] * 3], [0.0075]),
(Interpose3PUF(self.LENGTH, 5, 1, 1, (self.SEED + 7000 + i) % 2 ** 32, self.NOISINESS),
[50000000], [[2 ** 8] * 3], [0.0001, 0.005]),
(Interpose3PUF(self.LENGTH, 5, 5, 5, (self.SEED + 8000 + i) % 2 ** 32, self.NOISINESS),
[50000000], [[2 ** 8] * 3], [0.0001, 0.005]),
(InterposeBinaryTree(self.LENGTH, [1, 1, 1], (self.SEED + 20000 + i) % 2 ** 32, self.NOISINESS),
[500000], [[2 ** 7] * 3], [0.008]),
(InterposeBinaryTree(self.LENGTH, [2, 2, 2], (self.SEED + 22000 + i) % 2 ** 32, self.NOISINESS),
[5000000], [[2 ** 9] * 3], [0.004]),
(InterposeBinaryTree(self.LENGTH, [1, 1, 1, 1], (self.SEED + 22000 + i) % 2 ** 32, self.NOISINESS),
[5000000], [[2 ** 9] * 3], [0.004]),
(InterposeCascade(self.LENGTH, [1] * 2, (self.SEED + 40000 + i) % 2 ** 32, self.NOISINESS),
[80000], [[2 ** 2] * 3], [0.01]),
(InterposeCascade(self.LENGTH, [1] * 3, (self.SEED + 41000 + i) % 2 ** 32, self.NOISINESS),
[120000], [[2 ** 3] * 3], [0.01]),
(InterposeCascade(self.LENGTH, [1] * 4, (self.SEED + 42000 + i) % 2 ** 32, self.NOISINESS),
[200000], [[2 ** 4] * 3], [0.01]),
(InterposeCascade(self.LENGTH, [1] * 5, (self.SEED + 43000 + i) % 2 ** 32, self.NOISINESS),
[400000], [[2 ** 5] * 3], [0.01]),
(InterposeCascade(self.LENGTH, [1] * 6, (self.SEED + 44000 + i) % 2 ** 32, self.NOISINESS),
[1000000], [[2 ** 6] * 3], [0.01]),
(InterposeCascade(self.LENGTH, [1] * 7, (self.SEED + 45000 + i) % 2 ** 32, self.NOISINESS),
[30000000], [[2 ** 7] * 3], [0.01]),
(InterposeCascade(self.LENGTH, [1] * 8, (self.SEED + 46000 + i) % 2 ** 32, self.NOISINESS),
[10000000], [[2 ** 8] * 3], [0.01]),
(InterposeCascade(self.LENGTH, [2] * 2, (self.SEED + 47000 + i) % 2 ** 32, self.NOISINESS),
[200000], [[2 ** 4] * 3], [0.02]),
(InterposeCascade(self.LENGTH, [2] * 3, (self.SEED + 48000 + i) % 2 ** 32, self.NOISINESS),
[500000], [[2 ** 5] * 3], [0.02]),
(InterposeCascade(self.LENGTH, [2] * 4, (self.SEED + 49000 + i) % 2 ** 32, self.NOISINESS),
[2000000], [[2 ** 6] * 3], [0.02]),
(InterposeCascade(self.LENGTH, [2] * 5, (self.SEED + 50000 + i) % 2 ** 32, self.NOISINESS),
[10000000], [[2 ** 7] * 3], [0.005]),
(InterposeCascade(self.LENGTH, [3] * 2, (self.SEED + 51000 + i) % 2 ** 32, self.NOISINESS),
[2000000], [[2 ** 7] * 3], [0.003]),
(InterposeCascade(self.LENGTH, [3] * 3, (self.SEED + 52000 + i) % 2 ** 32, self.NOISINESS),
[10000000], [[2 ** 7] * 3], [0.002]),
(InterposeCascade(self.LENGTH, [4] * 2, (self.SEED + 53000 + i) % 2 ** 32, self.NOISINESS),
[5000000], [[2 ** 8] * 3], [0.001]),
(InterposeCascade(self.LENGTH, [5] * 2, (self.SEED + 54000 + i) % 2 ** 32, self.NOISINESS),
[20000000], [[2 ** 8] * 3], [0.001]),
(XORInterposePUF(self.LENGTH, 2, (self.SEED + 60000 + i) % 2 ** 32, self.NOISINESS),
[100000], [[2 ** 4] * 3], [0.01]),
(XORInterposePUF(self.LENGTH, 3, (self.SEED + 61000 + i) % 2 ** 32, self.NOISINESS),
[400000], [[2 ** 5] * 3], [0.01]),
(XORInterposePUF(self.LENGTH, 4, (self.SEED + 62000 + i) % 2 ** 32, self.NOISINESS),
[10000000], [[2 ** 7] * 3], [0.005]),
(XORInterposePUF(self.LENGTH, 5, (self.SEED + 63000 + i) % 2 ** 32, self.NOISINESS),
[40000000], [[2 ** 8] * 3], [0.0025]),
(XORInterpose3PUF(self.LENGTH, 2, (self.SEED + 80000 + i) % 2 ** 32, self.NOISINESS),
[200000], [[2 ** 4] * 3], [0.01]),
(XORInterpose3PUF(self.LENGTH, 3, (self.SEED + 81000 + i) % 2 ** 32, self.NOISINESS),
[2000000], [[2 ** 5] * 3], [0.01]),
(XORInterpose3PUF(self.LENGTH, 4, (self.SEED + 82000 + i) % 2 ** 32, self.NOISINESS),
[40000000], [[2 ** 8] * 3], [0.0025]),
]
]
return [
ExperimentMLPScikitLearn(
progress_log_prefix=self.name(),
parameters=Parameters(
simulation=simulation,
seed_simulation=simulation.seed,
noisiness=simulation.noisiness,
seed=self.SEED + i,
N=N,
validation_frac=max(min(N // 20, self.MAX_NUM_VAL), self.MIN_NUM_VAL) / N,
preprocessing='short',
layers=layers,
learning_rate=learning_rate,
tolerance=0.0025,
patience=self.PATIENCE,
iteration_limit=self.ITERATION_LIMIT,
batch_size=int(N * batch_frac),
)
)
for i, (simulation, Ns, structures, learning_rates) in enumerate(definitions)
for N in Ns
for layers in structures
for learning_rate in learning_rates
for batch_frac in self.BATCH_FRAC
]
def plot(self):
data = self.experimenter.results
data['success'] = data.apply(lambda row: row['accuracy_relative'] >= .90, axis=1)
data['threads'] = data.apply(SplitAttackStudy.num_threads, axis=1)
groups = data.groupby(['N', 'simulation', 'num_chains', 'threads', 'cpu'])
rt_data = DataFrame(columns=['N', 'simulation', 'num_chains', 'threads', 'cpu',
'success_rate', 'avg_time_success', 'avg_time_fail', 'num_success', 'num_fail',
'num_total', 'time_to_success', 'reliability', 'memory_avg_gib', 'memory_max_gib',
'avg_rel_accuracy'])
for (N, simulation, num_chains, threads, cpu), g_data in groups:
num_success = len(g_data[g_data['success']].index)
num_total = len(g_data.index)
success_rate = num_success / num_total
mean_time_success = average(g_data[g_data['success']]['measured_time'])
mean_time_fail = average(g_data[~g_data['success']]['measured_time']) if success_rate < 1 else 0
exp_number_of_trials_until_success = 1 / success_rate if success_rate > 0 else Inf # Geometric dist.
if isinf(exp_number_of_trials_until_success):
time_to_success = Inf
else:
time_to_success = (exp_number_of_trials_until_success - 1) * mean_time_fail + mean_time_success
reliability = g_data['reliability'].mean()
rt_data = rt_data.append(
{
'N': N, 'simulation': simulation, 'num_chains': num_chains, 'threads': threads, 'cpu': cpu,
'success_rate': success_rate,
'avg_time_success': mean_time_success,
'avg_time_fail': mean_time_fail,
'num_success': num_success,
'num_fail': num_total - num_success,
'num_total': num_total,
'time_to_success': time_to_success,
'reliability': round(reliability * 100 // 10 * 10 / 100, 2),
'memory_avg_gib': g_data['max_memory'].mean() / 1024**3,
'memory_max_gib': g_data['max_memory'].max() / 1024**3,
'avg_rel_accuracy': g_data['accuracy_relative'].mean(),
},
ignore_index=True,
)
rt_data = rt_data.sort_values(['simulation', 'num_chains', 'N', 'reliability'])
rt_data['time_to_success'] = rt_data.apply(lambda row: SplitAttackStudy.time_cat(row['time_to_success']),
axis=1)
table_cols = ['simulation_friendly_name', 'simulation', 'num_chains', 'N_cat', 'reliability', 'memory_avg_gib',
'time_to_success', 'success_rate', 'num_total']
def reader_friendly_name(simulation_name):
for technical_name, friendly_name in {
'Interpose3PUF': 'Domino-iPUF',
'XORInterposePUF': 'XOR-iPUF',
'XORInterpose3PUF': 'XOR-Domino-iPUF',
'InterposeBinaryTree': 'Tree-iPUF',
'InterposeCascade': 'Cascade-iPUF',
}.items():
if str(simulation_name).startswith(technical_name):
return friendly_name
return simulation_name
rt_data['simulation_friendly_name'] = rt_data.apply(lambda row: reader_friendly_name(row['simulation']), axis=1)
rt_data['success_rate'] = rt_data['success_rate'].round(2)
rt_data['memory_avg_gib'] = rt_data['memory_avg_gib'].round(1)
rt_data['num_chains'] = rt_data['num_chains'].astype('int')
rt_data['N_cat'] = rt_data.apply(lambda row: SplitAttackStudy.N_cat(row['N']), axis=1)
print(rt_data[rt_data['num_chains'] >= 8][table_cols].to_latex(index=False, escape=False))
| gpl-3.0 |
rahuldhote/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
ZobairAlijan/osf.io | scripts/analytics/utils.py | 30 | 1244 | # -*- coding: utf-8 -*-
import os
import unicodecsv as csv
from bson import ObjectId
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import requests
from website import util
def oid_to_datetime(oid):
return ObjectId(oid).generation_time
def mkdirp(path):
try:
os.makedirs(path)
except OSError:
pass
def plot_dates(dates, *args, **kwargs):
"""Plot date histogram."""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(
[mdates.date2num(each) for each in dates],
*args, **kwargs
)
fig.autofmt_xdate()
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
return fig
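# Usage sketch (assumes `dates` is an iterable of datetime objects collected by the
# calling analytics script; extra arguments are forwarded to ax.hist):
#   fig = plot_dates(dates, bins=30)
#   fig.savefig('downloads.png')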
def make_csv(fp, rows, headers=None):
writer = csv.writer(fp)
if headers:
writer.writerow(headers)
writer.writerows(rows)
def send_file(app, name, content_type, file_like, node, user):
"""Upload file to OSF."""
file_like.seek(0)
with app.test_request_context():
upload_url = util.waterbutler_url_for('upload', 'osfstorage', name, node, user=user)
requests.put(
upload_url,
data=file_like,
headers={'Content-Type': content_type},
)
| apache-2.0 |
tspus/python-matchingPursuit | src/drawing.py | 1 | 5553 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
'''
# This file is part of Matching Pursuit Python program (python-MP).
#
# python-MP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-MP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-MP. If not, see <http://www.gnu.org/licenses/>.
author: Tomasz Spustek
e-mail: [email protected]
University of Warsaw, July 06, 2015
'''
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.signal import resample
from scipy import interpolate
from dictionary import tukey , genericEnvelope
def plotIter(book,signal,time,number):
plt.figure()
plt.subplot(2,1,1)
plt.plot(time,signal)
plt.subplot(2,1,2)
plt.plot(time,book['reconstruction'][number].real)
plt.show()
def calculateTFMap(book,time,samplingFrequency,mapType,*argv):
'''
mapType:int
- 0 - real-valued amplitude t-f map
- 1 - complex amplitude t-f map
'''
if len(argv) == 2:
mapStructFreqs = argv[0]
mapStructWidths = argv[1]
else:
mapStructFreqs = [0.0 , samplingFrequency/2.0]
mapStructWidths = [0.0 , 2 * time.shape[0]/samplingFrequency]
mapFsize = 1000
mapTsize = np.array([2000 , time.shape[0]]).min()
if time.shape[0] > mapTsize:
timeFinal = time[-1] * np.linspace(0,1,mapTsize) / samplingFrequency
else:
timeFinal = (time[-1] - time[0]) / samplingFrequency * np.linspace(0,1,mapTsize)
frequenciesFinal = samplingFrequency / 2 * np.linspace(0,1,mapFsize)
if mapType == 0:
timeFreqMap = np.zeros([frequenciesFinal.shape[0] , timeFinal.shape[0]])
elif mapType == 1:
timeFreqMap = np.zeros([frequenciesFinal.shape[0] , timeFinal.shape[0]] , dtype='complex')
smoothingWindow = tukey(time.shape[0] , 0.1)
for index, atom in book.iterrows():
if (atom['frequency'] >= mapStructFreqs[0] and atom['frequency'] <= mapStructFreqs[1]) and (atom['width'] >= mapStructWidths[0] and atom['width'] <= mapStructWidths[1]):
if mapType == 0:
timeCourse = getAtomReconstruction(atom , time)
signal2fft = timeCourse * smoothingWindow
zz = np.fft.fft(signal2fft)
z = np.abs( zz[0 : int(np.floor(zz.shape[0]/2+1))] )
z = halfWidthGauss(z)
if z.shape[0] > frequenciesFinal.shape[0]:
z = resample(z, frequenciesFinal.shape[0])
else:
x = np.arange(0, z.shape[0])
f = interpolate.interp1d(x, z)
z = f( np.arange(0, z.shape[0]-1 , (z.shape[0]-1)/frequenciesFinal.shape[0]) )
z = z / z.max()
envelope = getAtomEnvelope(atom , time) * np.abs(atom['modulus']['complex'])
envelope = resample(envelope , timeFinal.shape[0])
timeFreqMap += np.outer(z , envelope)
elif mapType == 1:
totalTimeCourse = getAtomReconstruction(atom , time)
signal2fft = totalTimeCourse * smoothingWindow
zz = np.fft.fft(signal2fft)
z = np.abs( zz[0 : int(np.floor(zz.shape[0]/2+1))] )
z = halfWidthGauss(z)
if z.shape[0] > frequenciesFinal.shape[0]:
z = resample(z, frequenciesFinal.shape[0])
else:
x = np.arange(0, z.shape[0])
f = interpolate.interp1d(x, z)
z = f( np.arange(0, z.shape[0]-1 , (z.shape[0]-1)/frequenciesFinal.shape[0]) )
z = z / z.max()
timeFreqMap += np.outer(z , totalTimeCourse)
return (timeFinal , frequenciesFinal , timeFreqMap)
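# Usage sketch (the book, time vector and sampling frequency are assumed to come from
# the matching-pursuit decomposition step elsewhere in this package):
#   t, f, tfMap = calculateTFMap(book, time, samplingFrequency, mapType=0)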
def getReconstruction(book , time , limits=[]):
if limits == []:
reconstruction = np.zeros(time.shape)
for (index,atom) in book.iterrows():
reconstruction += getAtomReconstruction(atom , time)
else:
amplitudeLimits = limits[0]
positionLimits = limits[1]
frequencyLimits = limits[2]
widthLimits = limits[3]
reconstruction = np.zeros(time.shape)
for (index,atom) in book.iterrows():
if (atom['amplitude'] > amplitudeLimits[0] and atom['amplitude'] < amplitudeLimits[1]) and (atom['atomLatency'] > positionLimits[0] and atom['atomLatency'] < positionLimits[1]) and (atom['frequency'] > frequencyLimits[0] and atom['frequency'] < frequencyLimits[1]) and (atom['width'] > widthLimits[0] and atom['width'] < widthLimits[1]):
reconstruction += getAtomReconstruction(atom , time)
return reconstruction
def getAtomReconstruction(atom , time):
envelope = getAtomEnvelope(atom , time)
timeShifted = time - atom['time_0']
reconstruction = np.zeros(time.shape , dtype='complex')
reconstruction = atom['modulus']['complex'] * envelope * np.exp(1j * atom['omega'] * timeShifted)
return reconstruction.real
def getAtomEnvelope(atom , time):
mi = atom['mi']
increase = atom['increase']
decay = atom['decay']
sigma = atom['sigma']
envelope = genericEnvelope(sigma , time , atom['shapeType'] , 0 , mi , increase , decay)[0]
return envelope
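# halfWidthGauss (below) measures the full width at half maximum of a spectral peak
# and returns a Gaussian of matching width and height centred on that peak; it is
# used above to smooth each atom's spectrum before building the t-f map.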
def halfWidthGauss(z):
mz = z.max()
mzi = z.argmax()
ID = np.where(z[0:mzi]-0.5*mz < 0)[0]
if ID.shape[0] == 0:
L = mzi
else:
L = mzi - ID[-1]
ID = np.where(z[mzi:]-0.5*mz < 0)[0]
if ID.shape[0] == 0:
R = z.shape[0]
else:
R = ID[0]
sigma = (L+R) / 2 / np.sqrt(np.log(4))
t = np.arange(1,z.shape[0])
return mz * np.exp(-1*(t-mzi)**2 / 2 / (sigma**2))
| gpl-3.0 |
nelango/ViralityAnalysis | model/lib/pandas/tests/test_indexing.py | 9 | 193467 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import sys
import nose
import itertools
import warnings
from datetime import datetime
from pandas.compat import range, lrange, lzip, StringIO, lmap, map
from pandas.tslib import NaT
from numpy import nan
from numpy.random import randn
import numpy as np
import pandas as pd
import pandas.core.common as com
from pandas import option_context
from pandas.core.indexing import _non_reducing_slice, _maybe_numeric_slice
from pandas.core.api import (DataFrame, Index, Series, Panel, isnull,
MultiIndex, Float64Index, Timestamp, Timedelta)
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal,
assert_attr_equal)
from pandas import concat, lib
from pandas.io.common import PerformanceWarning
import pandas.util.testing as tm
from pandas import date_range
from numpy.testing.decorators import slow
_verbose = False
#-------------------------------------------------------------------------------
# Indexing test cases
def _generate_indices(f, values=False):
""" generate the indicies
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = [ lrange(len(a)) for a in axes ]
return itertools.product(*axes)
def _get_value(f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
# this is equiv of f[col][row].....
#v = f
#for a in reversed(i):
# v = v.__getitem__(a)
#return v
return f.ix[i]
def _get_result(obj, method, key, axis):
""" return the result for this obj with this key and this axis """
if isinstance(key, dict):
key = key[axis]
# use an artificial conversion to map the key as integers to the labels
# so ix can work for comparisons
if method == 'indexer':
method = 'ix'
key = obj._get_axis(axis)[key]
# in case we actually want 0 index slicing
try:
xp = getattr(obj, method).__getitem__(_axify(obj,key,axis))
except:
xp = getattr(obj, method).__getitem__(key)
return xp
def _axify(obj, key, axis):
# create a tuple accessor
if axis is not None:
axes = [ slice(None) ] * obj.ndim
axes[axis] = key
return tuple(axes)
return key
def _mklbl(prefix,n):
return ["%s%s" % (prefix,i) for i in range(n)]
class TestIndexing(tm.TestCase):
_multiprocess_can_split_ = True
_objs = set(['series','frame','panel'])
_typs = set(['ints','labels','mixed','ts','floats','empty'])
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.series_ints = Series(np.random.rand(4), index=lrange(0,8,2))
self.frame_ints = DataFrame(np.random.randn(4, 4), index=lrange(0, 8, 2), columns=lrange(0,12,3))
self.panel_ints = Panel(np.random.rand(4,4,4), items=lrange(0,8,2),major_axis=lrange(0,12,3),minor_axis=lrange(0,16,4))
self.series_labels = Series(np.random.randn(4), index=list('abcd'))
self.frame_labels = DataFrame(np.random.randn(4, 4), index=list('abcd'), columns=list('ABCD'))
self.panel_labels = Panel(np.random.randn(4,4,4), items=list('abcd'), major_axis=list('ABCD'), minor_axis=list('ZYXW'))
self.series_mixed = Series(np.random.randn(4), index=[2, 4, 'null', 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, 'null', 8])
self.panel_mixed = Panel(np.random.randn(4,4,4), items=[2,4,'null',8])
self.series_ts = Series(np.random.randn(4), index=date_range('20130101', periods=4))
self.frame_ts = DataFrame(np.random.randn(4, 4), index=date_range('20130101', periods=4))
self.panel_ts = Panel(np.random.randn(4, 4, 4), items=date_range('20130101', periods=4))
#self.series_floats = Series(np.random.randn(4), index=[1.00, 2.00, 3.00, 4.00])
#self.frame_floats = DataFrame(np.random.randn(4, 4), columns=[1.00, 2.00, 3.00, 4.00])
#self.panel_floats = Panel(np.random.rand(4,4,4), items = [1.00,2.00,3.00,4.00])
self.frame_empty = DataFrame({})
self.series_empty = Series({})
self.panel_empty = Panel({})
# form agglomerates
for o in self._objs:
d = dict()
for t in self._typs:
d[t] = getattr(self,'%s_%s' % (o,t),None)
setattr(self,o,d)
def check_values(self, f, func, values = False):
if f is None: return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f,func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
assert_almost_equal(result, expected)
def check_result(self, name, method1, key1, method2, key2, typs = None, objs = None, axes = None, fails = None):
def _eq(t, o, a, obj, k1, k2):
""" compare equal for these 2 keys """
if a is not None and a > obj.ndim-1:
return
def _print(result, error = None):
if error is not None:
error = str(error)
v = "%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" % (name,result,t,o,method1,method2,a,error or '')
if _verbose:
com.pprint_thing(v)
try:
### good debug location ###
#if name == 'bool' and t == 'empty' and o == 'series' and method1 == 'loc':
# import pdb; pdb.set_trace()
rs = getattr(obj, method1).__getitem__(_axify(obj,k1,a))
try:
xp = _get_result(obj,method2,k2,a)
except:
result = 'no comp'
_print(result)
return
try:
if np.isscalar(rs) and np.isscalar(xp):
self.assertEqual(rs, xp)
elif xp.ndim == 1:
assert_series_equal(rs,xp)
elif xp.ndim == 2:
assert_frame_equal(rs,xp)
elif xp.ndim == 3:
assert_panel_equal(rs,xp)
result = 'ok'
except (AssertionError):
result = 'fail'
# reverse the checks
if fails is True:
if result == 'fail':
result = 'ok (fail)'
if not result.startswith('ok'):
raise AssertionError(_print(result))
_print(result)
except AssertionError:
raise
except Exception as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
result = 'ok (%s)' % type(detail).__name__
_print(result)
return
result = type(detail).__name__
raise AssertionError(_print(result, error = detail))
if typs is None:
typs = self._typs
if objs is None:
objs = self._objs
if axes is not None:
if not isinstance(axes,(tuple,list)):
axes = [ axes ]
else:
axes = list(axes)
else:
axes = [ 0, 1, 2]
# check
for o in objs:
if o not in self._objs:
continue
d = getattr(self,o)
for a in axes:
for t in typs:
if t not in self._typs:
continue
obj = d[t]
if obj is not None:
obj = obj.copy()
k2 = key2
_eq(t, o, a, obj, key1, k2)
def test_indexer_caching(self):
# GH5727
# make sure that indexers are in the _internal_names_set
n = 1000001
arrays = [lrange(n), lrange(n)]
index = MultiIndex.from_tuples(lzip(*arrays))
s = Series(np.zeros(n), index=index)
str(s)
# setitem
expected = Series(np.ones(n), index=index)
s = Series(np.zeros(n), index=index)
s[s==0] = 1
assert_series_equal(s,expected)
def test_at_and_iat_get(self):
def _check(f, func, values = False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
result = getattr(f,func)[i]
expected = _get_value(f,i,values)
assert_almost_equal(result, expected)
for o in self._objs:
d = getattr(self,o)
# iat
_check(d['ints'],'iat', values=True)
for f in [d['labels'],d['ts'],d['floats']]:
if f is not None:
self.assertRaises(ValueError, self.check_values, f, 'iat')
# at
_check(d['ints'], 'at')
_check(d['labels'],'at')
_check(d['ts'], 'at')
_check(d['floats'],'at')
def test_at_and_iat_set(self):
def _check(f, func, values = False):
if f is not None:
indicies = _generate_indices(f, values)
for i in indicies:
getattr(f,func)[i] = 1
expected = _get_value(f,i,values)
assert_almost_equal(expected, 1)
for t in self._objs:
d = getattr(self,t)
_check(d['ints'],'iat',values=True)
for f in [d['labels'],d['ts'],d['floats']]:
if f is not None:
self.assertRaises(ValueError, _check, f, 'iat')
# at
_check(d['ints'], 'at')
_check(d['labels'],'at')
_check(d['ts'], 'at')
_check(d['floats'],'at')
def test_at_iat_coercion(self):
# as timestamp is not a tuple!
dates = date_range('1/1/2000', periods=8)
df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
s = df['A']
result = s.at[dates[5]]
xp = s.values[5]
self.assertEqual(result, xp)
# GH 7729
# make sure we are boxing the returns
s = Series(['2014-01-01', '2014-02-02'], dtype='datetime64[ns]')
expected = Timestamp('2014-02-02')
for r in [ lambda : s.iat[1], lambda : s.iloc[1] ]:
result = r()
self.assertEqual(result, expected)
s = Series(['1 days','2 days'], dtype='timedelta64[ns]')
expected = Timedelta('2 days')
for r in [ lambda : s.iat[1], lambda : s.iloc[1] ]:
result = r()
self.assertEqual(result, expected)
def test_iat_invalid_args(self):
pass
def test_imethods_with_dups(self):
# GH6493
# iat/iloc with dups
s = Series(range(5), index=[1,1,2,2,3], dtype='int64')
result = s.iloc[2]
self.assertEqual(result,2)
result = s.iat[2]
self.assertEqual(result,2)
self.assertRaises(IndexError, lambda : s.iat[10])
self.assertRaises(IndexError, lambda : s.iat[-10])
result = s.iloc[[2,3]]
expected = Series([2,3],[2,2],dtype='int64')
assert_series_equal(result,expected)
df = s.to_frame()
result = df.iloc[2]
expected = Series(2, index=[0], name=2)
assert_series_equal(result, expected)
result = df.iat[2,0]
expected = 2
self.assertEqual(result,2)
def test_repeated_getitem_dups(self):
# GH 5678
# repeated getitems on a dup index returning an ndarray
df = DataFrame(np.random.random_sample((20,5)), index=['ABCDE'[x%5] for x in range(20)])
expected = df.loc['A',0]
result = df.loc[:,0].loc['A']
assert_series_equal(result,expected)
def test_iloc_exceeds_bounds(self):
# GH6296
# iloc should allow indexers that exceed the bounds
df = DataFrame(np.random.random_sample((20,5)), columns=list('ABCDE'))
expected = df
# lists of positions should raise IndexError!
with tm.assertRaisesRegexp(IndexError, 'positional indexers are out-of-bounds'):
df.iloc[:,[0,1,2,3,4,5]]
self.assertRaises(IndexError, lambda : df.iloc[[1,30]])
self.assertRaises(IndexError, lambda : df.iloc[[1,-30]])
self.assertRaises(IndexError, lambda : df.iloc[[100]])
s = df['A']
self.assertRaises(IndexError, lambda : s.iloc[[100]])
self.assertRaises(IndexError, lambda : s.iloc[[-100]])
# still raise on a single indexer
with tm.assertRaisesRegexp(IndexError, 'single positional indexer is out-of-bounds'):
df.iloc[30]
self.assertRaises(IndexError, lambda : df.iloc[-30])
# GH10779
# single positive/negative indexer exceeding Series bounds should raise an IndexError
with tm.assertRaisesRegexp(IndexError, 'single positional indexer is out-of-bounds'):
s.iloc[30]
self.assertRaises(IndexError, lambda : s.iloc[-30])
# slices are ok
result = df.iloc[:,4:10] # 0 < start < len < stop
expected = df.iloc[:,4:]
assert_frame_equal(result,expected)
result = df.iloc[:,-4:-10] # stop < 0 < start < len
expected = df.iloc[:,:0]
assert_frame_equal(result,expected)
result = df.iloc[:,10:4:-1] # 0 < stop < len < start (down)
expected = df.iloc[:,:4:-1]
assert_frame_equal(result,expected)
result = df.iloc[:,4:-10:-1] # stop < 0 < start < len (down)
expected = df.iloc[:,4::-1]
assert_frame_equal(result,expected)
result = df.iloc[:,-10:4] # start < 0 < stop < len
expected = df.iloc[:,:4]
assert_frame_equal(result,expected)
result = df.iloc[:,10:4] # 0 < stop < len < start
expected = df.iloc[:,:0]
assert_frame_equal(result,expected)
result = df.iloc[:,-10:-11:-1] # stop < start < 0 < len (down)
expected = df.iloc[:,:0]
assert_frame_equal(result,expected)
result = df.iloc[:,10:11] # 0 < len < start < stop
expected = df.iloc[:,:0]
assert_frame_equal(result,expected)
# slice bounds exceeding is ok
result = s.iloc[18:30]
expected = s.iloc[18:]
assert_series_equal(result,expected)
result = s.iloc[30:]
expected = s.iloc[:0]
assert_series_equal(result,expected)
result = s.iloc[30::-1]
expected = s.iloc[::-1]
assert_series_equal(result,expected)
# doc example
def check(result,expected):
str(result)
result.dtypes
assert_frame_equal(result,expected)
dfl = DataFrame(np.random.randn(5,2),columns=list('AB'))
check(dfl.iloc[:,2:3],DataFrame(index=dfl.index))
check(dfl.iloc[:,1:3],dfl.iloc[:,[1]])
check(dfl.iloc[4:6],dfl.iloc[[4]])
self.assertRaises(IndexError, lambda : dfl.iloc[[4,5,6]])
self.assertRaises(IndexError, lambda : dfl.iloc[:,4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix', { 0 : 4, 1: 6, 2: 8 }, typs = ['ints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2, typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix', { 0 : 6, 1: 9, 2: 12 }, typs = ['ints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1, typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0,1,2], 'ix', { 0 : [0,2,4], 1 : [0,3,6], 2: [0,4,8] }, typs = ['ints'])
self.check_result('list int', 'iloc', [2], 'ix', { 0 : [4], 1 : [6], 2: [8] }, typs = ['ints'])
self.check_result('list int', 'iloc', [0,1,2], 'indexer', [0,1,2], typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
# array of ints
# (GH5006), make sure that a single indexer is returning the correct type
self.check_result('array int', 'iloc', np.array([0,1,2]), 'ix', { 0 : [0,2,4], 1 : [0,3,6], 2: [0,4,8] }, typs = ['ints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix', { 0 : [4], 1 : [6], 2: [8] }, typs = ['ints'])
self.check_result('array int', 'iloc', np.array([0,1,2]), 'indexer', [0,1,2], typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
self.assertEqual(result, expected)
expected = s.iloc[[0]]
result = s.iloc[[-3]]
assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0,1,1,3], 'ix', { 0 : [0,2,2,6], 1 : [0,3,3,9] }, objs = ['series','frame'], typs = ['ints'])
# GH 6766
df1 = DataFrame([{'A':None, 'B':1},{'A':2, 'B':2}])
df2 = DataFrame([{'A':3, 'B':3},{'A':4, 'B':4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0,0]
self.assertTrue(isnull(result))
result = df.iloc[0,:]
expected = Series([np.nan, 1, 3, 3], index=['A','B','A','B'], name=0)
assert_series_equal(result,expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1,4))
self.check_result('array like', 'iloc', s.index, 'ix', { 0 : [2,4,6], 1 : [3,6,9], 2: [4,8,12] }, typs = ['ints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True,False,True,False,]
self.check_result('bool', 'iloc', b, 'ix', b, typs = ['ints'])
self.check_result('bool', 'iloc', b, 'ix', b, typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1,3), 'ix', { 0 : [2,4], 1: [3,6], 2: [4,8] }, typs = ['ints'])
self.check_result('slice', 'iloc', slice(1,3), 'indexer', slice(1,3), typs = ['labels','mixed','ts','floats','empty'], fails = IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10,4),columns=['A','A','B','B'])
df2 = DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])
# axis=1
df = concat([df1,df2],axis=1)
assert_frame_equal(df.iloc[:,:4],df1)
assert_frame_equal(df.iloc[:,4:],df2)
df = concat([df2,df1],axis=1)
assert_frame_equal(df.iloc[:,:2],df2)
assert_frame_equal(df.iloc[:,2:],df1)
assert_frame_equal(df.iloc[:,0:3],concat([df2,df1.iloc[:,[0]]],axis=1))
# axis=0
df = concat([df,df],axis=0)
assert_frame_equal(df.iloc[0:10,:2],df2)
assert_frame_equal(df.iloc[0:10,2:],df1)
assert_frame_equal(df.iloc[10:,:2],df2)
assert_frame_equal(df.iloc[10:,2:],df1)
def test_iloc_getitem_multiindex(self):
arr = np.random.randn(3, 3)
df = DataFrame(arr,
columns=[[2,2,4],[6,8,10]],
index=[[4,4,8],[8,10,12]])
rs = df.iloc[2]
xp = Series(arr[2],index=df.columns)
assert_series_equal(rs, xp)
rs = df.iloc[:,2]
xp = Series(arr[:, 2],index=df.index)
assert_series_equal(rs, xp)
rs = df.iloc[2,2]
xp = df.values[2,2]
self.assertEqual(rs, xp)
# for multiple items
# GH 5528
rs = df.iloc[[0,1]]
xp = df.xs(4,drop_level=False)
assert_frame_equal(rs,xp)
tup = zip(*[['a','a','b','b'],['x','y','x','y']])
index = MultiIndex.from_tuples(tup)
df = DataFrame(np.random.randn(4, 4), index=index)
rs = df.iloc[[2, 3]]
xp = df.xs('b',drop_level=False)
assert_frame_equal(rs,xp)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1,1] = 1
result = df.iloc[1,1]
self.assertEqual(result, 1)
df.iloc[:,2:3] = 0
expected = df.iloc[:,2:3]
result = df.iloc[:,2:3]
assert_frame_equal(result, expected)
# GH5771
s = Series(0,index=[4,5,6])
s.iloc[1:2] += 1
expected = Series([0,1,0],index=[4,5,6])
assert_series_equal(s, expected)
def test_ix_loc_setitem_consistency(self):
# GH 5771
# loc with slice and series
s = Series(0,index=[4,5,6])
s.loc[4:5] += 1
expected = Series([1,1,0],index=[4,5,6])
assert_series_equal(s, expected)
# GH 5928
# chained indexing assignment
df = DataFrame({'a' : [0,1,2] })
expected = df.copy()
expected.ix[[0,1,2],'a'] = -expected.ix[[0,1,2],'a']
df['a'].ix[[0,1,2]] = -df['a'].ix[[0,1,2]]
assert_frame_equal(df,expected)
df = DataFrame({'a' : [0,1,2], 'b' :[0,1,2] })
df['a'].ix[[0,1,2]] = -df['a'].ix[[0,1,2]].astype('float64') + 0.5
expected = DataFrame({'a' : [0.5,-0.5,-1.5], 'b' : [0,1,2] })
assert_frame_equal(df,expected)
# GH 8607
# ix setitem consistency
df = DataFrame(
{'timestamp':[1413840976, 1413842580, 1413760580],
'delta':[1174, 904, 161],
'elapsed':[7673, 9277, 1470]
})
expected = DataFrame(
{'timestamp':pd.to_datetime([1413840976, 1413842580, 1413760580], unit='s'),
'delta':[1174, 904, 161],
'elapsed':[7673, 9277, 1470]
})
df2 = df.copy()
df2['timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
assert_frame_equal(df2,expected)
df2 = df.copy()
df2.loc[:,'timestamp'] = pd.to_datetime(df['timestamp'], unit='s')
assert_frame_equal(df2,expected)
df2 = df.copy()
df2.ix[:,2] = pd.to_datetime(df['timestamp'], unit='s')
assert_frame_equal(df2,expected)
def test_ix_loc_consistency(self):
# GH 8613
# some edge cases where ix/loc should return the same
# this is not an exhaustive case
def compare(result, expected):
if lib.isscalar(expected):
self.assertEqual(result, expected)
else:
self.assertTrue(expected.equals(result))
# failure cases for .loc, but these work for .ix
df = pd.DataFrame(np.random.randn(5,4), columns=list('ABCD'))
for key in [ slice(1,3), tuple([slice(0,2),slice(0,2)]), tuple([slice(0,2),df.columns[0:2]]) ]:
for index in [ tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeTimedeltaIndex ]:
df.index = index(len(df.index))
df.ix[key]
self.assertRaises(TypeError, lambda : df.loc[key])
df = pd.DataFrame(np.random.randn(5,4), columns=list('ABCD'), index=pd.date_range('2012-01-01', periods=5))
for key in [ '2012-01-03',
'2012-01-31',
slice('2012-01-03','2012-01-03'),
slice('2012-01-03','2012-01-04'),
slice('2012-01-03','2012-01-06',2),
slice('2012-01-03','2012-01-31'),
tuple([[True,True,True,False,True]]),
]:
# getitem
# if the expected raises, then compare the exceptions
try:
expected = df.ix[key]
except KeyError:
self.assertRaises(KeyError, lambda : df.loc[key])
continue
result = df.loc[key]
compare(result, expected)
# setitem
df1 = df.copy()
df2 = df.copy()
df1.ix[key] = 10
df2.loc[key] = 10
compare(df2, df1)
# edge cases
s = Series([1,2,3,4], index=list('abde'))
result1 = s['a':'c']
result2 = s.ix['a':'c']
result3 = s.loc['a':'c']
assert_series_equal(result1,result2)
assert_series_equal(result1,result3)
# now work rather than raising KeyError
s = Series(range(5),[-2,-1,1,2,3])
result1 = s.ix[-10:3]
result2 = s.loc[-10:3]
assert_series_equal(result1,result2)
result1 = s.ix[0:3]
result2 = s.loc[0:3]
assert_series_equal(result1,result2)
def test_setitem_multiindex(self):
for index_fn in ('ix', 'loc'):
def check(target, indexers, value, compare_fn, expected=None):
fn = getattr(target, index_fn)
fn.__setitem__(indexers, value)
result = fn.__getitem__(indexers)
if expected is None:
expected = value
compare_fn(result, expected)
# GH7190
index = pd.MultiIndex.from_product([np.arange(0,100), np.arange(0, 80)], names=['time', 'firm'])
t, n = 0, 2
df = DataFrame(np.nan,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
check(
target=df, indexers=((t,n), 'X'),
value=0, compare_fn=self.assertEqual
)
df = DataFrame(-999,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
check(
target=df, indexers=((t,n), 'X'),
value=1, compare_fn=self.assertEqual
)
df = DataFrame(columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
check(
target=df, indexers=((t,n), 'X'),
value=2, compare_fn=self.assertEqual
)
# GH 7218, assigning with 0-dim arrays
df = DataFrame(-999,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
check(
target=df, indexers=((t,n), 'X'),
value=np.array(3), compare_fn=self.assertEqual,
expected=3,
)
# GH5206
df = pd.DataFrame(
np.arange(25).reshape(5, 5), columns='A,B,C,D,E'.split(','),
dtype=float
)
df['F'] = 99
row_selection = df['A'] % 2 == 0
col_selection = ['B', 'C']
df.ix[row_selection, col_selection] = df['F']
output = pd.DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
assert_frame_equal(df.ix[row_selection, col_selection], output)
check(
target=df, indexers=(row_selection, col_selection),
value=df['F'], compare_fn=assert_frame_equal,
expected=output,
)
# GH11372
idx = pd.MultiIndex.from_product([
['A', 'B', 'C'],
pd.date_range('2015-01-01', '2015-04-01', freq='MS')
])
cols = pd.MultiIndex.from_product([
['foo', 'bar'],
pd.date_range('2016-01-01', '2016-02-01', freq='MS')
])
df = pd.DataFrame(np.random.random((12, 4)), index=idx, columns=cols)
subidx = pd.MultiIndex.from_tuples(
[('A', pd.Timestamp('2015-01-01')), ('A', pd.Timestamp('2015-02-01'))]
)
subcols = pd.MultiIndex.from_tuples(
[('foo', pd.Timestamp('2016-01-01')), ('foo', pd.Timestamp('2016-02-01'))]
)
vals = pd.DataFrame(np.random.random((2, 2)), index=subidx, columns=subcols)
check(
target=df, indexers=(subidx, subcols),
value=vals, compare_fn=assert_frame_equal,
)
# set all columns
vals = pd.DataFrame(np.random.random((2, 4)), index=subidx, columns=cols)
check(
target=df, indexers=(subidx, slice(None, None, None)),
value=vals, compare_fn=assert_frame_equal,
)
# identity
copy = df.copy()
check(
target=df, indexers=(df.index, df.columns),
value=df, compare_fn=assert_frame_equal,
expected=copy
)
def test_indexing_with_datetime_tz(self):
# 8260
# support datetime64 with tz
idx = Index(date_range('20130101',periods=3,tz='US/Eastern'),
name='foo')
dr = date_range('20130110',periods=3)
df = DataFrame({'A' : idx, 'B' : dr})
df['C'] = idx
df.iloc[1,1] = pd.NaT
df.iloc[1,2] = pd.NaT
# indexing
result = df.iloc[1]
expected = Series([Timestamp('2013-01-02 00:00:00-0500', tz='US/Eastern'), np.nan, np.nan],
index=list('ABC'), dtype='object', name=1)
assert_series_equal(result, expected)
result = df.loc[1]
expected = Series([Timestamp('2013-01-02 00:00:00-0500', tz='US/Eastern'), np.nan, np.nan],
index=list('ABC'), dtype='object', name=1)
assert_series_equal(result, expected)
# indexing - fast_xs
df = DataFrame({'a': date_range('2014-01-01', periods=10, tz='UTC')})
result = df.iloc[5]
expected = Timestamp('2014-01-06 00:00:00+0000', tz='UTC', offset='D')
self.assertEqual(result, expected)
result = df.loc[5]
self.assertEqual(result, expected)
# indexing - boolean
result = df[df.a > df.a[3]]
expected = df.iloc[4:]
assert_frame_equal(result, expected)
# indexing - setting an element
df = DataFrame( data = pd.to_datetime(['2015-03-30 20:12:32','2015-03-12 00:11:11']) ,columns=['time'] )
df['new_col']=['new','old']
df.time=df.set_index('time').index.tz_localize('UTC')
v = df[df.new_col=='new'].set_index('time').index.tz_convert('US/Pacific')
# trying to set a single element on a part of a different timezone
def f():
df.loc[df.new_col=='new','time'] = v
self.assertRaises(ValueError, f)
v = df.loc[df.new_col=='new','time'] + pd.Timedelta('1s')
df.loc[df.new_col=='new','time'] = v
assert_series_equal(df.loc[df.new_col=='new','time'],v)
def test_loc_setitem_dups(self):
# GH 6541
df_orig = DataFrame({'me' : list('rttti'),
'foo': list('aaade'),
'bar': np.arange(5,dtype='float64')*1.34+2,
'bar2': np.arange(5,dtype='float64')*-.34+2}).set_index('me')
indexer = tuple(['r',['bar','bar2']])
df = df_orig.copy()
df.loc[indexer]*=2.0
assert_series_equal(df.loc[indexer],2.0*df_orig.loc[indexer])
indexer = tuple(['r','bar'])
df = df_orig.copy()
df.loc[indexer]*=2.0
self.assertEqual(df.loc[indexer],2.0*df_orig.loc[indexer])
indexer = tuple(['t',['bar','bar2']])
df = df_orig.copy()
df.loc[indexer]*=2.0
assert_frame_equal(df.loc[indexer],2.0*df_orig.loc[indexer])
def test_iloc_setitem_dups(self):
# GH 6766
# iloc with a mask aligning from another iloc
df1 = DataFrame([{'A':None, 'B':1},{'A':2, 'B':2}])
df2 = DataFrame([{'A':3, 'B':3},{'A':4, 'B':4}])
df = concat([df1, df2], axis=1)
expected = df.fillna(3)
expected['A'] = expected['A'].astype('float64')
inds = np.isnan(df.iloc[:, 0])
mask = inds[inds].index
df.iloc[mask,0] = df.iloc[mask,2]
assert_frame_equal(df, expected)
# del a dup column across blocks
expected = DataFrame({ 0 : [1,2], 1 : [3,4] })
expected.columns=['B','B']
del df['A']
assert_frame_equal(df, expected)
# assign back to self
df.iloc[[0,1],[0,1]] = df.iloc[[0,1],[0,1]]
assert_frame_equal(df, expected)
# reversed x 2
df.iloc[[1,0],[0,1]] = df.iloc[[1,0],[0,1]].reset_index(drop=True)
df.iloc[[1,0],[0,1]] = df.iloc[[1,0],[0,1]].reset_index(drop=True)
assert_frame_equal(df, expected)
def test_chained_getitem_with_lists(self):
# GH6394
# Regression in chained getitem indexing with embedded list-like from 0.12
def check(result, expected):
tm.assert_numpy_array_equal(result,expected)
tm.assertIsInstance(result, np.ndarray)
df = DataFrame({'A': 5*[np.zeros(3)], 'B':5*[np.ones(3)]})
expected = df['A'].iloc[2]
result = df.loc[2,'A']
check(result, expected)
result2 = df.iloc[2]['A']
check(result2, expected)
result3 = df['A'].loc[2]
check(result3, expected)
result4 = df['A'].iloc[2]
check(result4, expected)
def test_loc_getitem_int(self):
# int label
self.check_result('int label', 'loc', 2, 'ix', 2, typs = ['ints'], axes = 0)
self.check_result('int label', 'loc', 3, 'ix', 3, typs = ['ints'], axes = 1)
self.check_result('int label', 'loc', 4, 'ix', 4, typs = ['ints'], axes = 2)
self.check_result('int label', 'loc', 2, 'ix', 2, typs = ['label'], fails = KeyError)
def test_loc_getitem_label(self):
# label
self.check_result('label', 'loc', 'c', 'ix', 'c', typs = ['labels'], axes=0)
self.check_result('label', 'loc', 'null', 'ix', 'null', typs = ['mixed'] , axes=0)
self.check_result('label', 'loc', 8, 'ix', 8, typs = ['mixed'] , axes=0)
self.check_result('label', 'loc', Timestamp('20130102'), 'ix', 1, typs = ['ts'], axes=0)
self.check_result('label', 'loc', 'c', 'ix', 'c', typs = ['empty'], fails = KeyError)
def test_loc_getitem_label_out_of_range(self):
# out of range label
self.check_result('label range', 'loc', 'f', 'ix', 'f', typs = ['ints','labels','mixed','ts'], fails=KeyError)
self.check_result('label range', 'loc', 'f', 'ix', 'f', typs = ['floats'], fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs = ['ints','labels','mixed'], fails=KeyError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs = ['ts'], axes=0, fails=TypeError)
self.check_result('label range', 'loc', 20, 'ix', 20, typs = ['floats'], axes=0, fails=TypeError)
def test_loc_getitem_label_list(self):
# list of labels
self.check_result('list lbl', 'loc', [0,2,4], 'ix', [0,2,4], typs = ['ints'], axes=0)
self.check_result('list lbl', 'loc', [3,6,9], 'ix', [3,6,9], typs = ['ints'], axes=1)
self.check_result('list lbl', 'loc', [4,8,12], 'ix', [4,8,12], typs = ['ints'], axes=2)
self.check_result('list lbl', 'loc', ['a','b','d'], 'ix', ['a','b','d'], typs = ['labels'], axes=0)
self.check_result('list lbl', 'loc', ['A','B','C'], 'ix', ['A','B','C'], typs = ['labels'], axes=1)
self.check_result('list lbl', 'loc', ['Z','Y','W'], 'ix', ['Z','Y','W'], typs = ['labels'], axes=2)
self.check_result('list lbl', 'loc', [2,8,'null'], 'ix', [2,8,'null'], typs = ['mixed'], axes=0)
self.check_result('list lbl', 'loc', [Timestamp('20130102'),Timestamp('20130103')], 'ix',
[Timestamp('20130102'),Timestamp('20130103')], typs = ['ts'], axes=0)
self.check_result('list lbl', 'loc', [0,1,2], 'indexer', [0,1,2], typs = ['empty'], fails = KeyError)
self.check_result('list lbl', 'loc', [0,2,3], 'ix', [0,2,3], typs = ['ints'], axes=0, fails = KeyError)
self.check_result('list lbl', 'loc', [3,6,7], 'ix', [3,6,7], typs = ['ints'], axes=1, fails = KeyError)
self.check_result('list lbl', 'loc', [4,8,10], 'ix', [4,8,10], typs = ['ints'], axes=2, fails = KeyError)
# fails
self.check_result('list lbl', 'loc', [20,30,40], 'ix', [20,30,40], typs = ['ints'], axes=1, fails = KeyError)
self.check_result('list lbl', 'loc', [20,30,40], 'ix', [20,30,40], typs = ['ints'], axes=2, fails = KeyError)
# array like
self.check_result('array like', 'loc', Series(index=[0,2,4]).index, 'ix', [0,2,4], typs = ['ints'], axes=0)
self.check_result('array like', 'loc', Series(index=[3,6,9]).index, 'ix', [3,6,9], typs = ['ints'], axes=1)
self.check_result('array like', 'loc', Series(index=[4,8,12]).index, 'ix', [4,8,12], typs = ['ints'], axes=2)
def test_loc_getitem_bool(self):
# boolean indexers
b = [True,False,True,False]
self.check_result('bool', 'loc', b, 'ix', b, typs = ['ints','labels','mixed','ts','floats'])
self.check_result('bool', 'loc', b, 'ix', b, typs = ['empty'], fails = KeyError)
def test_loc_getitem_int_slice(self):
# ok
self.check_result('int slice2', 'loc', slice(2,4), 'ix', [2,4], typs = ['ints'], axes = 0)
self.check_result('int slice2', 'loc', slice(3,6), 'ix', [3,6], typs = ['ints'], axes = 1)
self.check_result('int slice2', 'loc', slice(4,8), 'ix', [4,8], typs = ['ints'], axes = 2)
# GH 3053
# loc should treat integer slices like label slices
from itertools import product
index = MultiIndex.from_tuples([t for t in product([6,7,8], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[6:8,:]
expected = df.ix[6:8,:]
assert_frame_equal(result,expected)
index = MultiIndex.from_tuples([t for t in product([10, 20, 30], ['a', 'b'])])
df = DataFrame(np.random.randn(6, 6), index, index)
result = df.loc[20:30,:]
expected = df.ix[20:30,:]
assert_frame_equal(result,expected)
# doc examples
result = df.loc[10,:]
expected = df.ix[10,:]
assert_frame_equal(result,expected)
result = df.loc[:,10]
#expected = df.ix[:,10] (this fails)
expected = df[10]
assert_frame_equal(result,expected)
def test_loc_to_fail(self):
# GH3449
df = DataFrame(np.random.random((3, 3)),
index=['a', 'b', 'c'],
columns=['e', 'f', 'g'])
# raise a KeyError?
self.assertRaises(KeyError, df.loc.__getitem__, tuple([[1, 2], [1, 2]]))
# GH 7496
# loc should not fallback
s = Series()
s.loc[1] = 1
s.loc['a'] = 2
self.assertRaises(KeyError, lambda : s.loc[-1])
self.assertRaises(KeyError, lambda : s.loc[[-1, -2]])
self.assertRaises(KeyError, lambda : s.loc[['4']])
s.loc[-1] = 3
result = s.loc[[-1,-2]]
expected = Series([3,np.nan],index=[-1,-2])
assert_series_equal(result, expected)
s['a'] = 2
self.assertRaises(KeyError, lambda : s.loc[[-2]])
del s['a']
def f():
s.loc[[-2]] = 0
self.assertRaises(KeyError, f)
# inconsistency between .loc[values] and .loc[values,:]
# GH 7999
df = DataFrame([['a'],['b']],index=[1,2],columns=['value'])
def f():
df.loc[[3],:]
self.assertRaises(KeyError, f)
def f():
df.loc[[3]]
self.assertRaises(KeyError, f)
# at should not fallback
# GH 7814
s = Series([1,2,3], index=list('abc'))
result = s.at['a']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda : s.at[0])
df = DataFrame({'A' : [1,2,3]},index=list('abc'))
result = df.at['a','A']
self.assertEqual(result, 1)
self.assertRaises(ValueError, lambda : df.at['a',0])
s = Series([1,2,3], index=[3,2,1])
result = s.at[1]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda : s.at['a'])
df = DataFrame({0 : [1,2,3]},index=[3,2,1])
result = df.at[1,0]
self.assertEqual(result, 3)
self.assertRaises(ValueError, lambda : df.at['a',0])
def test_loc_getitem_label_slice(self):
# label slices (with ints)
self.check_result('lab slice', 'loc', slice(1,3), 'ix', slice(1,3), typs = ['labels','mixed','empty','ts','floats'], fails=TypeError)
# real label slices
self.check_result('lab slice', 'loc', slice('a','c'), 'ix', slice('a','c'), typs = ['labels'], axes=0)
self.check_result('lab slice', 'loc', slice('A','C'), 'ix', slice('A','C'), typs = ['labels'], axes=1)
self.check_result('lab slice', 'loc', slice('W','Z'), 'ix', slice('W','Z'), typs = ['labels'], axes=2)
self.check_result('ts slice', 'loc', slice('20130102','20130104'), 'ix', slice('20130102','20130104'), typs = ['ts'], axes=0)
self.check_result('ts slice', 'loc', slice('20130102','20130104'), 'ix', slice('20130102','20130104'), typs = ['ts'], axes=1, fails=TypeError)
self.check_result('ts slice', 'loc', slice('20130102','20130104'), 'ix', slice('20130102','20130104'), typs = ['ts'], axes=2, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2,8), 'ix', slice(2,8), typs = ['mixed'], axes=0, fails=TypeError)
self.check_result('mixed slice', 'loc', slice(2,8), 'ix', slice(2,8), typs = ['mixed'], axes=1, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2,8), 'ix', slice(2,8), typs = ['mixed'], axes=2, fails=KeyError)
self.check_result('mixed slice', 'loc', slice(2,4,2), 'ix', slice(2,4,2), typs = ['mixed'], axes=0, fails=TypeError)
def test_loc_general(self):
df = DataFrame(np.random.rand(4,4),columns=['A','B','C','D'], index=['A','B','C','D'])
# want this to work
result = df.loc[:,"A":"B"].iloc[0:2,:]
self.assertTrue((result.columns == ['A','B']).all() == True)
self.assertTrue((result.index == ['A','B']).all() == True)
# mixed type
result = DataFrame({ 'a' : [Timestamp('20130101')], 'b' : [1] }).iloc[0]
expected = Series([ Timestamp('20130101'), 1], index=['a','b'], name=0)
assert_series_equal(result, expected)
self.assertEqual(result.dtype, object)
def test_loc_setitem_consistency(self):
# GH 6149
# coerce similarly for setitem and loc when rows have a null-slice
expected = DataFrame({ 'date': Series(0,index=range(5),dtype=np.int64),
'val' : Series(range(5),dtype=np.int64) })
df = DataFrame({ 'date': date_range('2000-01-01','2000-01-5'),
'val' : Series(range(5),dtype=np.int64) })
df.loc[:,'date'] = 0
assert_frame_equal(df,expected)
df = DataFrame({ 'date': date_range('2000-01-01','2000-01-5'),
'val' : Series(range(5),dtype=np.int64) })
df.loc[:,'date'] = np.array(0,dtype=np.int64)
assert_frame_equal(df,expected)
df = DataFrame({ 'date': date_range('2000-01-01','2000-01-5'),
'val' : Series(range(5),dtype=np.int64) })
df.loc[:,'date'] = np.array([0,0,0,0,0],dtype=np.int64)
assert_frame_equal(df,expected)
expected = DataFrame({ 'date': Series('foo',index=range(5)),
'val' : Series(range(5),dtype=np.int64) })
df = DataFrame({ 'date': date_range('2000-01-01','2000-01-5'),
'val' : Series(range(5),dtype=np.int64) })
df.loc[:,'date'] = 'foo'
assert_frame_equal(df,expected)
expected = DataFrame({ 'date': Series(1.0,index=range(5)),
'val' : Series(range(5),dtype=np.int64) })
df = DataFrame({ 'date': date_range('2000-01-01','2000-01-5'),
'val' : Series(range(5),dtype=np.int64) })
df.loc[:,'date'] = 1.0
assert_frame_equal(df,expected)
# empty (essentially noops)
expected = DataFrame(columns=['x', 'y'])
expected['x'] = expected['x'].astype(np.int64)
df = DataFrame(columns=['x', 'y'])
df.loc[:, 'x'] = 1
assert_frame_equal(df,expected)
df = DataFrame(columns=['x', 'y'])
df['x'] = 1
assert_frame_equal(df,expected)
# .loc[:,column] setting with slice == len of the column
# GH10408
data = """Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat
Level_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse
Region,Site,RespondentID,,,,,
Region_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,
Region_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes
Region_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,
Region_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No"""
df = pd.read_csv(StringIO(data),header=[0,1], index_col=[0,1,2])
df.loc[:,('Respondent','StartDate')] = pd.to_datetime(df.loc[:,('Respondent','StartDate')])
df.loc[:,('Respondent','EndDate')] = pd.to_datetime(df.loc[:,('Respondent','EndDate')])
df.loc[:,('Respondent','Duration')] = df.loc[:,('Respondent','EndDate')] - df.loc[:,('Respondent','StartDate')]
df.loc[:,('Respondent','Duration')] = df.loc[:,('Respondent','Duration')].astype('timedelta64[s]')
expected = Series([1380,720,840,2160.],index=df.index,name=('Respondent','Duration'))
assert_series_equal(df[('Respondent','Duration')],expected)
def test_loc_setitem_frame(self):
df = self.frame_labels
result = df.iloc[0,0]
df.loc['a','A'] = 1
result = df.loc['a','A']
self.assertEqual(result, 1)
result = df.iloc[0,0]
self.assertEqual(result, 1)
df.loc[:,'B':'D'] = 0
expected = df.loc[:,'B':'D']
result = df.ix[:,1:]
assert_frame_equal(result, expected)
# GH 6254
# setting issue
df = DataFrame(index=[3, 5, 4], columns=['A'])
df.loc[[4, 3, 5], 'A'] = np.array([1, 2, 3],dtype='int64')
expected = DataFrame(dict(A = Series([1,2,3],index=[4, 3, 5]))).reindex(index=[3,5,4])
assert_frame_equal(df, expected)
# GH 6252
# setting with an empty frame
keys1 = ['@' + str(i) for i in range(5)]
val1 = np.arange(5,dtype='int64')
keys2 = ['@' + str(i) for i in range(4)]
val2 = np.arange(4,dtype='int64')
index = list(set(keys1).union(keys2))
df = DataFrame(index = index)
df['A'] = nan
df.loc[keys1, 'A'] = val1
df['B'] = nan
df.loc[keys2, 'B'] = val2
expected = DataFrame(dict(A = Series(val1,index=keys1), B = Series(val2,index=keys2))).reindex(index=index)
assert_frame_equal(df, expected)
# GH 8669
# invalid coercion of nan -> int
df = DataFrame({'A' : [1,2,3], 'B' : np.nan })
df.loc[df.B > df.A, 'B'] = df.A
expected = DataFrame({'A' : [1,2,3], 'B' : np.nan})
assert_frame_equal(df, expected)
# GH 6546
# setting with mixed labels
df = DataFrame({1:[1,2],2:[3,4],'a':['a','b']})
result = df.loc[0, [1,2]]
expected = Series([1,3],index=[1,2],dtype=object, name=0)
assert_series_equal(result, expected)
expected = DataFrame({1:[5,2],2:[6,4],'a':['a','b']})
df.loc[0, [1,2]] = [5,6]
assert_frame_equal(df, expected)
def test_loc_setitem_frame_multiples(self):
# multiple setting
df = DataFrame({ 'A' : ['foo','bar','baz'],
'B' : Series(range(3),dtype=np.int64) })
rhs = df.loc[1:2]
rhs.index = df.index[0:2]
df.loc[0:1] = rhs
expected = DataFrame({ 'A' : ['bar','baz','baz'],
'B' : Series([1,2,2],dtype=np.int64) })
assert_frame_equal(df, expected)
# multiple setting with frame on rhs (with M8)
df = DataFrame({ 'date' : date_range('2000-01-01','2000-01-5'),
'val' : Series(range(5),dtype=np.int64) })
expected = DataFrame({ 'date' : [Timestamp('20000101'),Timestamp('20000102'),Timestamp('20000101'),
Timestamp('20000102'),Timestamp('20000103')],
'val' : Series([0,1,0,1,2],dtype=np.int64) })
rhs = df.loc[0:2]
rhs.index = df.index[2:5]
df.loc[2:4] = rhs
assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2), columns=lrange(0,8,2))
result = df.iloc[2]
exp = df.ix[4]
assert_series_equal(result, exp)
result = df.iloc[2,2]
exp = df.ix[4,4]
self.assertEqual(result, exp)
# slice
result = df.iloc[4:8]
expected = df.ix[8:14]
assert_frame_equal(result, expected)
result = df.iloc[:,2:3]
expected = df.ix[:,4:5]
assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[0,1,3]]
expected = df.ix[[0,2,6]]
assert_frame_equal(result, expected)
result = df.iloc[[0,1,3],[0,1]]
expected = df.ix[[0,2,6],[0,2]]
assert_frame_equal(result, expected)
# neg indices
result = df.iloc[[-1,1,3],[-1,1]]
expected = df.ix[[18,2,6],[6,2]]
assert_frame_equal(result, expected)
# dup indices
result = df.iloc[[-1,-1,1,3],[-1,1]]
expected = df.ix[[18,18,2,6],[6,2]]
assert_frame_equal(result, expected)
# with index-like
s = Series(index=lrange(1,5))
result = df.iloc[s.index]
expected = df.ix[[2,4,6,8]]
assert_frame_equal(result, expected)
# try with labelled frame
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'), columns=list('ABCD'))
result = df.iloc[1,1]
exp = df.ix['b','B']
self.assertEqual(result, exp)
result = df.iloc[:,2:3]
expected = df.ix[:,['C']]
assert_frame_equal(result, expected)
# negative indexing
result = df.iloc[-1,-1]
exp = df.ix['j','D']
self.assertEqual(result, exp)
# out-of-bounds exception
self.assertRaises(IndexError, df.iloc.__getitem__, tuple([10,5]))
# trying to use a label
self.assertRaises(ValueError, df.iloc.__getitem__, tuple(['j','D']))
def test_iloc_getitem_panel(self):
# GH 7189
p = Panel(np.arange(4*3*2).reshape(4,3,2),
items=['A','B','C','D'],
major_axis=['a','b','c'],
minor_axis=['one','two'])
result = p.iloc[1]
expected = p.loc['B']
assert_frame_equal(result, expected)
result = p.iloc[1,1]
expected = p.loc['B','b']
assert_series_equal(result, expected)
result = p.iloc[1,1,1]
expected = p.loc['B','b','two']
self.assertEqual(result,expected)
# slice
result = p.iloc[1:3]
expected = p.loc[['B','C']]
assert_panel_equal(result, expected)
result = p.iloc[:,0:2]
expected = p.loc[:,['a','b']]
assert_panel_equal(result, expected)
# list of integers
result = p.iloc[[0,2]]
expected = p.loc[['A','C']]
assert_panel_equal(result, expected)
# neg indices
result = p.iloc[[-1,1],[-1,1]]
expected = p.loc[['D','B'],['c','b']]
assert_panel_equal(result, expected)
# dup indices
result = p.iloc[[-1,-1,1],[-1,1]]
expected = p.loc[['D','D','B'],['c','b']]
assert_panel_equal(result, expected)
# combined
result = p.iloc[0,[True,True],[0,1]]
expected = p.loc['A',['a','b'],['one','two']]
assert_frame_equal(result, expected)
# out-of-bounds exception
self.assertRaises(IndexError, p.iloc.__getitem__, tuple([10,5]))
def f():
p.iloc[0,[True,True],[0,1,2]]
self.assertRaises(IndexError, f)
# trying to use a label
self.assertRaises(ValueError, p.iloc.__getitem__, tuple(['j','D']))
# GH
p = Panel(np.random.rand(4,3,2), items=['A','B','C','D'], major_axis=['U','V','W'], minor_axis=['X','Y'])
expected = p['A']
result = p.iloc[0,:,:]
assert_frame_equal(result, expected)
result = p.iloc[0,[True,True,True],:]
assert_frame_equal(result, expected)
result = p.iloc[0,[True,True,True],[0,1]]
assert_frame_equal(result, expected)
def f():
p.iloc[0,[True,True,True],[0,1,2]]
self.assertRaises(IndexError, f)
def f():
p.iloc[0,[True,True,True],[2]]
self.assertRaises(IndexError, f)
# GH 7199
# Panel with multi-index
multi_index = pd.MultiIndex.from_tuples([('ONE', 'one'),
('TWO', 'two'),
('THREE', 'three')],
names=['UPPER', 'lower'])
simple_index = [x[0] for x in multi_index]
wd1 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=multi_index)
wd2 = Panel(items=['First', 'Second'],
major_axis=['a', 'b', 'c', 'd'],
minor_axis=simple_index)
expected1 = wd1['First'].iloc[[True, True, True, False], [0, 2]]
result1 = wd1.iloc[0, [True, True, True, False], [0, 2]] # WRONG
assert_frame_equal(result1,expected1)
expected2 = wd2['First'].iloc[[True, True, True, False], [0, 2]]
result2 = wd2.iloc[0, [True, True, True, False], [0, 2]]
assert_frame_equal(result2,expected2)
expected1 = DataFrame(index=['a'],columns=multi_index,dtype='float64')
result1 = wd1.iloc[0,[0],[0,1,2]]
assert_frame_equal(result1,expected1)
expected2 = DataFrame(index=['a'],columns=simple_index,dtype='float64')
result2 = wd2.iloc[0,[0],[0,1,2]]
assert_frame_equal(result2,expected2)
# GH 7516
mi = MultiIndex.from_tuples([(0,'x'), (1,'y'), (2,'z')])
p = Panel(np.arange(3*3*3,dtype='int64').reshape(3,3,3), items=['a','b','c'], major_axis=mi, minor_axis=['u','v','w'])
result = p.iloc[:, 1, 0]
expected = Series([3,12,21],index=['a','b','c'], name='u')
assert_series_equal(result,expected)
result = p.loc[:, (1,'y'), 'u']
assert_series_equal(result,expected)
def test_iloc_getitem_doc_issue(self):
# multi axis slicing issue with single block
# surfaced in GH 6059
arr = np.random.randn(6,4)
index = date_range('20130101',periods=6)
columns = list('ABCD')
df = DataFrame(arr,index=index,columns=columns)
# defines ref_locs
df.describe()
result = df.iloc[3:5,0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5,0:2],index=index[3:5],columns=columns[0:2])
assert_frame_equal(result,expected)
# for dups
df.columns = list('aaaa')
result = df.iloc[3:5,0:2]
str(result)
result.dtypes
expected = DataFrame(arr[3:5,0:2],index=index[3:5],columns=list('aa'))
assert_frame_equal(result,expected)
# related
arr = np.random.randn(6,4)
index = list(range(0,12,2))
columns = list(range(0,8,2))
df = DataFrame(arr,index=index,columns=columns)
df._data.blocks[0].mgr_locs
result = df.iloc[1:5,2:4]
str(result)
result.dtypes
expected = DataFrame(arr[1:5,2:4],index=index[1:5],columns=columns[2:4])
assert_frame_equal(result,expected)
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(lrange(1,11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
# invalid
def f():
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23+0.1j, 2.2])
self.assertRaises(ValueError, f)
# valid
df.ix[2:5, 'bar'] = np.array([2.33j, 1.23+0.1j, 2.2, 1.0])
result = df.ix[2:5, 'bar']
expected = Series([2.33j, 1.23+0.1j, 2.2, 1.0], index=[2,3,4,5], name='bar')
assert_series_equal(result, expected)
# dtype getting changed?
df = DataFrame(index=Index(lrange(1,11)))
df['foo'] = np.zeros(10, dtype=np.float64)
df['bar'] = np.zeros(10, dtype=np.complex)
def f():
df[2:5] = np.arange(1,4)*1j
self.assertRaises(ValueError, f)
def test_iloc_setitem_series(self):
df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'), columns=list('ABCD'))
df.iloc[1,1] = 1
result = df.iloc[1,1]
self.assertEqual(result, 1)
df.iloc[:,2:3] = 0
expected = df.iloc[:,2:3]
result = df.iloc[:,2:3]
assert_frame_equal(result, expected)
s = Series(np.random.randn(10), index=lrange(0,20,2))
s.iloc[1] = 1
result = s.iloc[1]
self.assertEqual(result, 1)
s.iloc[:4] = 0
expected = s.iloc[:4]
result = s.iloc[:4]
assert_series_equal(result, expected)
s= Series([-1]*6)
s.iloc[0::2]= [0,2,4]
s.iloc[1::2]= [1,3,5]
result = s
expected= Series([0,1,2,3,4,5])
assert_series_equal(result, expected)
def test_iloc_setitem_list_of_lists(self):
# GH 7551
# list-of-list is set incorrectly in mixed vs. single dtyped frames
df = DataFrame(dict(A = np.arange(5,dtype='int64'), B = np.arange(5,10,dtype='int64')))
df.iloc[2:4] = [[10,11],[12,13]]
expected = DataFrame(dict(A = [0,1,10,12,4], B = [5,6,11,13,9]))
assert_frame_equal(df, expected)
df = DataFrame(dict(A = list('abcde'), B = np.arange(5,10,dtype='int64')))
df.iloc[2:4] = [['x',11],['y',13]]
expected = DataFrame(dict(A = ['a','b','x','y','e'], B = [5,6,11,13,9]))
assert_frame_equal(df, expected)
def test_iloc_getitem_multiindex(self):
mi_labels = DataFrame(np.random.randn(4, 3), columns=[['i', 'i', 'j'],
['A', 'A', 'B']],
index=[['i', 'i', 'j', 'k'], ['X', 'X', 'Y','Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2,2,4],[6,8,10]],
index=[[4,4,8],[8,10,12]])
# the first row
rs = mi_int.iloc[0]
xp = mi_int.ix[4].ix[8]
assert_series_equal(rs, xp, check_names=False)
self.assertEqual(rs.name, (4, 8))
self.assertEqual(xp.name, 8)
# 2nd (last) columns
rs = mi_int.iloc[:,2]
xp = mi_int.ix[:,2]
assert_series_equal(rs, xp)
# corner column
rs = mi_int.iloc[2,2]
xp = mi_int.ix[:,2].ix[2]
self.assertEqual(rs, xp)
# this is basically regular indexing
rs = mi_labels.iloc[2,2]
xp = mi_labels.ix['j'].ix[:,'j'].ix[0,0]
self.assertEqual(rs, xp)
def test_loc_multiindex(self):
mi_labels = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],
['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2,2,4],[6,8,10]],
index=[[4,4,8],[8,10,12]])
# the first row
rs = mi_labels.loc['i']
xp = mi_labels.ix['i']
assert_frame_equal(rs, xp)
# 2nd (last) columns
rs = mi_labels.loc[:,'j']
xp = mi_labels.ix[:,'j']
assert_frame_equal(rs, xp)
# corner column
rs = mi_labels.loc['j'].loc[:,'j']
xp = mi_labels.ix['j'].ix[:,'j']
assert_frame_equal(rs,xp)
# with a tuple
rs = mi_labels.loc[('i','X')]
xp = mi_labels.ix[('i','X')]
assert_frame_equal(rs,xp)
rs = mi_int.loc[4]
xp = mi_int.ix[4]
assert_frame_equal(rs,xp)
# GH6788
# multi-index indexer is None (meaning take all)
attributes = ['Attribute' + str(i) for i in range(1)]
attribute_values = ['Value' + str(i) for i in range(5)]
index = MultiIndex.from_product([attributes,attribute_values])
df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
df = DataFrame(df, columns=index)
result = df[attributes]
assert_frame_equal(result, df)
# GH 7349
# loc with a multi-index seems to be doing fallback
df = DataFrame(np.arange(12).reshape(-1,1),index=pd.MultiIndex.from_product([[1,2,3,4],[1,2,3]]))
expected = df.loc[([1,2],),:]
result = df.loc[[1,2]]
assert_frame_equal(result, expected)
# GH 7399
# incomplete indexers
s = pd.Series(np.arange(15,dtype='int64'),MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.loc[:, 'a':'c']
result = s.loc[0:4, 'a':'c']
assert_series_equal(result, expected)
assert_series_equal(result, expected)
result = s.loc[:4, 'a':'c']
assert_series_equal(result, expected)
assert_series_equal(result, expected)
result = s.loc[0:, 'a':'c']
assert_series_equal(result, expected)
assert_series_equal(result, expected)
# GH 7400
# multiindexer getitem with list of indexers skips wrong element
s = pd.Series(np.arange(15,dtype='int64'),MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.iloc[[6,7,8,12,13,14]]
result = s.loc[2:4:2, 'a':'c']
assert_series_equal(result, expected)
def test_multiindex_perf_warn(self):
if sys.version_info < (2, 7):
raise nose.SkipTest('python version < 2.7')
df = DataFrame({'jim':[0, 0, 1, 1],
'joe':['x', 'x', 'z', 'y'],
'jolie':np.random.rand(4)}).set_index(['jim', 'joe'])
with tm.assert_produces_warning(PerformanceWarning, clear=[pd.core.index]):
_ = df.loc[(1, 'z')]
df = df.iloc[[2,1,3,0]]
with tm.assert_produces_warning(PerformanceWarning):
_ = df.loc[(0,)]
@slow
def test_multiindex_get_loc(self): # GH7724, GH2646
with warnings.catch_warnings(record=True):
# test indexing into a multi-index before & past the lexsort depth
from numpy.random import randint, choice, randn
cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
def validate(mi, df, key):
mask = np.ones(len(df)).astype('bool')
# test for all partials of this key
for i, k in enumerate(key):
mask &= df.iloc[:, i] == k
if not mask.any():
self.assertNotIn(key[:i+1], mi.index)
continue
self.assertIn(key[:i+1], mi.index)
right = df[mask].copy()
if i + 1 != len(key): # partial key
right.drop(cols[:i+1], axis=1, inplace=True)
right.set_index(cols[i+1:-1], inplace=True)
assert_frame_equal(mi.loc[key[:i+1]], right)
else: # full key
right.set_index(cols[:-1], inplace=True)
if len(right) == 1: # single hit
right = Series(right['jolia'].values,
name=right.index[0], index=['jolia'])
assert_series_equal(mi.loc[key[:i+1]], right)
else: # multi hit
assert_frame_equal(mi.loc[key[:i+1]], right)
def loop(mi, df, keys):
for key in keys:
validate(mi, df, key)
n, m = 1000, 50
vals = [randint(0, 10, n), choice(list('abcdefghij'), n),
choice(pd.date_range('20141009', periods=10).tolist(), n),
choice(list('ZYXWVUTSRQ'), n), randn(n)]
vals = list(map(tuple, zip(*vals)))
# bunch of keys for testing
keys = [randint(0, 11, m), choice(list('abcdefghijk'), m),
choice(pd.date_range('20141009', periods=11).tolist(), m),
choice(list('ZYXWVUTSRQP'), m)]
keys = list(map(tuple, zip(*keys)))
keys += list(map(lambda t: t[:-1], vals[::n//m]))
# covers both unique index and non-unique index
df = pd.DataFrame(vals, columns=cols)
a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
for frame in a, b:
for i in range(5): # lexsort depth
df = frame.copy() if i == 0 else frame.sort_values(by=cols[:i])
mi = df.set_index(cols[:-1])
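# sorting by the first i columns (above) guarantees a lexsort depth of at
# least i, which the assert below checks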
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)
def test_series_getitem_multiindex(self):
# GH 6018
# series regression getitem with a multi-index
s = Series([1,2,3])
s.index = MultiIndex.from_tuples([(0,0),(1,1), (2,1)])
result = s[:,0]
expected = Series([1],index=[0])
assert_series_equal(result,expected)
result = s.ix[:,1]
expected = Series([2,3],index=[1,2])
assert_series_equal(result,expected)
# xs
result = s.xs(0,level=0)
expected = Series([1],index=[0])
assert_series_equal(result,expected)
result = s.xs(1,level=1)
expected = Series([2,3],index=[1,2])
assert_series_equal(result,expected)
# GH6258
s = Series([1,3,4,1,3,4],
index=MultiIndex.from_product([list('AB'),
list(date_range('20130903',periods=3))]))
result = s.xs('20130903',level=1)
expected = Series([1,1],index=list('AB'))
assert_series_equal(result,expected)
# GH5684
idx = MultiIndex.from_tuples([('a', 'one'), ('a', 'two'),
('b', 'one'), ('b', 'two')])
s = Series([1, 2, 3, 4], index=idx)
s.index.set_names(['L1', 'L2'], inplace=True)
result = s.xs('one', level='L2')
expected = Series([1, 3], index=['a', 'b'])
expected.index.set_names(['L1'], inplace=True)
assert_series_equal(result, expected)
def test_ix_general(self):
# ix general issues
# GH 2817
data = {'amount': {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
'col': {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
'year': {0: 2012, 1: 2011, 2: 2012, 3: 2012, 4: 2012}}
df = DataFrame(data).set_index(keys=['col', 'year'])
key = 4.0, 2012
# emits a PerformanceWarning, ok
with self.assert_produces_warning(PerformanceWarning):
tm.assert_frame_equal(df.ix[key], df.iloc[2:])
# this is ok
df.sortlevel(inplace=True)
res = df.ix[key]
# col has float dtype, result should be Float64Index
index = MultiIndex.from_arrays([[4.] * 3, [2012] * 3],
names=['col', 'year'])
expected = DataFrame({'amount': [222, 333, 444]}, index=index)
tm.assert_frame_equal(res, expected)
def test_ix_weird_slicing(self):
## http://stackoverflow.com/q/17056560/1240268
df = DataFrame({'one' : [1, 2, 3, np.nan, np.nan], 'two' : [1, 2, 3, 4, 5]})
df.ix[df['one']>1, 'two'] = -df['two']
expected = DataFrame({'one': {0: 1.0, 1: 2.0, 2: 3.0, 3: nan, 4: nan},
'two': {0: 1, 1: -2, 2: -3, 3: 4, 4: 5}})
assert_frame_equal(df, expected)
def test_xs_multiindex(self):
# GH2903
columns = MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'hello'), ('b', 'world')], names=['lvl0', 'lvl1'])
df = DataFrame(np.random.randn(4, 4), columns=columns)
df.sortlevel(axis=1,inplace=True)
result = df.xs('a', level='lvl0', axis=1)
expected = df.iloc[:,0:2].loc[:,'a']
assert_frame_equal(result,expected)
result = df.xs('foo', level='lvl1', axis=1)
expected = df.iloc[:, 1:2].copy()
expected.columns = expected.columns.droplevel('lvl1')
assert_frame_equal(result, expected)
def test_per_axis_per_level_getitem(self):
# GH6134
# example test case
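# note: _mklbl is a small helper defined earlier in this file; it presumably
# builds label lists like _mklbl('A',5) -> ['A0', 'A1', 'A2', 'A3', 'A4']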
ix = MultiIndex.from_product([_mklbl('A',5),_mklbl('B',7),_mklbl('C',4),_mklbl('D',2)])
df = DataFrame(np.arange(len(ix.get_values())),index=ix)
result = df.loc[(slice('A1','A3'),slice(None), ['C1','C3']),:]
expected = df.loc[[ tuple([a,b,c,d]) for a,b,c,d in df.index.values if (
a == 'A1' or a == 'A2' or a == 'A3') and (c == 'C1' or c == 'C3')]]
assert_frame_equal(result, expected)
expected = df.loc[[ tuple([a,b,c,d]) for a,b,c,d in df.index.values if (
a == 'A1' or a == 'A2' or a == 'A3') and (c == 'C1' or c == 'C2' or c == 'C3')]]
result = df.loc[(slice('A1','A3'),slice(None), slice('C1','C3')),:]
assert_frame_equal(result, expected)
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples([('A',1),('A',2),('A',3),('B',1)],
names=['one','two'])
columns = MultiIndex.from_tuples([('a','foo'),('a','bar'),('b','foo'),('b','bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(16,dtype='int64').reshape(4, 4), index=index, columns=columns)
df = df.sortlevel(axis=0).sortlevel(axis=1)
# identity
result = df.loc[(slice(None),slice(None)),:]
assert_frame_equal(result, df)
result = df.loc[(slice(None),slice(None)),(slice(None),slice(None))]
assert_frame_equal(result, df)
result = df.loc[:,(slice(None),slice(None))]
assert_frame_equal(result, df)
# index
result = df.loc[(slice(None),[1]),:]
expected = df.iloc[[0,3]]
assert_frame_equal(result, expected)
result = df.loc[(slice(None),1),:]
expected = df.iloc[[0,3]]
assert_frame_equal(result, expected)
# columns
result = df.loc[:,(slice(None),['foo'])]
expected = df.iloc[:,[1,3]]
assert_frame_equal(result, expected)
# both
result = df.loc[(slice(None),1),(slice(None),['foo'])]
expected = df.iloc[[0,3],[1,3]]
assert_frame_equal(result, expected)
result = df.loc['A','a']
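# scalar labels drop their level on both axes, leaving a single-level 'two'
# index and single-level 'lvl1' columns in the expected frame below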
expected = DataFrame(dict(bar = [1,5,9], foo = [0,4,8]),
index=Index([1,2,3],name='two'),
columns=Index(['bar','foo'],name='lvl1'))
assert_frame_equal(result, expected)
result = df.loc[(slice(None),[1,2]),:]
expected = df.iloc[[0,1,3]]
assert_frame_equal(result, expected)
# multi-level series
s = Series(np.arange(len(ix.get_values())),index=ix)
result = s.loc['A1':'A3', :, ['C1','C3']]
expected = s.loc[[ tuple([a,b,c,d]) for a,b,c,d in s.index.values if (
a == 'A1' or a == 'A2' or a == 'A3') and (c == 'C1' or c == 'C3')]]
assert_series_equal(result, expected)
# boolean indexers
result = df.loc[(slice(None),df.loc[:,('a','bar')]>5),:]
expected = df.iloc[[2,3]]
assert_frame_equal(result, expected)
def f():
df.loc[(slice(None),np.array([True,False])),:]
self.assertRaises(ValueError, f)
# ambiguous cases
# these can be interpreted in multiple ways (e.g. in this case
# as df.loc[slice(None), [1]] as well)
self.assertRaises(KeyError, lambda : df.loc[slice(None),[1]])
result = df.loc[(slice(None),[1]),:]
expected = df.iloc[[0,3]]
assert_frame_equal(result, expected)
# not lexsorted
self.assertEqual(df.index.lexsort_depth,2)
df = df.sortlevel(level=1,axis=0)
self.assertEqual(df.index.lexsort_depth,0)
with tm.assertRaisesRegexp(KeyError, 'MultiIndex Slicing requires the index to be fully lexsorted tuple len \(2\), lexsort depth \(0\)'):
df.loc[(slice(None),df.loc[:,('a','bar')]>5),:]
def test_multiindex_slicers_non_unique(self):
# GH 7106
# non-unique mi index support
df = DataFrame(dict(A = ['foo','foo','foo','foo'],
B = ['a','a','a','a'],
C = [1,2,1,3],
D = [1,2,3,4])).set_index(['A','B','C']).sortlevel()
self.assertFalse(df.index.is_unique)
expected = DataFrame(dict(A = ['foo','foo'],
B = ['a','a'],
C = [1,1],
D = [1,3])).set_index(['A','B','C']).sortlevel()
result = df.loc[(slice(None),slice(None),1),:]
assert_frame_equal(result, expected)
# this is equivalent of an xs expression
result = df.xs(1,level=2,drop_level=False)
assert_frame_equal(result, expected)
df = DataFrame(dict(A = ['foo','foo','foo','foo'],
B = ['a','a','a','a'],
C = [1,2,1,2],
D = [1,2,3,4])).set_index(['A','B','C']).sortlevel()
self.assertFalse(df.index.is_unique)
expected = DataFrame(dict(A = ['foo','foo'],
B = ['a','a'],
C = [1,1],
D = [1,3])).set_index(['A','B','C']).sortlevel()
result = df.loc[(slice(None),slice(None),1),:]
self.assertFalse(result.index.is_unique)
assert_frame_equal(result, expected)
def test_multiindex_slicers_datetimelike(self):
# GH 7429
# buggy/inconsistent behavior when slicing with datetime-like
import datetime
dates = [datetime.datetime(2012,1,1,12,12,12) + datetime.timedelta(days=i) for i in range(6)]
freq = [1,2]
index = MultiIndex.from_product([dates,freq], names=['date','frequency'])
df = DataFrame(np.arange(6*2*4,dtype='int64').reshape(-1,4),index=index,columns=list('ABCD'))
# multi-axis slicing
idx = pd.IndexSlice
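# pd.IndexSlice just builds the same tuple-of-slices indexer with a more natural
# syntax, e.g. idx[a:b, 1] is equivalent to (slice(a, b), 1)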
expected = df.iloc[[0,2,4],[0,1]]
result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),Timestamp('2012-01-03 12:12:12')),slice(1,1)), slice('A','B')]
assert_frame_equal(result,expected)
result = df.loc[(idx[Timestamp('2012-01-01 12:12:12'):Timestamp('2012-01-03 12:12:12')],idx[1:1]), slice('A','B')]
assert_frame_equal(result,expected)
result = df.loc[(slice(Timestamp('2012-01-01 12:12:12'),Timestamp('2012-01-03 12:12:12')),1), slice('A','B')]
assert_frame_equal(result,expected)
# with strings
result = df.loc[(slice('2012-01-01 12:12:12','2012-01-03 12:12:12'),slice(1,1)), slice('A','B')]
assert_frame_equal(result,expected)
result = df.loc[(idx['2012-01-01 12:12:12':'2012-01-03 12:12:12'],1), idx['A','B']]
assert_frame_equal(result,expected)
def test_multiindex_slicers_edges(self):
# GH 8132
# various edge cases
df = DataFrame({'A': ['A0'] * 5 + ['A1']*5 + ['A2']*5,
'B': ['B0','B0','B1','B1','B2'] * 3,
'DATE': ["2013-06-11",
"2013-07-02",
"2013-07-09",
"2013-07-30",
"2013-08-06",
"2013-06-11",
"2013-07-02",
"2013-07-09",
"2013-07-30",
"2013-08-06",
"2013-09-03",
"2013-10-01",
"2013-07-09",
"2013-08-06",
"2013-09-03"],
'VALUES': [22, 35, 14, 9, 4, 40, 18, 4, 2, 5, 1, 2, 3,4, 2]})
df['DATE'] = pd.to_datetime(df['DATE'])
df1 = df.set_index(['A', 'B', 'DATE'])
df1 = df1.sortlevel()
df2 = df.set_index('DATE')
# A1 - Get all values under "A0" and "A1"
result = df1.loc[(slice('A1')),:]
expected = df1.iloc[0:10]
assert_frame_equal(result, expected)
# A2 - Get all values from the start to "A2"
result = df1.loc[(slice('A2')),:]
expected = df1
assert_frame_equal(result, expected)
# A3 - Get all values under "B1" or "B2"
result = df1.loc[(slice(None),slice('B1','B2')),:]
expected = df1.iloc[[2,3,4,7,8,9,12,13,14]]
assert_frame_equal(result, expected)
# A4 - Get all values between 2013-07-02 and 2013-07-09
result = df1.loc[(slice(None),slice(None),slice('20130702','20130709')),:]
expected = df1.iloc[[1,2,6,7,12]]
assert_frame_equal(result, expected)
# B1 - Get all values in B0 that are also under A0, A1 and A2
result = df1.loc[(slice('A2'),slice('B0')),:]
expected = df1.iloc[[0,1,5,6,10,11]]
assert_frame_equal(result, expected)
# B2 - Get all values in B0, B1 and B2 (similar to what #2 is doing for the As)
result = df1.loc[(slice(None),slice('B2')),:]
expected = df1
assert_frame_equal(result, expected)
# B3 - Get all values from B1 to B2 and up to 2013-08-06
result = df1.loc[(slice(None),slice('B1','B2'),slice('2013-08-06')),:]
expected = df1.iloc[[2,3,4,7,8,9,12,13]]
assert_frame_equal(result, expected)
# B4 - Same as A4 but the start of the date slice is not a key.
# shows indexing on a partial selection slice
result = df1.loc[(slice(None),slice(None),slice('20130701','20130709')),:]
expected = df1.iloc[[1,2,6,7,12]]
assert_frame_equal(result, expected)
def test_per_axis_per_level_doc_examples(self):
# test index maker
idx = pd.IndexSlice
# from indexing.rst / advanced
index = MultiIndex.from_product([_mklbl('A',4),
_mklbl('B',2),
_mklbl('C',4),
_mklbl('D',2)])
columns = MultiIndex.from_tuples([('a','foo'),('a','bar'),
('b','foo'),('b','bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(len(index)*len(columns),dtype='int64').reshape((len(index),len(columns))),
index=index,
columns=columns)
result = df.loc[(slice('A1','A3'),slice(None), ['C1','C3']),:]
expected = df.loc[[ tuple([a,b,c,d]) for a,b,c,d in df.index.values if (
a == 'A1' or a == 'A2' or a == 'A3') and (c == 'C1' or c == 'C3')]]
assert_frame_equal(result, expected)
result = df.loc[idx['A1':'A3',:,['C1','C3']],:]
assert_frame_equal(result, expected)
result = df.loc[(slice(None),slice(None), ['C1','C3']),:]
expected = df.loc[[ tuple([a,b,c,d]) for a,b,c,d in df.index.values if (
c == 'C1' or c == 'C3')]]
assert_frame_equal(result, expected)
result = df.loc[idx[:,:,['C1','C3']],:]
assert_frame_equal(result, expected)
# not sorted
def f():
df.loc['A1',(slice(None),'foo')]
self.assertRaises(KeyError, f)
df = df.sortlevel(axis=1)
# slicing
df.loc['A1',(slice(None),'foo')]
df.loc[(slice(None),slice(None), ['C1','C3']),(slice(None),'foo')]
# setitem
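# loc(axis=0) scopes the slicers to the row axis, so no column placeholder
# is needed when assigning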
df.loc(axis=0)[:,:,['C1','C3']] = -10
def test_loc_arguments(self):
index = MultiIndex.from_product([_mklbl('A',4),
_mklbl('B',2),
_mklbl('C',4),
_mklbl('D',2)])
columns = MultiIndex.from_tuples([('a','foo'),('a','bar'),
('b','foo'),('b','bah')],
names=['lvl0', 'lvl1'])
df = DataFrame(np.arange(len(index)*len(columns),dtype='int64').reshape((len(index),len(columns))),
index=index,
columns=columns).sortlevel().sortlevel(axis=1)
# axis 0
result = df.loc(axis=0)['A1':'A3',:,['C1','C3']]
expected = df.loc[[ tuple([a,b,c,d]) for a,b,c,d in df.index.values if (
a == 'A1' or a == 'A2' or a == 'A3') and (c == 'C1' or c == 'C3')]]
assert_frame_equal(result, expected)
result = df.loc(axis='index')[:,:,['C1','C3']]
expected = df.loc[[ tuple([a,b,c,d]) for a,b,c,d in df.index.values if (
c == 'C1' or c == 'C3')]]
assert_frame_equal(result, expected)
# axis 1
result = df.loc(axis=1)[:,'foo']
expected = df.loc[:,(slice(None),'foo')]
assert_frame_equal(result, expected)
result = df.loc(axis='columns')[:,'foo']
expected = df.loc[:,(slice(None),'foo')]
assert_frame_equal(result, expected)
# invalid axis
def f():
df.loc(axis=-1)[:,:,['C1','C3']]
self.assertRaises(ValueError, f)
def f():
df.loc(axis=2)[:,:,['C1','C3']]
self.assertRaises(ValueError, f)
def f():
df.loc(axis='foo')[:,:,['C1','C3']]
self.assertRaises(ValueError, f)
def test_per_axis_per_level_setitem(self):
# test index maker
idx = pd.IndexSlice
# test multi-index slicing with per axis and per index controls
index = MultiIndex.from_tuples([('A',1),('A',2),('A',3),('B',1)],
names=['one','two'])
columns = MultiIndex.from_tuples([('a','foo'),('a','bar'),('b','foo'),('b','bah')],
names=['lvl0', 'lvl1'])
df_orig = DataFrame(np.arange(16,dtype='int64').reshape(4, 4), index=index, columns=columns)
df_orig = df_orig.sortlevel(axis=0).sortlevel(axis=1)
# identity
df = df_orig.copy()
df.loc[(slice(None),slice(None)),:] = 100
expected = df_orig.copy()
expected.iloc[:,:] = 100
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:,:] = 100
expected = df_orig.copy()
expected.iloc[:,:] = 100
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None),slice(None)),(slice(None),slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:,:] = 100
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:,(slice(None),slice(None))] = 100
expected = df_orig.copy()
expected.iloc[:,:] = 100
assert_frame_equal(df, expected)
# index
df = df_orig.copy()
df.loc[(slice(None),[1]),:] = 100
expected = df_orig.copy()
expected.iloc[[0,3]] = 100
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None),1),:] = 100
expected = df_orig.copy()
expected.iloc[[0,3]] = 100
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc(axis=0)[:,1] = 100
expected = df_orig.copy()
expected.iloc[[0,3]] = 100
assert_frame_equal(df, expected)
# columns
df = df_orig.copy()
df.loc[:,(slice(None),['foo'])] = 100
expected = df_orig.copy()
expected.iloc[:,[1,3]] = 100
assert_frame_equal(df, expected)
# both
df = df_orig.copy()
df.loc[(slice(None),1),(slice(None),['foo'])] = 100
expected = df_orig.copy()
expected.iloc[[0,3],[1,3]] = 100
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:,1],idx[:,['foo']]] = 100
expected = df_orig.copy()
expected.iloc[[0,3],[1,3]] = 100
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc['A','a'] = 100
expected = df_orig.copy()
expected.iloc[0:3,0:2] = 100
assert_frame_equal(df, expected)
# setting with a list-like
df = df_orig.copy()
df.loc[(slice(None),1),(slice(None),['foo'])] = np.array([[100, 100], [100, 100]],dtype='int64')
expected = df_orig.copy()
expected.iloc[[0,3],[1,3]] = 100
assert_frame_equal(df, expected)
# not enough values
df = df_orig.copy()
def f():
df.loc[(slice(None),1),(slice(None),['foo'])] = np.array([[100], [100, 100]],dtype='int64')
self.assertRaises(ValueError, f)
def f():
df.loc[(slice(None),1),(slice(None),['foo'])] = np.array([100, 100, 100, 100],dtype='int64')
self.assertRaises(ValueError, f)
# with an alignable rhs
df = df_orig.copy()
df.loc[(slice(None),1),(slice(None),['foo'])] = df.loc[(slice(None),1),(slice(None),['foo'])] * 5
expected = df_orig.copy()
expected.iloc[[0,3],[1,3]] = expected.iloc[[0,3],[1,3]] * 5
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[(slice(None),1),(slice(None),['foo'])] *= df.loc[(slice(None),1),(slice(None),['foo'])]
expected = df_orig.copy()
expected.iloc[[0,3],[1,3]] *= expected.iloc[[0,3],[1,3]]
assert_frame_equal(df, expected)
rhs = df_orig.loc[(slice(None),1),(slice(None),['foo'])].copy()
rhs.loc[:,('c','bah')] = 10
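# the extra ('c','bah') column has no counterpart on the lhs, so it is dropped
# during alignment and the result matches the plain in-place multiply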
df = df_orig.copy()
df.loc[(slice(None),1),(slice(None),['foo'])] *= rhs
expected = df_orig.copy()
expected.iloc[[0,3],[1,3]] *= expected.iloc[[0,3],[1,3]]
assert_frame_equal(df, expected)
def test_multiindex_setitem(self):
# GH 3738
# setting with a multi-index right hand side
arrays = [np.array(['bar', 'bar', 'baz', 'qux', 'qux', 'bar']),
np.array(['one', 'two', 'one', 'one', 'two', 'one']),
np.arange(0, 6, 1)]
df_orig = pd.DataFrame(np.random.randn(6, 3),
index=arrays,
columns=['A', 'B', 'C']).sort_index()
expected = df_orig.loc[['bar']]*2
df = df_orig.copy()
df.loc[['bar']] *= 2
assert_frame_equal(df.loc[['bar']],expected)
# raise because these have differing levels
def f():
df.loc['bar'] *= 2
self.assertRaises(TypeError, f)
# from SO
#http://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
df_orig = DataFrame.from_dict({'price': {
('DE', 'Coal', 'Stock'): 2,
('DE', 'Gas', 'Stock'): 4,
('DE', 'Elec', 'Demand'): 1,
('FR', 'Gas', 'Stock'): 5,
('FR', 'Solar', 'SupIm'): 0,
('FR', 'Wind', 'SupIm'): 0}})
df_orig.index = MultiIndex.from_tuples(df_orig.index, names=['Sit', 'Com', 'Type'])
expected = df_orig.copy()
expected.iloc[[0,2,3]] *= 2
idx = pd.IndexSlice
df = df_orig.copy()
df.loc[idx[:,:,'Stock'],:] *= 2
assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[idx[:,:,'Stock'],'price'] *= 2
assert_frame_equal(df, expected)
def test_getitem_multiindex(self):
# GH 5725
# the 'A' happens to be a valid Timestamp so it doesn't raise the appropriate
# error, only in PY3 of course!
index = MultiIndex(levels=[['D', 'B', 'C'], [0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
arr = np.random.randn(len(index),1)
df = DataFrame(arr,index=index,columns=['val'])
result = df.val['D']
expected = Series(arr.ravel()[0:3],name='val',index=Index([26,37,57],name='day'))
assert_series_equal(result,expected)
def f():
df.val['A']
self.assertRaises(KeyError, f)
def f():
df.val['X']
self.assertRaises(KeyError, f)
# A is treated as a special Timestamp
index = MultiIndex(levels=[['A', 'B', 'C'], [0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
df = DataFrame(arr,index=index,columns=['val'])
result = df.val['A']
expected = Series(arr.ravel()[0:3],name='val',index=Index([26,37,57],name='day'))
assert_series_equal(result,expected)
def f():
df.val['X']
self.assertRaises(KeyError, f)
# GH 7866
# multi-index slicing with missing indexers
s = pd.Series(np.arange(9,dtype='int64'),
index=pd.MultiIndex.from_product([['A','B','C'],['foo','bar','baz']],
names=['one','two'])
).sortlevel()
expected = pd.Series(np.arange(3,dtype='int64'),
index=pd.MultiIndex.from_product([['A'],['foo','bar','baz']],
names=['one','two'])
).sortlevel()
result = s.loc[['A']]
assert_series_equal(result,expected)
result = s.loc[['A','D']]
assert_series_equal(result,expected)
# not any values found
self.assertRaises(KeyError, lambda : s.loc[['D']])
# empty ok
result = s.loc[[]]
expected = s.iloc[[]]
assert_series_equal(result,expected)
idx = pd.IndexSlice
expected = pd.Series([0,3,6],
index=pd.MultiIndex.from_product([['A','B','C'],['foo']],
names=['one','two'])
).sortlevel()
result = s.loc[idx[:,['foo']]]
assert_series_equal(result,expected)
result = s.loc[idx[:,['foo','bah']]]
assert_series_equal(result,expected)
# GH 8737
# empty indexer
multi_index = pd.MultiIndex.from_product((['foo', 'bar', 'baz'], ['alpha', 'beta']))
df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index)
df = df.sortlevel(0, axis=1)
expected = DataFrame(index=range(5),columns=multi_index.reindex([])[0])
result1 = df.loc[:, ([], slice(None))]
result2 = df.loc[:, (['foo'], [])]
assert_frame_equal(result1, expected)
assert_frame_equal(result2, expected)
# regression from < 0.14.0
# GH 7914
df = DataFrame([[np.mean, np.median],['mean','median']],
columns=MultiIndex.from_tuples([('functs','mean'),
('functs','median')]),
index=['function', 'name'])
result = df.loc['function',('functs','mean')]
self.assertEqual(result,np.mean)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
self.assertEqual(df['c'].dtype, np.float64)
df.ix[0,'c'] = 'foo'
expected = DataFrame([{"a": 1, "c" : 'foo'}, {"a": 3, "b": 2, "c" : np.nan}])
assert_frame_equal(df,expected)
# GH10280
df = DataFrame(np.arange(6,dtype='int64').reshape(2, 3),
index=list('ab'),
columns=['foo', 'bar', 'baz'])
for val in [3.14, 'wxyz']:
left = df.copy()
left.loc['a', 'bar'] = val
right = DataFrame([[0, val, 2], [3, 4, 5]],
index=list('ab'),
columns=['foo', 'bar', 'baz'])
assert_frame_equal(left, right)
self.assertTrue(com.is_integer_dtype(left['foo']))
self.assertTrue(com.is_integer_dtype(left['baz']))
left = DataFrame(np.arange(6,dtype='int64').reshape(2, 3) / 10.0,
index=list('ab'),
columns=['foo', 'bar', 'baz'])
left.loc['a', 'bar'] = 'wxyz'
right = DataFrame([[0, 'wxyz', .2], [.3, .4, .5]],
index=list('ab'),
columns=['foo', 'bar', 'baz'])
assert_frame_equal(left, right)
self.assertTrue(com.is_float_dtype(left['foo']))
self.assertTrue(com.is_float_dtype(left['baz']))
def test_setitem_iloc(self):
# setitem with an iloc list
df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"])
df.iloc[[0,1],[1,2]]
df.iloc[[0,1],[1,2]] += 100
expected = DataFrame(np.array([0,101,102,3,104,105,6,7,8]).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"])
assert_frame_equal(df,expected)
def test_dups_fancy_indexing(self):
# GH 3455
from pandas.util.testing import makeCustomDataframe as mkdf
df= mkdf(10, 3)
df.columns = ['a','a','b']
cols = ['b','a']
result = df[['b','a']].columns
expected = Index(['b','a','a'])
self.assertTrue(result.equals(expected))
# across dtypes
df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('aaaaaaa'))
df.head()
str(df)
result = DataFrame([[1,2,1.,2.,3.,'foo','bar']])
result.columns = list('aaaaaaa')
df_v = df.iloc[:,4]
res_v = result.iloc[:,4]
assert_frame_equal(df,result)
# GH 3561, dups not in selected order
df = DataFrame({'test': [5,7,9,11], 'test1': [4.,5,6,7], 'other': list('abcd') }, index=['A', 'A', 'B', 'C'])
rows = ['C', 'B']
expected = DataFrame({'test' : [11,9], 'test1': [ 7., 6], 'other': ['d','c']},index=rows)
result = df.ix[rows]
assert_frame_equal(result, expected)
result = df.ix[Index(rows)]
assert_frame_equal(result, expected)
rows = ['C','B','E']
expected = DataFrame({'test' : [11,9,np.nan], 'test1': [7.,6,np.nan], 'other': ['d','c',np.nan]},index=rows)
result = df.ix[rows]
assert_frame_equal(result, expected)
# see GH5553, make sure we use the right indexer
rows = ['F','G','H','C','B','E']
expected = DataFrame({'test' : [np.nan,np.nan,np.nan,11,9,np.nan],
'test1': [np.nan,np.nan,np.nan,7.,6,np.nan],
'other': [np.nan,np.nan,np.nan,'d','c',np.nan]},index=rows)
result = df.ix[rows]
assert_frame_equal(result, expected)
# inconsistent returns for unique/duplicate indices when values are missing
df = DataFrame(randn(4,3),index=list('ABCD'))
expected = df.ix[['E']]
dfnu = DataFrame(randn(5,3),index=list('AABCD'))
result = dfnu.ix[['E']]
assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH 11497
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": [0, 1, 2]})
result = df.ix[[0, 8, 0]]
expected = DataFrame({"A": [0, np.nan, 0]}, index=[0, 8, 0])
assert_frame_equal(result, expected, check_index_type=False)
df = DataFrame({"A": list('abc')})
result = df.ix[[0,8,0]]
expected = DataFrame({"A": ['a', np.nan, 'a']}, index=[0, 8, 0])
assert_frame_equal(result, expected, check_index_type=False)
# non unique with non unique selector
df = DataFrame({'test': [5, 7, 9, 11]}, index=['A', 'A', 'B', 'C'])
expected = DataFrame({'test' : [5, 7, 5, 7, np.nan]}, index=['A', 'A', 'A', 'A', 'E'])
result = df.ix[['A', 'A', 'E']]
assert_frame_equal(result, expected)
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=['A', 'B', 'B', 'B', 'A'])
expected = pd.concat([df.ix[:,['A','B']],DataFrame(np.nan,columns=['C'],index=df.index)],axis=1)
result = df.ix[:,['A','B','C']]
assert_frame_equal(result, expected)
# GH 6504, multi-axis indexing
df = DataFrame(np.random.randn(9,2), index=[1,1,1,2,2,2,3,3,3], columns=['a', 'b'])
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
assert_frame_equal(result, expected)
expected = df
result = df.loc[:,['a', 'b']]
assert_frame_equal(result, expected)
expected = df.iloc[0:6,:]
result = df.loc[[1, 2], ['a', 'b']]
assert_frame_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df=DataFrame({'a':{1:'aaa',2:'bbb',3:'ccc'},'b':{1:111,2:222,3:333}})
# this works, new column is created correctly
df['test']=df['a'].apply(lambda x: '_' if x=='aaa' else x)
# this does not work, ie column test is not changed
idx=df['test']=='_'
temp=df.ix[idx,'a'].apply(lambda x: '-----' if x=='aaa' else x)
df.ix[idx,'test']=temp
self.assertEqual(df.iloc[0,2], '-----')
# if I look at df, then element [0,2] equals '_'. If instead I type df.ix[idx,'test'], I get '-----'; finally, typing df.iloc[0,2] gives '_'.
def test_multitype_list_index_access(self):
#GH 10610
df = pd.DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with self.assertRaises(IndexError):
vals = df[[22, 26, -8]]
self.assertEqual(df[21].shape[0], df.shape[0])
def test_set_index_nan(self):
# GH 3586
df = DataFrame({'PRuid': {17: 'nonQC', 18: 'nonQC', 19: 'nonQC', 20: '10', 21: '11', 22: '12', 23: '13',
24: '24', 25: '35', 26: '46', 27: '47', 28: '48', 29: '59', 30: '10'},
'QC': {17: 0.0, 18: 0.0, 19: 0.0, 20: nan, 21: nan, 22: nan, 23: nan, 24: 1.0, 25: nan,
26: nan, 27: nan, 28: nan, 29: nan, 30: nan},
'data': {17: 7.9544899999999998, 18: 8.0142609999999994, 19: 7.8591520000000008, 20: 0.86140349999999999,
21: 0.87853110000000001, 22: 0.8427041999999999, 23: 0.78587700000000005, 24: 0.73062459999999996,
25: 0.81668560000000001, 26: 0.81927080000000008, 27: 0.80705009999999999, 28: 0.81440240000000008,
29: 0.80140849999999997, 30: 0.81307740000000006},
'year': {17: 2006, 18: 2007, 19: 2008, 20: 1985, 21: 1985, 22: 1985, 23: 1985,
24: 1985, 25: 1985, 26: 1985, 27: 1985, 28: 1985, 29: 1985, 30: 1986}}).reset_index()
result = df.set_index(['year','PRuid','QC']).reset_index().reindex(columns=df.columns)
assert_frame_equal(result,df)
def test_multi_nan_indexing(self):
# GH 3588
df = DataFrame({"a":['R1', 'R2', np.nan, 'R4'], 'b':["C1", "C2", "C3" , "C4"], "c":[10, 15, np.nan , 20]})
result = df.set_index(['a','b'], drop=False)
expected = DataFrame({"a":['R1', 'R2', np.nan, 'R4'], 'b':["C1", "C2", "C3" , "C4"], "c":[10, 15, np.nan , 20]},
index = [Index(['R1','R2',np.nan,'R4'],name='a'),Index(['C1','C2','C3','C4'],name='b')])
assert_frame_equal(result,expected)
def test_iloc_panel_issue(self):
# GH 3617
p = Panel(randn(4, 4, 4))
self.assertEqual(p.iloc[:3, :3, :3].shape, (3,3,3))
self.assertEqual(p.iloc[1, :3, :3].shape, (3,3))
self.assertEqual(p.iloc[:3, 1, :3].shape, (3,3))
self.assertEqual(p.iloc[:3, :3, 1].shape, (3,3))
self.assertEqual(p.iloc[1, 1, :3].shape, (3,))
self.assertEqual(p.iloc[1, :3, 1].shape, (3,))
self.assertEqual(p.iloc[:3, 1, 1].shape, (3,))
def test_panel_getitem(self):
# GH4016, date selection returns a frame when using a partial string selection
ind = date_range(start="2000", freq="D", periods=1000)
df = DataFrame(np.random.randn(len(ind), 5), index=ind, columns=list('ABCDE'))
panel = Panel(dict([ ('frame_'+c,df) for c in list('ABC') ]))
test2 = panel.ix[:, "2002":"2002-12-31"]
test1 = panel.ix[:, "2002"]
tm.assert_panel_equal(test1,test2)
# GH8710
# multi-element getting with a list
panel = tm.makePanel()
expected = panel.iloc[[0,1]]
result = panel.loc[['ItemA','ItemB']]
tm.assert_panel_equal(result,expected)
result = panel.loc[['ItemA','ItemB'],:,:]
tm.assert_panel_equal(result,expected)
result = panel[['ItemA','ItemB']]
tm.assert_panel_equal(result,expected)
result = panel.loc['ItemA':'ItemB']
tm.assert_panel_equal(result,expected)
result = panel.ix['ItemA':'ItemB']
tm.assert_panel_equal(result,expected)
result = panel.ix[['ItemA','ItemB']]
tm.assert_panel_equal(result,expected)
# with an object-like
# GH 9140
class TestObject:
def __str__(self):
return "TestObject"
obj = TestObject()
p = Panel(np.random.randn(1,5,4), items=[obj],
major_axis = date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
expected = p.iloc[0]
result = p[obj]
tm.assert_frame_equal(result, expected)
def test_panel_setitem(self):
# GH 7763
# loc and setitem have setting differences
np.random.seed(0)
index=range(3)
columns = list('abc')
panel = Panel({'A' : DataFrame(np.random.randn(3, 3), index=index, columns=columns),
'B' : DataFrame(np.random.randn(3, 3), index=index, columns=columns),
'C' : DataFrame(np.random.randn(3, 3), index=index, columns=columns)
})
replace = DataFrame(np.eye(3,3), index=range(3), columns=columns)
expected = Panel({ 'A' : replace, 'B' : replace, 'C' : replace })
p = panel.copy()
for idx in list('ABC'):
p[idx] = replace
tm.assert_panel_equal(p, expected)
p = panel.copy()
for idx in list('ABC'):
p.loc[idx,:,:] = replace
tm.assert_panel_equal(p, expected)
def test_panel_setitem_with_multiindex(self):
# 10360
# failing with a multi-index
arr = np.array([[[1,2,3],[0,0,0]],[[0,0,0],[0,0,0]]],dtype=np.float64)
# reg index
axes = dict(items=['A', 'B'], major_axis=[0, 1], minor_axis=['X', 'Y' ,'Z'])
p1 = Panel(0., **axes)
p1.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p1, expected)
# multi-indexes
axes['items'] = pd.MultiIndex.from_tuples([('A','a'), ('B','b')])
p2 = Panel(0., **axes)
p2.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p2, expected)
axes['major_axis']=pd.MultiIndex.from_tuples([('A',1),('A',2)])
p3 = Panel(0., **axes)
p3.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p3, expected)
axes['minor_axis']=pd.MultiIndex.from_product([['X'],range(3)])
p4 = Panel(0., **axes)
p4.iloc[0, 0, :] = [1, 2, 3]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p4, expected)
arr = np.array([[[1,0,0],[2,0,0]],[[0,0,0],[0,0,0]]],dtype=np.float64)
p5 = Panel(0., **axes)
p5.iloc[0, :, 0] = [1, 2]
expected = Panel(arr, **axes)
tm.assert_panel_equal(p5, expected)
def test_panel_assignment(self):
# GH3777
wp = Panel(randn(2, 5, 4), items=['Item1', 'Item2'], major_axis=date_range('1/1/2000', periods=5), minor_axis=['A', 'B', 'C', 'D'])
wp2 = Panel(randn(2, 5, 4), items=['Item1', 'Item2'], major_axis=date_range('1/1/2000', periods=5), minor_axis=['A', 'B', 'C', 'D'])
expected = wp.loc[['Item1', 'Item2'], :, ['A', 'B']]
def f():
wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = wp2.loc[['Item1', 'Item2'], :, ['A', 'B']]
self.assertRaises(NotImplementedError, f)
#wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = wp2.loc[['Item1', 'Item2'], :, ['A', 'B']]
#result = wp.loc[['Item1', 'Item2'], :, ['A', 'B']]
#tm.assert_panel_equal(result,expected)
def test_multiindex_assignment(self):
# GH3777 part 2
# mixed dtype
df = DataFrame(np.random.randint(5,10,size=9).reshape(3, 3),
columns=list('abc'),
index=[[4,4,8],[8,10,12]])
df['d'] = np.nan
arr = np.array([0.,1.])
df.ix[4,'d'] = arr
assert_series_equal(df.ix[4,'d'],Series(arr,index=[8,10],name='d'))
# single dtype
df = DataFrame(np.random.randint(5,10,size=9).reshape(3, 3),
columns=list('abc'),
index=[[4,4,8],[8,10,12]])
df.ix[4,'c'] = arr
assert_series_equal(df.ix[4,'c'],Series(arr,index=[8,10],name='c',dtype='int64'))
# scalar ok
df.ix[4,'c'] = 10
assert_series_equal(df.ix[4,'c'],Series(10,index=[8,10],name='c',dtype='int64'))
# invalid assignments
def f():
df.ix[4,'c'] = [0,1,2,3]
self.assertRaises(ValueError, f)
def f():
df.ix[4,'c'] = [0]
self.assertRaises(ValueError, f)
# groupby example
NUM_ROWS = 100
NUM_COLS = 10
col_names = ['A'+num for num in map(str,np.arange(NUM_COLS).tolist())]
index_cols = col_names[:5]
df = DataFrame(np.random.randint(5, size=(NUM_ROWS,NUM_COLS)), dtype=np.int64, columns=col_names)
df = df.set_index(index_cols).sort_index()
grp = df.groupby(level=index_cols[:4])
df['new_col'] = np.nan
f_index = np.arange(5)
def f(name,df2):
return Series(np.arange(df2.shape[0]),name=df2.index.values[0]).reindex(f_index)
new_df = pd.concat([ f(name,df2) for name, df2 in grp ],axis=1).T
# we are actually operating on a copy here
# but in this case, that's ok
for name, df2 in grp:
new_vals = np.arange(df2.shape[0])
df.ix[name, 'new_col'] = new_vals
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame({'FC':['a','b','a','b','a','b'],
'PF':[0,0,0,0,1,1],
'col1':lrange(6),
'col2':lrange(6,12)})
df.ix[1,0]=np.nan
df2 = df.copy()
mask=~df2.FC.isnull()
cols=['col1', 'col2']
dft = df2 * 2
dft.ix[3,3] = np.nan
expected = DataFrame({'FC':['a',np.nan,'a','b','a','b'],
'PF':[0,0,0,0,1,1],
'col1':Series([0,1,4,6,8,10]),
'col2':[12,7,16,np.nan,20,22]})
# frame on rhs
df2.ix[mask, cols]= dft.ix[mask, cols]
assert_frame_equal(df2,expected)
df2.ix[mask, cols]= dft.ix[mask, cols]
assert_frame_equal(df2,expected)
# with an ndarray on rhs
df2 = df.copy()
df2.ix[mask, cols]= dft.ix[mask, cols].values
assert_frame_equal(df2,expected)
df2.ix[mask, cols]= dft.ix[mask, cols].values
assert_frame_equal(df2,expected)
# broadcasting on the rhs is required
df = DataFrame(dict(A = [1,2,0,0,0],B=[0,0,0,10,11],C=[0,0,0,10,11],D=[3,4,5,6,7]))
expected = df.copy()
mask = expected['A'] == 0
for col in ['A','B']:
expected.loc[mask,col] = df['D']
df.loc[df['A']==0,['A','B']] = df['D']
assert_frame_equal(df,expected)
def test_ix_assign_column_mixed(self):
# GH #1142
df = DataFrame(tm.getSeriesData())
df['foo'] = 'bar'
orig = df.ix[:, 'B'].copy()
df.ix[:, 'B'] = df.ix[:, 'B'] + 1
assert_series_equal(df.B, orig + 1)
# GH 3668, mixed frame with series value
df = DataFrame({'x':lrange(10), 'y':lrange(10,20),'z' : 'bar'})
expected = df.copy()
for i in range(5):
indexer = i*2
v = 1000 + i*200
expected.ix[indexer, 'y'] = v
self.assertEqual(expected.ix[indexer, 'y'], v)
df.ix[df.x % 2 == 0, 'y'] = df.ix[df.x % 2 == 0, 'y'] * 100
assert_frame_equal(df,expected)
# GH 4508, making sure consistency of assignments
df = DataFrame({'a':[1,2,3],'b':[0,1,2]})
df.ix[[0,2,],'b'] = [100,-100]
expected = DataFrame({'a' : [1,2,3], 'b' : [100,1,-100] })
assert_frame_equal(df,expected)
df = pd.DataFrame({'a': lrange(4) })
df['b'] = np.nan
df.ix[[1,3],'b'] = [100,-100]
expected = DataFrame({'a' : [0,1,2,3], 'b' : [np.nan,100,np.nan,-100] })
assert_frame_equal(df,expected)
# ok, but chained assignments are dangerous
# if we turn off chained assignment it will work
with option_context('chained_assignment',None):
df = pd.DataFrame({'a': lrange(4) })
df['b'] = np.nan
df['b'].ix[[1,3]] = [100,-100]
assert_frame_equal(df,expected)
def test_ix_get_set_consistency(self):
# GH 4544
# ix/loc get/set not consistent when
# a mixed int/string index
df = DataFrame(np.arange(16).reshape((4, 4)),
columns=['a', 'b', 8, 'c'],
index=['e', 7, 'f', 'g'])
self.assertEqual(df.ix['e', 8], 2)
self.assertEqual(df.loc['e', 8], 2)
df.ix['e', 8] = 42
self.assertEqual(df.ix['e', 8], 42)
self.assertEqual(df.loc['e', 8], 42)
df.loc['e', 8] = 45
self.assertEqual(df.ix['e', 8], 45)
self.assertEqual(df.loc['e', 8], 45)
def test_setitem_list(self):
# GH 6043
# ix with a list
df = DataFrame(index=[0,1], columns=[0])
df.ix[1,0] = [1,2,3]
df.ix[1,0] = [1,2]
result = DataFrame(index=[0,1], columns=[0])
result.ix[1,0] = [1,2]
assert_frame_equal(result,df)
# ix with an object
class TO(object):
def __init__(self, value):
self.value = value
def __str__(self):
return "[{0}]".format(self.value)
__repr__ = __str__
def __eq__(self, other):
return self.value == other.value
def view(self):
return self
df = DataFrame(index=[0,1], columns=[0])
df.ix[1,0] = TO(1)
df.ix[1,0] = TO(2)
result = DataFrame(index=[0,1], columns=[0])
result.ix[1,0] = TO(2)
assert_frame_equal(result,df)
# remains object dtype even after setting it back
df = DataFrame(index=[0,1], columns=[0])
df.ix[1,0] = TO(1)
df.ix[1,0] = np.nan
result = DataFrame(index=[0,1], columns=[0])
assert_frame_equal(result, df)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a%2 == 0)
self.assertRaises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
self.assertRaises(NotImplementedError, df.iloc.__getitem__, tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask),dtype=bool)]
assert_frame_equal(result,df)
# the possibilities
locs = np.arange(4)
nums = 2**locs
reps = lmap(bin, nums)
df = DataFrame({'locs':locs, 'nums':nums}, reps)
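# keys are (how the mask's index is built, accessor used); values are either the
# expected binary sum or a fragment of the error message the lookup should raise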
expected = {
(None,'') : '0b1100',
(None,'.loc') : '0b1100',
(None,'.iloc') : '0b1100',
('index','') : '0b11',
('index','.loc') : '0b11',
('index','.iloc') : 'iLocation based boolean indexing cannot use an indexable as a mask',
('locs','') : 'Unalignable boolean Series key provided',
('locs','.loc') : 'Unalignable boolean Series key provided',
('locs','.iloc') : 'iLocation based boolean indexing on an integer type is not available',
}
warnings.filterwarnings(action='ignore', category=UserWarning)
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums>2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx,method])
r = expected.get(key)
if r != ans:
raise AssertionError("[%s] does not match [%s], received [%s]" %
(key,ans,r))
warnings.filterwarnings(action='always', category=UserWarning)
def test_ix_slicing_strings(self):
##GH3836
data = {'Classification': ['SA EQUITY CFD', 'bbb', 'SA EQUITY', 'SA SSF', 'aaa'],
'Random': [1,2,3,4,5],
'X': ['correct', 'wrong','correct', 'correct','wrong']}
df = DataFrame(data)
x = df[~df.Classification.isin(['SA EQUITY CFD', 'SA EQUITY', 'SA SSF'])]
df.ix[x.index,'X'] = df['Classification']
expected = DataFrame({'Classification': {0: 'SA EQUITY CFD', 1: 'bbb',
2: 'SA EQUITY', 3: 'SA SSF', 4: 'aaa'},
'Random': {0: 1, 1: 2, 2: 3, 3: 4, 4: 5},
'X': {0: 'correct', 1: 'bbb', 2: 'correct',
3: 'correct', 4: 'aaa'}}) # bug was 4: 'bbb'
assert_frame_equal(df, expected)
def test_non_unique_loc(self):
## GH3659
## non-unique indexer with loc slice
## https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs
# these are going to raise because the index is not monotonic
df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3])
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(0,None)]))
self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,2)]))
# monotonic are ok
df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3]).sort_index(axis=0)
result = df.loc[1:]
expected = DataFrame({'A' : [2,4,5,6], 'B' : [4, 6,7,8]}, index = [1,1,2,3])
assert_frame_equal(result,expected)
result = df.loc[0:]
assert_frame_equal(result,df)
result = df.loc[1:2]
expected = DataFrame({'A' : [2,4,5], 'B' : [4,6,7]}, index = [1,1,2])
assert_frame_equal(result,expected)
def test_loc_name(self):
# GH 3880
df = DataFrame([[1, 1], [1, 1]])
df.index.name = 'index_name'
result = df.iloc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.ix[[0, 1]].index.name
self.assertEqual(result, 'index_name')
result = df.loc[[0, 1]].index.name
self.assertEqual(result, 'index_name')
def test_iloc_non_unique_indexing(self):
#GH 4017, non-unique indexing (on the axis)
df = DataFrame({'A' : [0.1] * 3000, 'B' : [1] * 3000})
idx = np.array(lrange(30)) * 99
expected = df.iloc[idx]
df3 = pd.concat([df, 2*df, 3*df])
result = df3.iloc[idx]
assert_frame_equal(result, expected)
df2 = DataFrame({'A' : [0.1] * 1000, 'B' : [1] * 1000})
df2 = pd.concat([df2, 2*df2, 3*df2])
sidx = df2.index.to_series()
expected = df2.iloc[idx[idx<=sidx.max()]]
new_list = []
for r, s in expected.iterrows():
new_list.append(s)
new_list.append(s*2)
new_list.append(s*3)
expected = DataFrame(new_list)
expected = pd.concat([ expected, DataFrame(index=idx[idx>sidx.max()]) ])
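# .loc on the duplicated index returns every matching row, so each in-range
# label contributes its original, doubled and tripled rows, while out-of-range
# labels come back as all-NaN rows -- which is what the frame built above mimics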
result = df2.loc[idx]
assert_frame_equal(result, expected, check_index_type=False)
def test_mi_access(self):
# GH 4145
data = """h1 main h3 sub h5
0 a A 1 A1 1
1 b B 2 B1 2
2 c B 3 A1 3
3 d A 4 B2 4
4 e A 5 B2 5
5 f B 6 A2 6
"""
df = pd.read_csv(StringIO(data),sep='\s+',index_col=0)
df2 = df.set_index(['main', 'sub']).T.sort_index(1)
index = Index(['h1','h3','h5'])
columns = MultiIndex.from_tuples([('A','A1')],names=['main','sub'])
expected = DataFrame([['a',1,1]],index=columns,columns=index).T
result = df2.loc[:,('A','A1')]
assert_frame_equal(result,expected)
result = df2[('A','A1')]
assert_frame_equal(result,expected)
# GH 4146, not returning a block manager when selecting a unique index
# from a duplicate index
# as of 4879, this returns a Series (which is similar to what happens with a non-unique index)
expected = Series(['a',1,1], index=['h1','h3','h5'], name='A1')
result = df2['A']['A1']
assert_series_equal(result, expected)
# selecting a non_unique from the 2nd level
expected = DataFrame([['d',4,4],['e',5,5]],index=Index(['B2','B2'],name='sub'),columns=['h1','h3','h5'],).T
result = df2['A']['B2']
assert_frame_equal(result, expected)
def test_non_unique_loc_memory_error(self):
# GH 4280
# non_unique index with a large selection triggers a memory error
columns = list('ABCDEFG')
def gen_test(l,l2):
return pd.concat([ DataFrame(randn(l,len(columns)),index=lrange(l),columns=columns),
DataFrame(np.ones((l2,len(columns))),index=[0]*l2,columns=columns) ])
def gen_expected(df,mask):
l = len(mask)
return pd.concat([
df.take([0],convert=False),
DataFrame(np.ones((l,len(columns))),index=[0]*l,columns=columns),
df.take(mask[1:],convert=False) ])
df = gen_test(900,100)
self.assertFalse(df.index.is_unique)
mask = np.arange(100)
result = df.loc[mask]
expected = gen_expected(df,mask)
assert_frame_equal(result,expected)
df = gen_test(900000,100000)
self.assertFalse(df.index.is_unique)
mask = np.arange(100000)
result = df.loc[mask]
expected = gen_expected(df,mask)
assert_frame_equal(result,expected)
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame([['1','2','3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
df = df_orig.copy()
df.iloc[:,0:2] = df.iloc[:,0:2].astype(np.int64)
expected = DataFrame([[1,2,'3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
assert_frame_equal(df,expected)
df = df_orig.copy()
df.iloc[:,0:2] = df.iloc[:,0:2]._convert(datetime=True, numeric=True)
expected = DataFrame([[1,2,'3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
assert_frame_equal(df,expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:,'A'] = df.loc[:,'A'].astype(np.int64)
expected = DataFrame([[1,'2','3','.4',5,6.,'foo']],columns=list('ABCDEFG'))
assert_frame_equal(df,expected)
df = df_orig.copy()
df.loc[:,['B','C']] = df.loc[:,['B','C']].astype(np.int64)
expected = DataFrame([['1',2,3,'.4',5,6.,'foo']],columns=list('ABCDEFG'))
assert_frame_equal(df,expected)
# full replacements / no nans
df = DataFrame({'A': [1., 2., 3., 4.]})
df.iloc[:, 0] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
assert_frame_equal(df,expected)
df = DataFrame({'A': [1., 2., 3., 4.]})
df.loc[:, 'A'] = df['A'].astype(np.int64)
expected = DataFrame({'A': [1, 2, 3, 4]})
assert_frame_equal(df,expected)
def test_astype_assignment_with_dups(self):
# GH 4686
# assignment with dups that has a dtype change
df = DataFrame(
np.arange(3).reshape((1,3)),
columns=pd.MultiIndex.from_tuples(
[('A', '1'), ('B', '1'), ('A', '2')]
),
dtype=object
)
index = df.index.copy()
df['A'] = df['A'].astype(np.float64)
result = df.get_dtype_counts().sort_index()
expected = Series({ 'float64' : 2, 'object' : 1 }).sort_index()
self.assertTrue(df.index.equals(index))
def test_dups_loc(self):
# GH4726
# dup indexing with iloc/loc
df = DataFrame([[1, 2, 'foo', 'bar', Timestamp('20130101')]],
columns=['a','a','a','a','a'], index=[1])
expected = Series([1, 2, 'foo', 'bar', Timestamp('20130101')],
index=['a','a','a','a','a'], name=1)
result = df.iloc[0]
assert_series_equal(result, expected)
result = df.loc[1]
assert_series_equal(result, expected)
def test_partial_setting(self):
# GH2578, allow ix and friends to partially set
### series ###
s_orig = Series([1,2,3])
s = s_orig.copy()
s[5] = 5
expected = Series([1,2,3,5],index=[0,1,2,5])
assert_series_equal(s,expected)
s = s_orig.copy()
s.loc[5] = 5
expected = Series([1,2,3,5],index=[0,1,2,5])
assert_series_equal(s,expected)
s = s_orig.copy()
s[5] = 5.
expected = Series([1,2,3,5.],index=[0,1,2,5])
assert_series_equal(s,expected)
s = s_orig.copy()
s.loc[5] = 5.
expected = Series([1,2,3,5.],index=[0,1,2,5])
assert_series_equal(s,expected)
# iloc/iat raise
s = s_orig.copy()
def f():
s.iloc[3] = 5.
self.assertRaises(IndexError, f)
def f():
s.iat[3] = 5.
self.assertRaises(IndexError, f)
### frame ###
df_orig = DataFrame(np.arange(6).reshape(3,2),columns=['A','B'],dtype='int64')
# iloc/iat raise
df = df_orig.copy()
def f():
df.iloc[4,2] = 5.
self.assertRaises(IndexError, f)
def f():
df.iat[4,2] = 5.
self.assertRaises(IndexError, f)
# row setting where it exists
expected = DataFrame(dict({ 'A' : [0,4,4], 'B' : [1,5,5] }))
df = df_orig.copy()
df.iloc[1] = df.iloc[2]
assert_frame_equal(df,expected)
expected = DataFrame(dict({ 'A' : [0,4,4], 'B' : [1,5,5] }))
df = df_orig.copy()
df.loc[1] = df.loc[2]
assert_frame_equal(df,expected)
# like 2578, partial setting with dtype preservation
expected = DataFrame(dict({ 'A' : [0,2,4,4], 'B' : [1,3,5,5] }))
df = df_orig.copy()
df.loc[3] = df.loc[2]
assert_frame_equal(df,expected)
# single dtype frame, overwrite
expected = DataFrame(dict({ 'A' : [0,2,4], 'B' : [0,2,4] }))
df = df_orig.copy()
df.ix[:,'B'] = df.ix[:,'A']
assert_frame_equal(df,expected)
# mixed dtype frame, overwrite
expected = DataFrame(dict({ 'A' : [0,2,4], 'B' : Series([0,2,4]) }))
df = df_orig.copy()
df['B'] = df['B'].astype(np.float64)
df.ix[:,'B'] = df.ix[:,'A']
assert_frame_equal(df,expected)
# single dtype frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:,'C'] = df.ix[:,'A']
assert_frame_equal(df,expected)
# mixed frame, partial setting
expected = df_orig.copy()
expected['C'] = df['A']
df = df_orig.copy()
df.ix[:,'C'] = df.ix[:,'A']
assert_frame_equal(df,expected)
### panel ###
# panel setting via item
p_orig = Panel(np.arange(16).reshape(2,4,2),items=['Item1','Item2'],major_axis=pd.date_range('2001/1/12',periods=4),minor_axis=['A','B'],dtype='float64')
expected = p_orig.copy()
expected['Item3'] = expected['Item1']
p = p_orig.copy()
p.loc['Item3'] = p['Item1']
assert_panel_equal(p,expected)
# panel with aligned series
expected = p_orig.copy()
expected = expected.transpose(2,1,0)
expected['C'] = DataFrame({ 'Item1' : [30,30,30,30], 'Item2' : [32,32,32,32] },index=p_orig.major_axis)
expected = expected.transpose(2,1,0)
p = p_orig.copy()
p.loc[:,:,'C'] = Series([30,32],index=p_orig.items)
assert_panel_equal(p,expected)
# GH 8473
dates = date_range('1/1/2000', periods=8)
df_orig = DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D'])
expected = pd.concat([df_orig,DataFrame({'A' : 7},index=[dates[-1]+1])])
df = df_orig.copy()
df.loc[dates[-1]+1, 'A'] = 7
assert_frame_equal(df,expected)
df = df_orig.copy()
df.at[dates[-1]+1, 'A'] = 7
assert_frame_equal(df,expected)
expected = pd.concat([df_orig,DataFrame({0 : 7},index=[dates[-1]+1])],axis=1)
df = df_orig.copy()
df.loc[dates[-1]+1, 0] = 7
assert_frame_equal(df,expected)
df = df_orig.copy()
df.at[dates[-1]+1, 0] = 7
assert_frame_equal(df,expected)
def test_partial_setting_mixed_dtype(self):
# in a mixed dtype environment, try to preserve dtypes
# by appending
df = DataFrame([[True, 1],[False, 2]],
columns = ["female","fitness"])
s = df.loc[1].copy()
s.name = 2
expected = df.append(s)
df.loc[2] = df.loc[1]
assert_frame_equal(df, expected)
# columns will align
df = DataFrame(columns=['A','B'])
df.loc[0] = Series(1,index=range(4))
assert_frame_equal(df,DataFrame(columns=['A','B'],index=[0]))
# columns will align
df = DataFrame(columns=['A','B'])
df.loc[0] = Series(1,index=['B'])
assert_frame_equal(df,DataFrame([[np.nan, 1]], columns=['A','B'],index=[0],dtype='float64'))
# list-like must conform
df = DataFrame(columns=['A','B'])
def f():
df.loc[0] = [1,2,3]
self.assertRaises(ValueError, f)
# these are coerced to float unavoidably (as it's a list-like to begin with)
df = DataFrame(columns=['A','B'])
df.loc[3] = [6,7]
assert_frame_equal(df,DataFrame([[6,7]],index=[3],columns=['A','B'],dtype='float64'))
def test_partial_setting_with_datetimelike_dtype(self):
# GH9478
# a datetimeindex alignment issue with partial setting
df = pd.DataFrame(np.arange(6.).reshape(3,2), columns=list('AB'),
index=pd.date_range('1/1/2000', periods=3, freq='1H'))
expected = df.copy()
expected['C'] = [expected.index[0]] + [pd.NaT,pd.NaT]
mask = df.A < 1
df.loc[mask, 'C'] = df.loc[mask].index
assert_frame_equal(df, expected)
def test_loc_setitem_datetime(self):
# GH 9516
dt1 = Timestamp('20130101 09:00:00')
dt2 = Timestamp('20130101 10:00:00')
for conv in [lambda x: x, lambda x: x.to_datetime64(),
lambda x: x.to_pydatetime(), lambda x: np.datetime64(x)]:
df = pd.DataFrame()
df.loc[conv(dt1),'one'] = 100
df.loc[conv(dt2),'one'] = 200
expected = DataFrame({'one' : [100.0, 200.0]},index=[dt1, dt2])
assert_frame_equal(df, expected)
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
# ToDo: check_index_type can be True after GH 11497
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
assert_series_equal(result, expected, check_index_type=False)
# raises as nothing is in the index
self.assertRaises(KeyError, lambda : ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
assert_series_equal(result, expected, check_index_type=False)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
assert_series_equal(result, expected, check_index_type=False)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[5, 3, 3]]
assert_series_equal(result, expected, check_index_type=False)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[5, 4, 4]]
assert_series_equal(result, expected, check_index_type=False)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4], index=[4, 5, 6, 7]).loc[[7, 2, 2]]
assert_series_equal(result, expected, check_index_type=False)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[4, 5, 5]]
assert_series_equal(result, expected, check_index_type=False)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
assert_series_equal(result, expected, check_index_type=False)
def test_partial_set_invalid(self):
# GH 4940
# allow only setting of 'valid' values
df = tm.makeTimeDataFrame()
# don't allow non-string label inserts
def f():
df.loc[100.0, :] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.loc[100,:] = df.ix[0]
self.assertRaises(TypeError, f)
def f():
df.ix[100.0, :] = df.ix[0]
self.assertRaises(ValueError, f)
def f():
df.ix[100,:] = df.ix[0]
self.assertRaises(ValueError, f)
# allow object conversion here
df.loc['a',:] = df.ix[0]
def test_partial_set_empty(self):
# GH5226
# partially set with an empty object
# series
s = Series()
s.loc[1] = 1
assert_series_equal(s,Series([1],index=[1]))
s.loc[3] = 3
assert_series_equal(s,Series([1,3],index=[1,3]))
s = Series()
s.loc[1] = 1.
assert_series_equal(s,Series([1.],index=[1]))
s.loc[3] = 3.
assert_series_equal(s,Series([1.,3.],index=[1,3]))
s = Series()
s.loc['foo'] = 1
assert_series_equal(s,Series([1],index=['foo']))
s.loc['bar'] = 3
assert_series_equal(s,Series([1,3],index=['foo','bar']))
s.loc[3] = 4
assert_series_equal(s,Series([1,3,4],index=['foo','bar',3]))
# partially set with an empty object
# frame
df = DataFrame()
def f():
df.loc[1] = 1
self.assertRaises(ValueError, f)
def f():
df.loc[1] = Series([1],index=['foo'])
self.assertRaises(ValueError, f)
def f():
df.loc[:,1] = 1
self.assertRaises(ValueError, f)
# these work as they don't really change
# anything but the index
# GH5632
expected = DataFrame(columns=['foo'], index=pd.Index([], dtype='int64'))
def f():
df = DataFrame()
df['foo'] = Series([], dtype='object')
return df
assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(df.index)
return df
assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = df.index
return df
assert_frame_equal(f(), expected)
expected = DataFrame(columns=['foo'], index=pd.Index([], dtype='int64'))
expected['foo'] = expected['foo'].astype('float64')
def f():
df = DataFrame()
df['foo'] = []
return df
assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = Series(range(len(df)))
return df
assert_frame_equal(f(), expected)
def f():
df = DataFrame()
df['foo'] = range(len(df))
return df
assert_frame_equal(f(), expected)
df = DataFrame()
df2 = DataFrame()
df2[1] = Series([1], index=['foo'])
df.loc[:,1] = Series([1], index=['foo'])
assert_frame_equal(df,DataFrame([[1]], index=['foo'], columns=[1]))
assert_frame_equal(df,df2)
# no index to start
expected = DataFrame({ 0 : Series(1,index=range(4)) }, columns=['A','B',0])
df = DataFrame(columns=['A','B'])
df[0] = Series(1, index=range(4))
df.dtypes
str(df)
assert_frame_equal(df,expected)
df = DataFrame(columns=['A','B'])
df.loc[:,0] = Series(1,index=range(4))
df.dtypes
str(df)
assert_frame_equal(df,expected)
# GH5720, GH5744
# don't create rows when empty
expected = DataFrame(columns=['A', 'B', 'New'], index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['New'] = expected['New'].astype('float64')
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
y['New'] = np.nan
assert_frame_equal(y, expected)
expected = DataFrame(columns=['a', 'b', 'c c', 'd'])
expected['d'] = expected['d'].astype('int64')
df = DataFrame(columns=['a', 'b', 'c c'])
df['d'] = 3
assert_frame_equal(df, expected)
assert_series_equal(df['c c'],Series(name='c c',dtype=object))
# reindex columns is ok
df = DataFrame({"A": [1, 2, 3], "B": [1.2, 4.2, 5.2]})
y = df[df.A > 5]
result = y.reindex(columns=['A','B','C'])
expected = DataFrame(columns=['A','B','C'], index=pd.Index([], dtype='int64'))
expected['A'] = expected['A'].astype('int64')
expected['B'] = expected['B'].astype('float64')
expected['C'] = expected['C'].astype('float64')
assert_frame_equal(result,expected)
# GH 5756
# setting with empty Series
df = DataFrame(Series())
assert_frame_equal(df, DataFrame({ 0 : Series() }))
df = DataFrame(Series(name='foo'))
assert_frame_equal(df, DataFrame({ 'foo' : Series() }))
# GH 5932
# copy on empty with assignment fails
df = DataFrame(index=[0])
df = df.copy()
df['a'] = 0
expected = DataFrame(0,index=[0],columns=['a'])
assert_frame_equal(df, expected)
# GH 6171
# consistency on empty frames
df = DataFrame(columns=['x', 'y'])
df['x'] = [1, 2]
expected = DataFrame(dict(x = [1,2], y = [np.nan,np.nan]))
assert_frame_equal(df, expected, check_dtype=False)
df = DataFrame(columns=['x', 'y'])
df['x'] = ['1', '2']
expected = DataFrame(dict(x = ['1','2'], y = [np.nan,np.nan]),dtype=object)
assert_frame_equal(df, expected)
df = DataFrame(columns=['x', 'y'])
df.loc[0, 'x'] = 1
expected = DataFrame(dict(x = [1], y = [np.nan]))
assert_frame_equal(df, expected, check_dtype=False)
def test_cache_updating(self):
# GH 4939, make sure to update the cache on setitem
df = tm.makeDataFrame()
df['A'] # cache series
df.ix["Hello Friend"] = df.ix[0]
self.assertIn("Hello Friend", df['A'].index)
self.assertIn("Hello Friend", df['B'].index)
panel = tm.makePanel()
panel.ix[0] # get first item into cache
panel.ix[:, :, 'A+1'] = panel.ix[:, :, 'A'] + 1
self.assertIn("A+1", panel.ix[0].columns)
self.assertIn("A+1", panel.ix[1].columns)
# 5216
# make sure that we don't try to set a dead cache
a = np.random.rand(10, 3)
df = DataFrame(a, columns=['x', 'y', 'z'])
tuples = [(i, j) for i in range(5) for j in range(2)]
index = MultiIndex.from_tuples(tuples)
df.index = index
# setting via chained assignment
# but actually works, since everything is a view
df.loc[0]['z'].iloc[0] = 1.
result = df.loc[(0,0),'z']
self.assertEqual(result, 1)
# correct setting
df.loc[(0,0),'z'] = 2
result = df.loc[(0,0),'z']
self.assertEqual(result, 2)
# 10264
df = DataFrame(np.zeros((5,5),dtype='int64'),columns=['a','b','c','d','e'],index=range(5))
df['f'] = 0
df.f.values[3] = 1
y = df.iloc[np.arange(2,len(df))]
df.f.values[3] = 2
expected = DataFrame(np.zeros((5,6),dtype='int64'),columns=['a','b','c','d','e','f'],index=range(5))
expected.at[3,'f'] = 2
assert_frame_equal(df, expected)
expected = Series([0,0,0,2,0],name='f')
assert_series_equal(df.f, expected)
def test_slice_consolidate_invalidate_item_cache(self):
# this is chained assignment, but will 'work'
with option_context('chained_assignment',None):
# #3970
df = DataFrame({ "aa":lrange(5), "bb":[2.2]*5})
# Creates a second float block
df["cc"] = 0.0
# caches a reference to the 'bb' series
df["bb"]
# repr machinery triggers consolidation
repr(df)
# Assignment to wrong series
df['bb'].iloc[0] = 0.17
df._clear_item_cache()
self.assertAlmostEqual(df['bb'][0], 0.17)
def test_setitem_cache_updating(self):
# GH 5424
cont = ['one', 'two','three', 'four', 'five', 'six', 'seven']
for do_ref in [False,False]:
df = DataFrame({'a' : cont, "b":cont[3:]+cont[:3] ,'c' : np.arange(7)})
# ref the cache
if do_ref:
df.ix[0,"c"]
# set it
df.ix[7,'c'] = 1
self.assertEqual(df.ix[0,'c'], 0.0)
self.assertEqual(df.ix[7,'c'], 1.0)
# GH 7084
# not updating cache on series setting with slices
expected = DataFrame({'A': [600, 600, 600]}, index=date_range('5/7/2014', '5/9/2014'))
out = DataFrame({'A': [0, 0, 0]}, index=date_range('5/7/2014', '5/9/2014'))
df = DataFrame({'C': ['A', 'A', 'A'], 'D': [100, 200, 300]})
#loop through df to update out
six = Timestamp('5/7/2014')
eix = Timestamp('5/9/2014')
for ix, row in df.iterrows():
out.loc[six:eix,row['C']] = out.loc[six:eix,row['C']] + row['D']
assert_frame_equal(out, expected)
assert_series_equal(out['A'], expected['A'])
# try via a chain indexing
# this actually works
out = DataFrame({'A': [0, 0, 0]}, index=date_range('5/7/2014', '5/9/2014'))
for ix, row in df.iterrows():
v = out[row['C']][six:eix] + row['D']
out[row['C']][six:eix] = v
assert_frame_equal(out, expected)
assert_series_equal(out['A'], expected['A'])
out = DataFrame({'A': [0, 0, 0]}, index=date_range('5/7/2014', '5/9/2014'))
for ix, row in df.iterrows():
out.loc[six:eix,row['C']] += row['D']
assert_frame_equal(out, expected)
assert_series_equal(out['A'], expected['A'])
def test_setitem_chained_setfault(self):
# GH6026
# setfaults under numpy 1.7.1 (ok on 1.8)
data = ['right', 'left', 'left', 'left', 'right', 'left', 'timeout']
mdata = ['right', 'left', 'left', 'left', 'right', 'left', 'none']
df = DataFrame({'response': np.array(data)})
mask = df.response == 'timeout'
df.response[mask] = 'none'
assert_frame_equal(df, DataFrame({'response': mdata }))
recarray = np.rec.fromarrays([data], names=['response'])
df = DataFrame(recarray)
mask = df.response == 'timeout'
df.response[mask] = 'none'
assert_frame_equal(df, DataFrame({'response': mdata }))
df = DataFrame({'response': data, 'response1' : data })
mask = df.response == 'timeout'
df.response[mask] = 'none'
assert_frame_equal(df, DataFrame({'response': mdata, 'response1' : data }))
# GH 6056
expected = DataFrame(dict(A = [np.nan,'bar','bah','foo','bar']))
df = DataFrame(dict(A = np.array(['foo','bar','bah','foo','bar'])))
df['A'].iloc[0] = np.nan
result = df.head()
assert_frame_equal(result, expected)
df = DataFrame(dict(A = np.array(['foo','bar','bah','foo','bar'])))
df.A.iloc[0] = np.nan
result = df.head()
assert_frame_equal(result, expected)
def test_detect_chained_assignment(self):
pd.set_option('chained_assignment','raise')
# work with the chain
expected = DataFrame([[-5,1],[-6,3]],columns=list('AB'))
df = DataFrame(np.arange(4).reshape(2,2),columns=list('AB'),dtype='int64')
self.assertIsNone(df.is_copy)
df['A'][0] = -5
df['A'][1] = -6
assert_frame_equal(df, expected)
# test with the chaining
df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
self.assertIsNone(df.is_copy)
def f():
df['A'][0] = -5
self.assertRaises(com.SettingWithCopyError, f)
def f():
df['A'][1] = np.nan
self.assertRaises(com.SettingWithCopyError, f)
self.assertIsNone(df['A'].is_copy)
# using a copy (the chain), fails
df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
def f():
df.loc[0]['A'] = -5
self.assertRaises(com.SettingWithCopyError, f)
# doc example
df = DataFrame({'a' : ['one', 'one', 'two',
'three', 'two', 'one', 'six'],
'c' : Series(range(7),dtype='int64') })
self.assertIsNone(df.is_copy)
expected = DataFrame({'a' : ['one', 'one', 'two',
'three', 'two', 'one', 'six'],
'c' : [42,42,2,3,4,42,6]})
def f():
indexer = df.a.str.startswith('o')
df[indexer]['c'] = 42
self.assertRaises(com.SettingWithCopyError, f)
expected = DataFrame({'A':[111,'bbb','ccc'],'B':[1,2,3]})
df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
def f():
df['A'][0] = 111
self.assertRaises(com.SettingWithCopyError, f)
def f():
df.loc[0]['A'] = 111
self.assertRaises(com.SettingWithCopyError, f)
df.loc[0,'A'] = 111
assert_frame_equal(df,expected)
# make sure that is_copy is picked up reconstruction
# GH5475
df = DataFrame({"A": [1,2]})
self.assertIsNone(df.is_copy)
with tm.ensure_clean('__tmp__pickle') as path:
df.to_pickle(path)
df2 = pd.read_pickle(path)
df2["B"] = df2["A"]
df2["B"] = df2["A"]
# a spurious raise as we are setting the entire column here
# GH5597
from string import ascii_letters as letters
def random_text(nobs=100):
df = []
for i in range(nobs):
idx= np.random.randint(len(letters), size=2)
idx.sort()
df.append([letters[idx[0]:idx[1]]])
return DataFrame(df, columns=['letters'])
df = random_text(100000)
# always a copy
x = df.iloc[[0,1,2]]
self.assertIsNotNone(x.is_copy)
x = df.iloc[[0,1,2,4]]
self.assertIsNotNone(x.is_copy)
# explicitly copy
indexer = df.letters.apply(lambda x : len(x) > 10)
df = df.ix[indexer].copy()
self.assertIsNone(df.is_copy)
df['letters'] = df['letters'].apply(str.lower)
# implicitly take
df = random_text(100000)
indexer = df.letters.apply(lambda x : len(x) > 10)
df = df.ix[indexer]
self.assertIsNotNone(df.is_copy)
df['letters'] = df['letters'].apply(str.lower)
# implicitly take 2
df = random_text(100000)
indexer = df.letters.apply(lambda x : len(x) > 10)
df = df.ix[indexer]
self.assertIsNotNone(df.is_copy)
df.loc[:,'letters'] = df['letters'].apply(str.lower)
# should be ok even though it's a copy!
self.assertIsNone(df.is_copy)
df['letters'] = df['letters'].apply(str.lower)
self.assertIsNone(df.is_copy)
df = random_text(100000)
indexer = df.letters.apply(lambda x : len(x) > 10)
df.ix[indexer,'letters'] = df.ix[indexer,'letters'].apply(str.lower)
# an identical take, so no copy
df = DataFrame({'a' : [1]}).dropna()
self.assertIsNone(df.is_copy)
df['a'] += 1
# inplace ops
# original from: http://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
a = [12, 23]
b = [123, None]
c = [1234, 2345]
d = [12345, 23456]
tuples = [('eyes', 'left'), ('eyes', 'right'), ('ears', 'left'), ('ears', 'right')]
events = {('eyes', 'left'): a, ('eyes', 'right'): b, ('ears', 'left'): c, ('ears', 'right'): d}
multiind = MultiIndex.from_tuples(tuples, names=['part', 'side'])
zed = DataFrame(events, index=['a', 'b'], columns=multiind)
def f():
zed['eyes']['right'].fillna(value=555, inplace=True)
self.assertRaises(com.SettingWithCopyError, f)
df = DataFrame(np.random.randn(10,4))
s = df.iloc[:,0].sort_values()
assert_series_equal(s,df.iloc[:,0].sort_values())
assert_series_equal(s,df[0].sort_values())
# false positives GH6025
df = DataFrame ({'column1':['a', 'a', 'a'], 'column2': [4,8,9] })
str(df)
df['column1'] = df['column1'] + 'b'
str(df)
df = df [df['column2']!=8]
str(df)
df['column1'] = df['column1'] + 'c'
str(df)
# from SO: http://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
def f():
df.iloc[0:5]['group'] = 'a'
self.assertRaises(com.SettingWithCopyError, f)
# mixed type setting
# same dtype & changing dtype
df = DataFrame(dict(A=date_range('20130101',periods=5),B=np.random.randn(5),C=np.arange(5,dtype='int64'),D=list('abcde')))
def f():
df.ix[2]['D'] = 'foo'
self.assertRaises(com.SettingWithCopyError, f)
def f():
df.ix[2]['C'] = 'foo'
self.assertRaises(com.SettingWithCopyError, f)
def f():
df['C'][2] = 'foo'
self.assertRaises(com.SettingWithCopyError, f)
def test_setting_with_copy_bug(self):
# operating on a copy
df = pd.DataFrame({'a': list(range(4)), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']})
mask = pd.isnull(df.c)
def f():
df[['c']][mask] = df[['b']][mask]
self.assertRaises(com.SettingWithCopyError, f)
# invalid warning as we are returning a new object
# GH 8730
df1 = DataFrame({'x': Series(['a','b','c']), 'y': Series(['d','e','f'])})
df2 = df1[['x']]
# this should not raise
df2['y'] = ['g', 'h', 'i']
def test_detect_chained_assignment_warnings(self):
# warnings
with option_context('chained_assignment','warn'):
df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
with tm.assert_produces_warning(expected_warning=com.SettingWithCopyWarning):
df.loc[0]['A'] = 111
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
ser = {256: 2321.0, 1: 78.0, 2: 2716.0, 3: 0.0, 4: 369.0, 5: 0.0, 6: 269.0, 7: 0.0, 8: 0.0, 9: 0.0, 10: 3536.0, 11: 0.0, 12: 24.0, 13: 0.0, 14: 931.0, 15: 0.0, 16: 101.0, 17: 78.0, 18: 9643.0, 19: 0.0, 20: 0.0, 21: 0.0, 22: 63761.0, 23: 0.0, 24: 446.0, 25: 0.0, 26: 34773.0, 27: 0.0, 28: 729.0, 29: 78.0, 30: 0.0, 31: 0.0, 32: 3374.0, 33: 0.0, 34: 1391.0, 35: 0.0, 36: 361.0, 37: 0.0, 38: 61808.0, 39: 0.0, 40: 0.0, 41: 0.0, 42: 6677.0, 43: 0.0, 44: 802.0, 45: 0.0, 46: 2691.0, 47: 0.0, 48: 3582.0, 49: 0.0, 50: 734.0, 51: 0.0, 52: 627.0, 53: 70.0, 54: 2584.0, 55: 0.0, 56: 324.0, 57: 0.0, 58: 605.0, 59: 0.0, 60: 0.0, 61: 0.0, 62: 3989.0, 63: 10.0, 64: 42.0, 65: 0.0, 66: 904.0, 67: 0.0, 68: 88.0, 69: 70.0, 70: 8172.0, 71: 0.0, 72: 0.0, 73: 0.0, 74: 64902.0, 75: 0.0, 76: 347.0, 77: 0.0, 78: 36605.0, 79: 0.0, 80: 379.0, 81: 70.0, 82: 0.0, 83: 0.0, 84: 3001.0, 85: 0.0, 86: 1630.0, 87: 7.0, 88: 364.0, 89: 0.0, 90: 67404.0, 91: 9.0, 92: 0.0, 93: 0.0, 94: 7685.0, 95: 0.0, 96: 1017.0, 97: 0.0, 98: 2831.0, 99: 0.0, 100: 2963.0, 101: 0.0, 102: 854.0, 103: 0.0, 104: 0.0, 105: 0.0, 106: 0.0, 107: 0.0, 108: 0.0, 109: 0.0, 110: 0.0, 111: 0.0, 112: 0.0, 113: 0.0, 114: 0.0, 115: 0.0, 116: 0.0, 117: 0.0, 118: 0.0, 119: 0.0, 120: 0.0, 121: 0.0, 122: 0.0, 123: 0.0, 124: 0.0, 125: 0.0, 126: 67744.0, 127: 22.0, 128: 264.0, 129: 0.0, 260: 197.0, 268: 0.0, 265: 0.0, 269: 0.0, 261: 0.0, 266: 1198.0, 267: 0.0, 262: 2629.0, 258: 775.0, 257: 0.0, 263: 0.0, 259: 0.0, 264: 163.0, 250: 10326.0, 251: 0.0, 252: 1228.0, 253: 0.0, 254: 2769.0, 255: 0.0}
# smoke test for the repr
s = Series(ser)
result = s.value_counts()
str(result)
def test_floating_index_doc_example(self):
index = Index([1.5, 2, 3, 4.5, 5])
s = Series(range(5),index=index)
self.assertEqual(s[3], 2)
self.assertEqual(s.ix[3], 2)
self.assertEqual(s.loc[3], 2)
self.assertEqual(s.iloc[3], 3)
def test_floating_index(self):
# related 236
# scalar/slicing of a float index
s = Series(np.arange(5), index=np.arange(5) * 2.5, dtype=np.int64)
# label based slicing
result1 = s[1.0:3.0]
result2 = s.ix[1.0:3.0]
result3 = s.loc[1.0:3.0]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# exact indexing when found
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.ix[5.0]
self.assertEqual(result1, result2)
self.assertEqual(result1, result3)
result1 = s[5]
result2 = s.loc[5]
result3 = s.ix[5]
self.assertEqual(result1, result2)
self.assertEqual(result1, result3)
self.assertEqual(s[5.0], s[5])
# value not found (and no fallback at all)
# scalar integers
self.assertRaises(KeyError, lambda : s.loc[4])
self.assertRaises(KeyError, lambda : s.ix[4])
self.assertRaises(KeyError, lambda : s[4])
# fancy floats/integers create the correct entry (as nan)
# fancy tests
expected = Series([2, 0], index=Float64Index([5.0, 0.0]))
for fancy_idx in [[5.0, 0.0], np.array([5.0, 0.0])]: # float
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.ix[fancy_idx], expected)
expected = Series([2, 0], index=Index([5, 0], dtype='int64'))
for fancy_idx in [[5, 0], np.array([5, 0])]: #int
assert_series_equal(s[fancy_idx], expected)
assert_series_equal(s.loc[fancy_idx], expected)
assert_series_equal(s.ix[fancy_idx], expected)
# all should return the same as we are slicing 'the same'
result1 = s.loc[2:5]
result2 = s.loc[2.0:5.0]
result3 = s.loc[2.0:5]
result4 = s.loc[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# previously this did fallback indexing
result1 = s[2:5]
result2 = s[2.0:5.0]
result3 = s[2.0:5]
result4 = s[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s.ix[2:5]
result2 = s.ix[2.0:5.0]
result3 = s.ix[2.0:5]
result4 = s.ix[2.1:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
# combined test
result1 = s.loc[2:5]
result2 = s.ix[2:5]
result3 = s[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# list selection
result1 = s[[0.0,5,10]]
result2 = s.loc[[0.0,5,10]]
result3 = s.ix[[0.0,5,10]]
result4 = s.iloc[[0,2,4]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, result4)
result1 = s[[1.6,5,10]]
result2 = s.loc[[1.6,5,10]]
result3 = s.ix[[1.6,5,10]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([np.nan,2,4],index=[1.6,5,10]))
result1 = s[[0,1,2]]
result2 = s.ix[[0,1,2]]
result3 = s.loc[[0,1,2]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([0.0,np.nan,np.nan],index=[0,1,2]))
result1 = s.loc[[2.5, 5]]
result2 = s.ix[[2.5, 5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, Series([1,2],index=[2.5,5.0]))
result1 = s[[2.5]]
result2 = s.ix[[2.5]]
result3 = s.loc[[2.5]]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
assert_series_equal(result1, Series([1],index=[2.5]))
def test_scalar_indexer(self):
# float indexing checked above
def check_invalid(index, loc=None, iloc=None, ix=None, getitem=None):
# related 236/4850
# trying to access with a float index
s = Series(np.arange(len(index)),index=index)
if iloc is None:
iloc = TypeError
self.assertRaises(iloc, lambda : s.iloc[3.5])
if loc is None:
loc = TypeError
self.assertRaises(loc, lambda : s.loc[3.5])
if ix is None:
ix = TypeError
self.assertRaises(ix, lambda : s.ix[3.5])
if getitem is None:
getitem = TypeError
self.assertRaises(getitem, lambda : s[3.5])
for index in [ tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
check_invalid(index())
check_invalid(Index(np.arange(5) * 2.5),loc=KeyError, ix=KeyError, getitem=KeyError)
def check_index(index, error):
index = index()
s = Series(np.arange(len(index)),index=index)
# positional selection
result1 = s[5]
result2 = s[5.0]
result3 = s.iloc[5]
result4 = s.iloc[5.0]
# by value
self.assertRaises(error, lambda : s.loc[5])
self.assertRaises(error, lambda : s.loc[5.0])
# this is fallback, so it works
result5 = s.ix[5]
result6 = s.ix[5.0]
self.assertEqual(result1, result2)
self.assertEqual(result1, result3)
self.assertEqual(result1, result4)
self.assertEqual(result1, result5)
self.assertEqual(result1, result6)
# string-like
for index in [ tm.makeStringIndex, tm.makeUnicodeIndex ]:
check_index(index, KeyError)
# datetimelike
for index in [ tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex ]:
check_index(index, TypeError)
# exact indexing when found on IntIndex
s = Series(np.arange(10),dtype='int64')
result1 = s[5.0]
result2 = s.loc[5.0]
result3 = s.ix[5.0]
result4 = s[5]
result5 = s.loc[5]
result6 = s.ix[5]
self.assertEqual(result1, result2)
self.assertEqual(result1, result3)
self.assertEqual(result1, result4)
self.assertEqual(result1, result5)
self.assertEqual(result1, result6)
def test_slice_indexer(self):
def check_iloc_compat(s):
# invalid type for iloc (but works with a warning)
# check_stacklevel=False -> impossible to get it right for all
# index types
with self.assert_produces_warning(
FutureWarning, check_stacklevel=False):
s.iloc[6.0:8]
with self.assert_produces_warning(
FutureWarning, check_stacklevel=False):
s.iloc[6.0:8.0]
with self.assert_produces_warning(
FutureWarning, check_stacklevel=False):
s.iloc[6:8.0]
def check_slicing_positional(index):
s = Series(np.arange(len(index))+10,index=index)
# these are all positional
result1 = s[2:5]
result2 = s.ix[2:5]
result3 = s.iloc[2:5]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# loc will fail
self.assertRaises(TypeError, lambda : s.loc[2:5])
# make all float slicing fail
self.assertRaises(TypeError, lambda : s[2.0:5])
self.assertRaises(TypeError, lambda : s[2.0:5.0])
self.assertRaises(TypeError, lambda : s[2:5.0])
self.assertRaises(TypeError, lambda : s.ix[2.0:5])
self.assertRaises(TypeError, lambda : s.ix[2.0:5.0])
self.assertRaises(TypeError, lambda : s.ix[2:5.0])
self.assertRaises(TypeError, lambda : s.loc[2.0:5])
self.assertRaises(TypeError, lambda : s.loc[2.0:5.0])
self.assertRaises(TypeError, lambda : s.loc[2:5.0])
check_iloc_compat(s)
# all index types except int, float
for index in [ tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex ]:
check_slicing_positional(index())
############
# IntIndex #
############
index = tm.makeIntIndex()
s = Series(np.arange(len(index),dtype='int64')+10,index+5)
# this is positional
result1 = s[2:5]
result4 = s.iloc[2:5]
assert_series_equal(result1, result4)
# these are all label based
result2 = s.ix[2:5]
result3 = s.loc[2:5]
assert_series_equal(result2, result3)
# float slicers on an int index
expected = Series([11,12,13],index=[6,7,8])
for method in [lambda x: x.loc, lambda x: x.ix]:
result = method(s)[6.0:8.5]
assert_series_equal(result, expected)
result = method(s)[5.5:8.5]
assert_series_equal(result, expected)
result = method(s)[5.5:8.0]
assert_series_equal(result, expected)
# make all float slicing fail for [] with an int index
self.assertRaises(TypeError, lambda : s[6.0:8])
self.assertRaises(TypeError, lambda : s[6.0:8.0])
self.assertRaises(TypeError, lambda : s[6:8.0])
check_iloc_compat(s)
##############
# FloatIndex #
##############
s.index = s.index.astype('float64')
# these are all value based
result1 = s[6:8]
result2 = s.ix[6:8]
result3 = s.loc[6:8]
assert_series_equal(result1, result2)
assert_series_equal(result1, result3)
# these are valid for all methods
# these are treated like labels (e.g. the rhs IS included)
def compare(slicers, expected):
for method in [lambda x: x, lambda x: x.loc, lambda x: x.ix ]:
for slices in slicers:
result = method(s)[slices]
assert_series_equal(result, expected)
compare([slice(6.0,8),slice(6.0,8.0),slice(6,8.0)],
s[(s.index>=6.0)&(s.index<=8)])
compare([slice(6.5,8),slice(6.5,8.5)],
s[(s.index>=6.5)&(s.index<=8.5)])
compare([slice(6,8.5)],
s[(s.index>=6.0)&(s.index<=8.5)])
compare([slice(6.5,6.5)],
s[(s.index>=6.5)&(s.index<=6.5)])
check_iloc_compat(s)
def test_set_ix_out_of_bounds_axis_0(self):
df = pd.DataFrame(randn(2, 5), index=["row%s" % i for i in range(2)], columns=["col%s" % i for i in range(5)])
self.assertRaises(ValueError, df.ix.__setitem__, (2, 0), 100)
def test_set_ix_out_of_bounds_axis_1(self):
df = pd.DataFrame(randn(5, 2), index=["row%s" % i for i in range(5)], columns=["col%s" % i for i in range(2)])
self.assertRaises(ValueError, df.ix.__setitem__, (0 , 2), 100)
def test_iloc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
def test_loc_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
assert_frame_equal(df.loc[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
assert_frame_equal(df.loc[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
assert_frame_equal(df.loc[[]], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
def test_ix_empty_list_indexer_is_ok(self):
from pandas.util.testing import makeCustomDataframe as mkdf
df = mkdf(5, 2)
# vertical empty
assert_frame_equal(df.ix[:, []], df.iloc[:, :0],
check_index_type=True, check_column_type=True)
# horizontal empty
assert_frame_equal(df.ix[[], :], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
# horizontal empty
assert_frame_equal(df.ix[[]], df.iloc[:0, :],
check_index_type=True, check_column_type=True)
def test_deprecate_float_indexers(self):
# GH 4892
# deprecate allowing float indexers that are equal to ints to be used
# as indexers in non-float indices
import warnings
warnings.filterwarnings(action='error', category=FutureWarning)
def check_index(index):
i = index(5)
for s in [ Series(np.arange(len(i)),index=i), DataFrame(np.random.randn(len(i),len(i)),index=i,columns=i) ]:
self.assertRaises(FutureWarning, lambda :
s.iloc[3.0])
# setting
def f():
s.iloc[3.0] = 0
self.assertRaises(FutureWarning, f)
# falls back to position selection, series only
s = Series(np.arange(len(i)),index=i)
s[3]
self.assertRaises(FutureWarning, lambda : s[3.0])
for index in [ tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex, tm.makePeriodIndex ]:
check_index(index)
# ints
i = index(5)
for s in [ Series(np.arange(len(i))), DataFrame(np.random.randn(len(i),len(i)),index=i,columns=i) ]:
self.assertRaises(FutureWarning, lambda :
s.iloc[3.0])
# on some architectures this doesn't produce a warning (and thus doesn't raise),
# on others it does
try:
s[3.0]
except:
pass
# setting
def f():
s.iloc[3.0] = 0
self.assertRaises(FutureWarning, f)
# floats: these are all ok!
i = np.arange(5.)
for s in [ Series(np.arange(len(i)),index=i), DataFrame(np.random.randn(len(i),len(i)),index=i,columns=i) ]:
with tm.assert_produces_warning(False):
s[3.0]
with tm.assert_produces_warning(False):
s[3]
self.assertRaises(FutureWarning, lambda :
s.iloc[3.0])
with tm.assert_produces_warning(False):
s.iloc[3]
with tm.assert_produces_warning(False):
s.loc[3.0]
with tm.assert_produces_warning(False):
s.loc[3]
def f():
s.iloc[3.0] = 0
self.assertRaises(FutureWarning, f)
# slices
for index in [ tm.makeIntIndex, tm.makeFloatIndex,
tm.makeStringIndex, tm.makeUnicodeIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
index = index(5)
for s in [ Series(range(5),index=index), DataFrame(np.random.randn(5,2),index=index) ]:
# getitem
self.assertRaises(FutureWarning, lambda :
s.iloc[3.0:4])
self.assertRaises(FutureWarning, lambda :
s.iloc[3.0:4.0])
self.assertRaises(FutureWarning, lambda :
s.iloc[3:4.0])
# setitem
def f():
s.iloc[3.0:4] = 0
self.assertRaises(FutureWarning, f)
def f():
s.iloc[3:4.0] = 0
self.assertRaises(FutureWarning, f)
def f():
s.iloc[3.0:4.0] = 0
self.assertRaises(FutureWarning, f)
warnings.filterwarnings(action='ignore', category=FutureWarning)
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10),
1.0: np.random.rand(10)})
df['a'] = 10
tm.assert_frame_equal(DataFrame({0.0: df[0.0],
1.0: df[1.0],
'a': [10] * 10}),
df)
def test_duplicate_ix_returns_series(self):
df = DataFrame(np.random.randn(3, 3), index=[0.1, 0.2, 0.2],
columns=list('abc'))
r = df.ix[0.2, 'a']
e = df.loc[0.2, 'a']
tm.assert_series_equal(r, e)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({'a': [1,2,3], 'b': [3,4,5]},index=[1.,2.,3.])
df.loc[df.index[:2]] = 1
expected = DataFrame({'a':[1,1,3],'b':[1,1,5]},index=df.index)
tm.assert_frame_equal(expected, df)
df = DataFrame({'a': [1,2,3], 'b': [3,4,5]},index=[1.,2.,3.])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df,df2)
def test_float_index_at_iat(self):
s = pd.Series([1, 2, 3], index=[0.1, 0.2, 0.3])
for el, item in s.iteritems():
self.assertEqual(s.at[el], item)
for i in range(len(s)):
self.assertEqual(s.iat[i], i + 1)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right):
# label, index, slice
r, i, s = list('bcd'), [1, 2, 3], slice(1, 4)
c, j, l = ['joe', 'jolie'], [1, 2], slice(1, 3)
left = df.copy()
left.loc[r, c] = rhs
assert_frame_equal(left, right)
left = df.copy()
left.iloc[i, j] = rhs
assert_frame_equal(left, right)
left = df.copy()
left.ix[s, l] = rhs
assert_frame_equal(left, right)
left = df.copy()
left.ix[i, j] = rhs
assert_frame_equal(left, right)
left = df.copy()
left.ix[r, c] = rhs
assert_frame_equal(left, right)
xs = np.arange(20).reshape(5, 4)
cols = ['jim', 'joe', 'jolie', 'joline']
df = pd.DataFrame(xs, columns=cols, index=list('abcde'))
# right hand side; permute the indices and multiply by -2
rhs = - 2 * df.iloc[3:0:-1, 2:0:-1]
# expected `right` result; just multiply by -2
right = df.copy()
right.iloc[1:4, 1:3] *= -2
# run tests with uniform dtypes
run_tests(df, rhs, right)
# make frames multi-type & re-run tests
for frame in [df, rhs, right]:
frame['joe'] = frame['joe'].astype('float64')
frame['jolie'] = frame['jolie'].map('@{0}'.format)
run_tests(df, rhs, right)
def test_str_label_slicing_with_negative_step(self):
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
if not idx.is_integer:
# For integer indices, ix and plain getitem are position-based.
assert_series_equal(s[l_slc], s.iloc[i_slc])
assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
for idx in [_mklbl('A', 20), np.arange(20) + 100,
np.linspace(100, 150, 20)]:
idx = Index(idx)
s = Series(np.arange(20), index=idx)
assert_slices_equivalent(SLC[idx[9]::-1], SLC[9::-1])
assert_slices_equivalent(SLC[:idx[9]:-1], SLC[:8:-1])
assert_slices_equivalent(SLC[idx[13]:idx[9]:-1], SLC[13:8:-1])
assert_slices_equivalent(SLC[idx[9]:idx[13]:-1], SLC[:0])
def test_multiindex_label_slicing_with_negative_step(self):
s = Series(np.arange(20),
MultiIndex.from_product([list('abcde'), np.arange(4)]))
SLC = pd.IndexSlice
def assert_slices_equivalent(l_slc, i_slc):
assert_series_equal(s.loc[l_slc], s.iloc[i_slc])
assert_series_equal(s[l_slc], s.iloc[i_slc])
assert_series_equal(s.ix[l_slc], s.iloc[i_slc])
assert_slices_equivalent(SLC[::-1], SLC[::-1])
assert_slices_equivalent(SLC['d'::-1], SLC[15::-1])
assert_slices_equivalent(SLC[('d',)::-1], SLC[15::-1])
assert_slices_equivalent(SLC[:'d':-1], SLC[:11:-1])
assert_slices_equivalent(SLC[:('d',):-1], SLC[:11:-1])
assert_slices_equivalent(SLC['d':'b':-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[('d',):'b':-1], SLC[15:3:-1])
assert_slices_equivalent(SLC['d':('b',):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC[('d',):('b',):-1], SLC[15:3:-1])
assert_slices_equivalent(SLC['b':'d':-1], SLC[:0])
assert_slices_equivalent(SLC[('c', 2)::-1], SLC[10::-1])
assert_slices_equivalent(SLC[:('c', 2):-1], SLC[:9:-1])
assert_slices_equivalent(SLC[('e', 0):('c', 2):-1], SLC[16:9:-1])
def test_slice_with_zero_step_raises(self):
s = Series(np.arange(20), index=_mklbl('A', 20))
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.loc[::0])
self.assertRaisesRegexp(ValueError, 'slice step cannot be zero',
lambda: s.ix[::0])
def test_indexing_assignment_dict_already_exists(self):
df = pd.DataFrame({'x': [1, 2, 6],
'y': [2, 2, 8],
'z': [-5, 0, 5]}).set_index('z')
expected = df.copy()
rhs = dict(x=9, y=99)
df.loc[5] = rhs
expected.loc[5] = [9, 99]
tm.assert_frame_equal(df, expected)
def test_indexing_dtypes_on_empty(self):
# Check that .iloc and .ix return correct dtypes GH9983
df = DataFrame({'a':[1,2,3],'b':['b','b2','b3']})
df2 = df.ix[[],:]
self.assertEqual(df2.loc[:,'a'].dtype, np.int64)
assert_series_equal(df2.loc[:,'a'], df2.iloc[:,0])
assert_series_equal(df2.loc[:,'a'], df2.ix[:,0])
def test_range_in_series_indexing(self):
# range can cause an indexing error
# GH 11652
for x in [5, 999999, 1000000]:
s = pd.Series(index=range(x))
s.loc[range(1)] = 42
assert_series_equal(s.loc[range(1)],Series(42.0,index=[0]))
s.loc[range(2)] = 43
assert_series_equal(s.loc[range(2)],Series(43.0,index=[0,1]))
@slow
def test_large_dataframe_indexing(self):
#GH10692
result = DataFrame({'x': range(10**6)},dtype='int64')
result.loc[len(result)] = len(result) + 1
expected = DataFrame({'x': range(10**6 + 1)},dtype='int64')
assert_frame_equal(result, expected)
@slow
def test_large_mi_dataframe_indexing(self):
#GH10645
result = MultiIndex.from_arrays([range(10**6), range(10**6)])
assert(not (10**6, 0) in result)
def test_non_reducing_slice(self):
df = pd.DataFrame([[0, 1], [2, 3]])
slices = [
# pd.IndexSlice[:, :],
pd.IndexSlice[:, 1],
pd.IndexSlice[1, :],
pd.IndexSlice[[1], [1]],
pd.IndexSlice[1, [1]],
pd.IndexSlice[[1], 1],
pd.IndexSlice[1],
pd.IndexSlice[1, 1],
slice(None, None, None),
[0, 1],
np.array([0, 1]),
pd.Series([0, 1])
]
for slice_ in slices:
tslice_ = _non_reducing_slice(slice_)
self.assertTrue(isinstance(df.loc[tslice_], DataFrame))
def test_list_slice(self):
# like dataframe getitem
slices = [['A'], pd.Series(['A']), np.array(['A'])]
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['A', 'B'])
expected = pd.IndexSlice[:, ['A']]
for subset in slices:
result = _non_reducing_slice(subset)
tm.assert_frame_equal(df.loc[result], df.loc[expected])
def test_maybe_numeric_slice(self):
df = pd.DataFrame({'A': [1, 2], 'B': ['c', 'd'], 'C': [True, False]})
result = _maybe_numeric_slice(df, slice_=None)
expected = pd.IndexSlice[:, ['A']]
self.assertEqual(result, expected)
result = _maybe_numeric_slice(df, None, include_bool=True)
expected = pd.IndexSlice[:, ['A', 'C']]
result = _maybe_numeric_slice(df, [1])
expected = [1]
self.assertEqual(result, expected)
class TestCategoricalIndex(tm.TestCase):
def setUp(self):
self.df = DataFrame({'A' : np.arange(6,dtype='int64'),
'B' : Series(list('aabbca')).astype('category',categories=list('cab')) }).set_index('B')
self.df2 = DataFrame({'A' : np.arange(6,dtype='int64'),
'B' : Series(list('aabbca')).astype('category',categories=list('cabe')) }).set_index('B')
self.df3 = DataFrame({'A' : np.arange(6,dtype='int64'),
'B' : Series([1,1,2,1,3,2]).astype('category',categories=[3,2,1],ordered=True) }).set_index('B')
self.df4 = DataFrame({'A' : np.arange(6,dtype='int64'),
'B' : Series([1,1,2,1,3,2]).astype('category',categories=[3,2,1],ordered=False) }).set_index('B')
def test_loc_scalar(self):
result = self.df.loc['a']
expected = DataFrame({'A' : [0,1,5],
'B' : Series(list('aaa')).astype('category',categories=list('cab')) }).set_index('B')
assert_frame_equal(result, expected)
df = self.df.copy()
df.loc['a'] = 20
expected = DataFrame({'A' : [20,20,2,3,4,20],
'B' : Series(list('aabbca')).astype('category',categories=list('cab')) }).set_index('B')
assert_frame_equal(df, expected)
# value not in the categories
self.assertRaises(KeyError, lambda : df.loc['d'])
def f():
df.loc['d'] = 10
self.assertRaises(TypeError, f)
def f():
df.loc['d','A'] = 10
self.assertRaises(TypeError, f)
def f():
df.loc['d','C'] = 10
self.assertRaises(TypeError, f)
def test_loc_listlike(self):
# list of labels
result = self.df.loc[['c','a']]
expected = self.df.iloc[[4,0,1,5]]
assert_frame_equal(result, expected)
# ToDo: check_index_type can be True after GH XXX
result = self.df2.loc[['a','b','e']]
exp_index = pd.CategoricalIndex(list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A' : [0,1,5,2,3,np.nan]}, index=exp_index)
assert_frame_equal(result, expected, check_index_type=False)
# element in the categories but not in the values
self.assertRaises(KeyError, lambda : self.df2.loc['e'])
# assign is ok
df = self.df2.copy()
df.loc['e'] = 20
result = df.loc[['a','b','e']]
exp_index = pd.CategoricalIndex(list('aaabbe'), categories=list('cabe'), name='B')
expected = DataFrame({'A' : [0,1,5,2,3,20]}, index=exp_index)
assert_frame_equal(result, expected)
df = self.df2.copy()
result = df.loc[['a','b','e']]
expected = DataFrame({'A' : [0,1,5,2,3,np.nan],
'B' : Series(list('aaabbe')).astype('category',categories=list('cabe')) }).set_index('B')
assert_frame_equal(result, expected, check_index_type=False)
# not all labels in the categories
self.assertRaises(KeyError, lambda : self.df2.loc[['a','d']])
def test_read_only_source(self):
# GH 10043
rw_array = np.eye(10)
rw_df = DataFrame(rw_array)
ro_array = np.eye(10)
ro_array.setflags(write=False)
ro_df = DataFrame(ro_array)
assert_frame_equal(rw_df.iloc[[1,2,3]],ro_df.iloc[[1,2,3]])
assert_frame_equal(rw_df.iloc[[1]],ro_df.iloc[[1]])
assert_series_equal(rw_df.iloc[1],ro_df.iloc[1])
assert_frame_equal(rw_df.iloc[1:3],ro_df.iloc[1:3])
assert_frame_equal(rw_df.loc[[1,2,3]],ro_df.loc[[1,2,3]])
assert_frame_equal(rw_df.loc[[1]],ro_df.loc[[1]])
assert_series_equal(rw_df.loc[1],ro_df.loc[1])
assert_frame_equal(rw_df.loc[1:3],ro_df.loc[1:3])
def test_reindexing(self):
# reindexing
# convert to a regular index
result = self.df2.reindex(['a','b','e'])
expected = DataFrame({'A' : [0,1,5,2,3,np.nan],
'B' : Series(list('aaabbe')) }).set_index('B')
assert_frame_equal(result, expected)
result = self.df2.reindex(['a','b'])
expected = DataFrame({'A' : [0,1,5,2,3],
'B' : Series(list('aaabb')) }).set_index('B')
assert_frame_equal(result, expected)
result = self.df2.reindex(['e'])
expected = DataFrame({'A' : [np.nan],
'B' : Series(['e']) }).set_index('B')
assert_frame_equal(result, expected)
result = self.df2.reindex(['d'])
expected = DataFrame({'A' : [np.nan],
'B' : Series(['d']) }).set_index('B')
assert_frame_equal(result, expected)
# since we are actually reindexing with a Categorical
# then return a Categorical
cats = list('cabe')
result = self.df2.reindex(pd.Categorical(['a','d'],categories=cats))
expected = DataFrame({'A' : [0,1,5,np.nan],
'B' : Series(list('aaad')).astype('category',categories=cats) }).set_index('B')
assert_frame_equal(result, expected)
result = self.df2.reindex(pd.Categorical(['a'],categories=cats))
expected = DataFrame({'A' : [0,1,5],
'B' : Series(list('aaa')).astype('category',categories=cats) }).set_index('B')
assert_frame_equal(result, expected)
result = self.df2.reindex(['a','b','e'])
expected = DataFrame({'A' : [0,1,5,2,3,np.nan],
'B' : Series(list('aaabbe')) }).set_index('B')
assert_frame_equal(result, expected)
result = self.df2.reindex(['a','b'])
expected = DataFrame({'A' : [0,1,5,2,3],
'B' : Series(list('aaabb')) }).set_index('B')
assert_frame_equal(result, expected)
result = self.df2.reindex(['e'])
expected = DataFrame({'A' : [np.nan],
'B' : Series(['e']) }).set_index('B')
assert_frame_equal(result, expected)
# give back the type of categorical that we received
result = self.df2.reindex(pd.Categorical(['a','d'],categories=cats,ordered=True))
expected = DataFrame({'A' : [0,1,5,np.nan],
'B' : Series(list('aaad')).astype('category',categories=cats,ordered=True) }).set_index('B')
assert_frame_equal(result, expected)
result = self.df2.reindex(pd.Categorical(['a','d'],categories=['a','d']))
expected = DataFrame({'A' : [0,1,5,np.nan],
'B' : Series(list('aaad')).astype('category',categories=['a','d']) }).set_index('B')
assert_frame_equal(result, expected)
# passed duplicate indexers are not allowed
self.assertRaises(ValueError, lambda : self.df2.reindex(['a','a']))
# args NotImplemented ATM
self.assertRaises(NotImplementedError, lambda : self.df2.reindex(['a'],method='ffill'))
self.assertRaises(NotImplementedError, lambda : self.df2.reindex(['a'],level=1))
self.assertRaises(NotImplementedError, lambda : self.df2.reindex(['a'],limit=2))
def test_loc_slice(self):
# slicing
# not implemented ATM
# GH9748
self.assertRaises(TypeError, lambda : self.df.loc[1:5])
#result = df.loc[1:5]
#expected = df.iloc[[1,2,3,4]]
#assert_frame_equal(result, expected)
def test_boolean_selection(self):
df3 = self.df3
df4 = self.df4
result = df3[df3.index == 'a']
expected = df3.iloc[[]]
assert_frame_equal(result,expected)
result = df4[df4.index == 'a']
expected = df4.iloc[[]]
assert_frame_equal(result,expected)
result = df3[df3.index == 1]
expected = df3.iloc[[0,1,3]]
assert_frame_equal(result,expected)
result = df4[df4.index == 1]
expected = df4.iloc[[0,1,3]]
assert_frame_equal(result,expected)
# since we have an ordered categorical
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=True,
# name=u'B')
result = df3[df3.index < 2]
expected = df3.iloc[[4]]
assert_frame_equal(result,expected)
result = df3[df3.index > 1]
expected = df3.iloc[[]]
assert_frame_equal(result,expected)
# unordered
# cannot be compared
# CategoricalIndex([1, 1, 2, 1, 3, 2],
# categories=[3, 2, 1],
# ordered=False,
# name=u'B')
self.assertRaises(TypeError, lambda : df4[df4.index < 2])
self.assertRaises(TypeError, lambda : df4[df4.index > 1])
class TestSeriesNoneCoercion(tm.TestCase):
EXPECTED_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[0] = None
expected_series = Series(expected_result)
assert_attr_equal('dtype', start_series, expected_series)
tm.assert_numpy_array_equal(
start_series.values,
expected_series.values, strict_nan=True)
def test_coercion_with_loc_setitem(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[0] = None
expected_series = Series(expected_result)
assert_attr_equal('dtype', start_series, expected_series)
tm.assert_numpy_array_equal(
start_series.values,
expected_series.values, strict_nan=True)
def test_coercion_with_setitem_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series[start_series == start_series[0]] = None
expected_series = Series(expected_result)
assert_attr_equal('dtype', start_series, expected_series)
tm.assert_numpy_array_equal(
start_series.values,
expected_series.values, strict_nan=True)
def test_coercion_with_loc_and_series(self):
for start_data, expected_result in self.EXPECTED_RESULTS:
start_series = Series(start_data)
start_series.loc[start_series == start_series[0]] = None
expected_series = Series(expected_result)
assert_attr_equal('dtype', start_series, expected_series)
tm.assert_numpy_array_equal(
start_series.values,
expected_series.values, strict_nan=True)
class TestDataframeNoneCoercion(tm.TestCase):
EXPECTED_SINGLE_ROW_RESULTS = [
# For numeric series, we should coerce to NaN.
([1, 2, 3], [np.nan, 2, 3]),
([1.0, 2.0, 3.0], [np.nan, 2.0, 3.0]),
# For datetime series, we should coerce to NaT.
([datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
[NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)]),
# For objects, we should preserve the None value.
(["foo", "bar", "baz"], [None, "bar", "baz"]),
]
def test_coercion_with_loc(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[0, ['foo']] = None
expected_dataframe = DataFrame({'foo': expected_result})
assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
tm.assert_numpy_array_equal(
start_dataframe['foo'].values,
expected_dataframe['foo'].values, strict_nan=True)
def test_coercion_with_setitem_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe[start_dataframe['foo'] == start_dataframe['foo'][0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
tm.assert_numpy_array_equal(
start_dataframe['foo'].values,
expected_dataframe['foo'].values, strict_nan=True)
def test_none_coercion_loc_and_dataframe(self):
for start_data, expected_result, in self.EXPECTED_SINGLE_ROW_RESULTS:
start_dataframe = DataFrame({'foo': start_data})
start_dataframe.loc[start_dataframe['foo'] == start_dataframe['foo'][0]] = None
expected_dataframe = DataFrame({'foo': expected_result})
assert_attr_equal('dtype', start_dataframe['foo'], expected_dataframe['foo'])
tm.assert_numpy_array_equal(
start_dataframe['foo'].values,
expected_dataframe['foo'].values, strict_nan=True)
def test_none_coercion_mixed_dtypes(self):
start_dataframe = DataFrame({
'a': [1, 2, 3],
'b': [1.0, 2.0, 3.0],
'c': [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)],
'd': ['a', 'b', 'c']})
start_dataframe.iloc[0] = None
expected_dataframe = DataFrame({
'a': [np.nan, 2, 3],
'b': [np.nan, 2.0, 3.0],
'c': [NaT, datetime(2000, 1, 2), datetime(2000, 1, 3)],
'd': [None, 'b', 'c']})
for column in expected_dataframe.columns:
assert_attr_equal('dtype', start_dataframe[column], expected_dataframe[column])
tm.assert_numpy_array_equal(
start_dataframe[column].values,
expected_dataframe[column].values, strict_nan=True)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
shenqicang/openmc | src/utils/tally_conv.py | 4 | 16636 | #!/usr/bin/env python
# This program takes OpenMC statepoint binary files and creates a variety of
# outputs from them which should provide the user with an idea of the
# convergence behavior of all the tallies and filters defined by the user in
# tallies.xml. The program can directly plot the value and errors of each
# tally, filter, score combination; it can save these plots to a file; and
# it can also save the data used in these plots to a CSV file for importing in
# to other plotting packages such as Excel, gnuplot, MathGL, or Veusz.
# To use the program, run it from the working directory of the OpenMC
# problem to be analyzed.
# The USER OPTIONS block below provides four options for the user to set:
# fileType, printxs, showImg, and savetoCSV. See the options block for more
# information.
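# A minimal usage sketch (assumptions: the current directory is an OpenMC run
# directory that already contains statepoint.*.binary files; the path is
# hypothetical):
#   cd /path/to/openmc/run
#   python tally_conv.py
# The options are edited directly in the USER OPTIONS block below; nothing is
# passed on the command line.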
from __future__ import print_function
from math import sqrt, pow
from glob import glob
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
from statepoint import StatePoint
##################################### USER OPTIONS
# Set filetype (the file extension desired, without the period.)
# Options are backend dependent, but most backends support png, pdf, ps, eps
# and svg. Write "none" if no saved files are desired.
fileType = "none"
# Set if cross-sections or reaction rates are desired; printxs = True means X/S
printxs = False
# Set if the figures should be displayed to screen or not (True means show)
showImg = False
# Save to CSV for use in more advanced plotting programs like GNUPlot, MathGL
savetoCSV = True
##################################### END USER OPTIONS
## Find if tallies.xml exists.
#if glob('./tallies.xml') != None:
# # It exists
# tallyData = talliesXML('tallies.xml')
#else:
# # It does not exist.
# tallyData = None
# Find all statepoints in this directory.
files = glob('./statepoint.*.binary')
fileNums = []
begin = 13
# Arrange the file list in increasing batch order
for i in range(len(files)):
end = files[i].find(".binary")
fileNums.append(int(files[i][begin:end]))
fileNums.sort()
# Re-make filenames
files = []
for i in range(len(fileNums)):
files.append("./statepoint." + str(fileNums[i]) + ".binary")
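# Note: the loops above simply sort the statepoint files by batch number.
# An equivalent one-liner sketch (not used here, shown only for clarity):
#   files = sorted(glob('./statepoint.*.binary'),
#                  key=lambda f: int(f[begin:f.find('.binary')]))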
# Initialize arrays as needed
mean = [None for x in range(len(files))]
uncert = [None for x in range(len(files))]
scoreType = [None for x in range(len(files))]
active_batches = [None for x in range(len(files))]
for i_batch in range(len(files)):
# Get filename
batch_filename = files[i_batch]
# Create StatePoint object
sp = StatePoint(batch_filename)
# Read number of realizations for global tallies
sp.n_realizations = sp._get_int()[0]
# Read global tallies
n_global_tallies = sp._get_int()[0]
sp.global_tallies = np.array(sp._get_double(2*n_global_tallies))
sp.global_tallies.shape = (n_global_tallies, 2)
# Flag indicating if tallies are present
tallies_present = sp._get_int()[0]
# Check if tallies are present
if not tallies_present:
raise Exception("No tally data in state point!")
# Increase the dimensionality of our main variables
mean[i_batch] = [None for x in range(len(sp.tallies))]
uncert[i_batch] = [None for x in range(len(sp.tallies))]
scoreType[i_batch] = [None for x in range(len(sp.tallies))]
# Loop over all tallies
for i_tally, t in enumerate(sp.tallies):
# Calculate t-value for 95% two-sided CI
n = t.n_realizations
t_value = scipy.stats.t.ppf(0.975, n - 1)
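# t_value is the two-sided 95% critical value of Student's t with n-1 degrees
# of freedom, e.g. scipy.stats.t.ppf(0.975, 29) is roughly 2.05 for n = 30.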
# Store the batch count
active_batches[i_batch] = n
# Resize the 2nd dimension
mean[i_batch][i_tally] = [None for x in range(t.total_filter_bins)]
uncert[i_batch][i_tally] = [None for x in range(t.total_filter_bins)]
scoreType[i_batch][i_tally] = [None for x in range(t.total_filter_bins)]
for i_filter in range(t.total_filter_bins):
# Resize the 3rd dimension
mean[i_batch][i_tally][i_filter] = [None for x in range(t.n_nuclides)]
uncert[i_batch][i_tally][i_filter] = [None for x in range(t.n_nuclides)]
scoreType[i_batch][i_tally][i_filter] = [None for x in range(t.n_nuclides)]
print(t.total_filter_bins,t.n_nuclides)
for i_nuclide in range(t.n_nuclides):
mean[i_batch][i_tally][i_filter][i_nuclide] = \
[None for x in range(t.n_scores)]
uncert[i_batch][i_tally][i_filter][i_nuclide] = \
[None for x in range(t.n_scores)]
scoreType[i_batch][i_tally][i_filter][i_nuclide] = \
[None for x in range(t.n_scores)]
for i_score in range(t.n_scores):
scoreType[i_batch][i_tally][i_filter][i_nuclide][i_score] = \
t.scores[i_score]
s, s2 = sp._get_double(2)
s /= n
mean[i_batch][i_tally][i_filter][i_nuclide][i_score] = s
if s != 0.0:
relative_error = t_value*sqrt((s2/n - s*s)/(n-1))/s
else:
relative_error = 0.0
uncert[i_batch][i_tally][i_filter][i_nuclide][i_score] = relative_error
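# For reference: s2 is the accumulated sum of squares, so (s2/n - s*s)/(n-1)
# estimates the variance of the mean, and relative_error is the half-width of
# the 95% confidence interval divided by the mean value s.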
# Reorder the data lists into an order more conducive to plotting:
# The indexing should be: [tally][filter][nuclide][score][batch]
meanPlot = [None for x in range(len(mean[0]))] # Set to the number of tallies
uncertPlot = [None for x in range(len(mean[0]))] # Set to the number of tallies
absUncertPlot = [None for x in range(len(mean[0]))] # Set to number of tallies
filterLabel = [None for x in range(len(mean[0]))] #Set to the number of tallies
fluxLoc = [None for x in range(len(mean[0]))] # Set to the number of tallies
printxs = [False for x in range(len(mean[0]))] # Set to the number of tallies
# Get and set the correct sizes for the rest of the dimensions
for i_tally in range(len(meanPlot)):
# Set 2nd (score) dimension
meanPlot[i_tally] = [None for x in range(len(mean[0][i_tally]))]
uncertPlot[i_tally] = [None for x in range(len(mean[0][i_tally]))]
absUncertPlot[i_tally] = [None for x in range(len(mean[0][i_tally]))]
filterLabel[i_tally] = [None for x in range(len(mean[0][i_tally]))]
# Initialize flux location so it will be -1 if not found
fluxLoc[i_tally] = -1
for i_filter in range(len(meanPlot[i_tally])):
# Set 3rd (filter) dimension
meanPlot[i_tally][i_filter] = \
[None for x in range(len(mean[0][i_tally][i_filter]))]
uncertPlot[i_tally][i_filter] = \
[None for x in range(len(mean[0][i_tally][i_filter]))]
absUncertPlot[i_tally][i_filter] = \
[None for x in range(len(mean[0][i_tally][i_filter]))]
filterLabel[i_tally][i_filter] = \
[None for x in range(len(mean[0][i_tally][i_filter]))]
for i_nuclide in range(len(meanPlot[i_tally][i_filter])):
# Set 4th (nuclide) dimension
meanPlot[i_tally][i_filter][i_nuclide] = \
[None for x in range(len(mean[0][i_tally][i_filter][i_nuclide]))]
uncertPlot[i_tally][i_filter][i_nuclide] = \
[None for x in range(len(mean[0][i_tally][i_filter][i_nuclide]))]
absUncertPlot[i_tally][i_filter][i_nuclide] = \
[None for x in range(len(mean[0][i_tally][i_filter][i_nuclide]))]
for i_score in range(len(meanPlot[i_tally][i_filter][i_nuclide])):
# Set 5th (batch) dimension
meanPlot[i_tally][i_filter][i_nuclide][i_score] = \
[None for x in range(len(mean))]
uncertPlot[i_tally][i_filter][i_nuclide][i_score] = \
[None for x in range(len(mean))]
absUncertPlot[i_tally][i_filter][i_nuclide][i_score] = \
[None for x in range(len(mean))]
# Get filterLabel (this should be moved to its own function)
#??? How to do?
# Set flux location if found
# all batches, filter bins and nuclides share the same score ordering, hence
# the 0's in the batch, filter and nuclide dimensions.
if scoreType[0][i_tally][0][0][i_score] == 'flux':
fluxLoc[i_tally] = i_score
# Set printxs array according to the printxs input
if printxs:
for i_tally in range(len(fluxLoc)):
if fluxLoc[i_tally] != -1:
printxs[i_tally] = True
# Now rearrange the data as suitable, and perform xs conversion if necessary
for i_batch in range(len(mean)):
for i_tally in range(len(mean[i_batch])):
for i_filter in range(len(mean[i_batch][i_tally])):
for i_nuclide in range(len(mean[i_batch][i_tally][i_filter])):
for i_score in range(len(mean[i_batch][i_tally][i_filter][i_nuclide])):
if (printxs[i_tally] and \
((scoreType[0][i_tally][i_filter][i_nuclide][i_score] != 'flux') and \
(scoreType[0][i_tally][i_filter][i_nuclide][i_score] != 'current'))):
# Perform rate to xs conversion
# mean is mean/fluxmean
meanPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
mean[i_batch][i_tally][i_filter][i_nuclide][i_score] / \
mean[i_batch][i_tally][i_filter][i_nuclide][fluxLoc[i_tally]]
# Update the relative uncertainty via error propagation
uncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
sqrt(pow(uncert[i_batch][i_tally][i_filter][i_nuclide][i_score],2) \
+ pow(uncert[i_batch][i_tally][i_filter][i_nuclide][fluxLoc[i_tally]],2))
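# The line above propagates the uncertainty of the ratio rate/flux assuming
# the two tallies are independent: for R = A/B the relative errors add in
# quadrature, (dR/R)^2 ~ (dA/A)^2 + (dB/B)^2, which is exactly the
# sqrt(pow(...,2) + pow(...,2)) expression used here.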
else:
# Do not perform rate to xs conversion
meanPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
mean[i_batch][i_tally][i_filter][i_nuclide][i_score]
uncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
uncert[i_batch][i_tally][i_filter][i_nuclide][i_score]
# Both have the same absolute uncertainty calculation
absUncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch] = \
uncert[i_batch][i_tally][i_filter][i_nuclide][i_score] * \
mean[i_batch][i_tally][i_filter][i_nuclide][i_score]
# Set plotting constants
xLabel = "Batches"
xLabel = xLabel.title() # not necessary for now, but is left in to handle if
# the previous line changes
# Begin plotting
for i_tally in range(len(meanPlot)):
# Set tally string (placeholder until I put tally labels in statePoint)
tallyStr = "Tally " + str(i_tally + 1)
for i_filter in range(len(meanPlot[i_tally])):
# Set filter string
filterStr = "Filter " + str(i_filter + 1)
for i_nuclide in range(len(meanPlot[i_tally][i_filter])):
nuclideStr = "Nuclide " + str(i_nuclide + 1)
for i_score in range(len(meanPlot[i_tally][i_filter][i_nuclide])):
# Set score string
scoreStr = scoreType[i_batch][i_tally][i_filter][i_nuclide][i_score]
scoreStr = scoreStr.title()
if (printxs[i_tally] and ((scoreStr != 'Flux') and \
(scoreStr != 'Current'))):
scoreStr = scoreStr + "-XS"
# set Title
title = "Convergence of " + scoreStr + " in " + tallyStr + " for "\
+ filterStr + " and " + nuclideStr
# set yLabel
yLabel = scoreStr
yLabel = yLabel.title()
# Set saving filename
fileName = "tally_" + str(i_tally + 1) + "_" + scoreStr + \
"_filter_" + str(i_filter+1) + "_nuclide_" + str(i_nuclide+1) \
+ "." + fileType
REfileName = "tally_" + str(i_tally + 1) + "_" + scoreStr + \
"RE_filter_" + str(i_filter+1) + "_nuclide_" + str(i_nuclide+1) \
+ "." + fileType
# Plot mean with absolute error bars
plt.errorbar(active_batches, \
meanPlot[i_tally][i_filter][i_nuclide][i_score][:], \
absUncertPlot[i_tally][i_filter][i_nuclide][i_score][:],fmt='o-',aa=True)
plt.xlabel(xLabel)
plt.ylabel(yLabel)
plt.title(title)
if (fileType != 'none'):
plt.savefig(fileName)
if showImg:
plt.show()
plt.clf()
# Plot relative uncertainty
plt.plot(active_batches, \
uncertPlot[i_tally][i_filter][i_nuclide][i_score][:],'o-',aa=True)
plt.xlabel(xLabel)
plt.ylabel("Relative Error of " + yLabel)
plt.title("Relative Error of " + title)
if (fileType != 'none'):
plt.savefig(REfileName)
if showImg:
plt.show()
plt.clf()
if savetoCSV:
# This block loops through each tally, and for each tally:
# Creates a new file
# Writes the scores and filters for that tally in csv format.
# The columns will be: batches, then for each filter, nuclide and score the
# value plus its absolute and relative uncertainty.
# The rows, of course, are the data points per batch.
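# For example (sketch), a tally with one filter bin, one nuclide and a single
# 'flux' score would get the header line:
#   Batches,Flux for Filter 1 for Nuclide 1,Abs Unc of Flux for Filter 1 for Nuclide 1,Rel Unc of Flux for Filter 1 for Nuclide 1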
for i_tally in range(len(meanPlot)):
# Set tally string (placeholder until I put tally labels in statePoint)
tallyStr = "Tally " + str(i_tally + 1)
CSV_filename = "./tally" + str(i_tally+1)+".csv"
# Open the file
f = open(CSV_filename, 'w')
# Write the header line
lineText = "Batches"
for i_filter in range(len(meanPlot[i_tally])):
# Set filter string
filterStr = "Filter " + str(i_filter + 1)
for i_nuclide in range(len(meanPlot[i_tally][i_filter])):
nuclideStr = "Nuclide " + str(i_nuclide + 1)
for i_score in range(len(meanPlot[i_tally][i_filter][i_nuclide])):
# Set the title
scoreStr = scoreType[i_batch][i_tally][i_filter][i_nuclide][i_score]
scoreStr = scoreStr.title()
if (printxs[i_tally] and ((scoreStr != 'Flux') and \
(scoreStr != 'Current'))):
scoreStr = scoreStr + "-XS"
# set header
headerText = scoreStr + " for " + filterStr + " for " + nuclideStr
lineText = lineText + "," + headerText + \
",Abs Unc of " + headerText + \
",Rel Unc of " + headerText
f.write(lineText + "\n")
# Write the data lines, each row is a different batch
for i_batch in range(len(meanPlot[i_tally][0][0][0])):
lineText = repr(active_batches[i_batch])
for i_filter in range(len(meanPlot[i_tally])):
for i_nuclide in range(len(meanPlot[i_tally][i_filter])):
for i_score in range(len(meanPlot[i_tally][i_filter][i_nuclide])):
fieldText = \
repr(meanPlot[i_tally][i_filter][i_nuclide][i_score][i_batch]) + \
"," + \
repr(absUncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch]) +\
"," + \
repr(uncertPlot[i_tally][i_filter][i_nuclide][i_score][i_batch])
lineText = lineText + "," + fieldText
f.write(lineText + "\n")
# Close this tally's CSV file once all of its batch rows have been written
f.close()
| mit |
cdiazbas/LMpyMilne | Versions/LMSmapas.py | 1 | 6130 | # ====================================================================================================================================
# ====================================================================================================================================
###############################################################
# LMEI: Levenberg-Marquardt (with constraints) for a Milne-Eddington atmosphere inversion, smooth version
#
# CALL: python LMSmapas.py
###############################################################
# ================================================= LIBRARIES
from mutils2 import *
from milne import *
import time
import os
from LMmilne import *
import numpy as np
from math import pi
# from lmfit import minimize, Parameters, fit_report
import matplotlib.pyplot as plt
from scipy import ndimage
# ================================================= INPUT
nlinea = 3
finalsave = 'finalLMmilne3S.npy'
rango = np.arange(32, 130)
wave = np.load('/scratch/carlos/datos_gregor/BINNING/xLambdaBin.npy')
obs = np.load('/scratch/carlos/datos_gregor/BINNING/mapa12B2.npy')
# obs = obs[:, :, 0:20, :]
# ================================================= INPUT2
Chitol = 1e-6
Maxifev = 300
pesoI0 = 1.
pesoQ0 = 2.
pesoU0 = 2.
pesoV0 = 4.
vlos = 1.1 # Doppler velocity
eta0 = 3. # Line-to-continuum absorption ratio
a = 0.6 # Damping parameter
ddop = 0.05 # Doppler width
S_0 = 0.3 # Source function intercept
S_1 = 0.6 # Source function gradient
siter = 2 # Number of smoothing iterations
# ================================================= LOADING
param = paramLine(nlinea)
l0 = param[2][-4]
wave = wave[rango] - l0
obs = obs[:, :, :, rango]
dx = wave[1]-wave[0]
x = np.arange(wave[0], wave[-1]+dx, dx)
yinver = np.ones((obs.shape[0], obs.shape[2], 11))
npix = obs.shape[0]*obs.shape[2]
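# Rough per-pixel inversion time in seconds (an assumed estimate, used only for the ETA printouts)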
ipp = 0.3*3.*2.
print('Estimated time: {0:4.2f} s == {1:4.2f} min'.format(ipp*npix, ipp*npix/60.))
# ================================================= SMOOTH
Maxifeviter = np.linspace(Maxifev/(siter*2), Maxifev/siter, siter)
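# Function-evaluation budget per smoothing pass: ramps from Maxifev/(2*siter) up to Maxifev/siter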
print('Iterations: {0}'.format(Maxifeviter))
time.sleep(5)
sigmaiter = np.linspace(1.0, 0.0, siter)
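# Width of the Gaussian smoothing applied between passes; it decreases linearly to 0 for the final pass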
time0 = time.time()
for niter in range(siter):
# ================================================= INVERSION
for i in range(obs.shape[0]):
for j in range(obs.shape[2]):
y2 = obs[i, :, j, :]
yc = list(y2[0])+list(y2[1])+list(y2[2])+list(y2[3])
if niter == 0:
                # Initial conditions module:
iB, igamma, ixi = initialConditions(y2, nlinea, x, param)
ixi = rad((grad(ixi) + 180.) % 180.)
igamma = rad((grad(igamma) + 180.) % 180.)
#igamma = grad(igamma)
                # Array of initial values
p = [iB, igamma, ixi, vlos, eta0, a, ddop, S_0, S_1]
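                # Initial parameter vector; the ordering (B, gamma, xi, vlos, eta0, a,
                # ddop, S_0, S_1) is assumed to match what inversionStokes/stokesSyn expect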
if niter != 0:
p = yinver[i, j, :]
pesoI = 1.*pesoI0
pesoQ = 1./max(y2[1])*pesoQ0
pesoU = 1./max(y2[2])*pesoU0
pesoV = 1./max(y2[3])*pesoV0
# print('----------------------------------------------------')
            # print('weight V: {0:2.3f}'.format(pesoV))
            # print('weights Q, U: {0:2.3f}, {1:2.3f}'.format(pesoQ, pesoU))
            print('remaining: {0:4.1f} s.'.format(np.abs(time.time()-time0-ipp*npix)))
            # Set the weights for each Stokes quarter of the data vector
            peso = ones(len(yc))
            peso[0:len(yc)/4] = pesoI
            peso[len(yc)/4:2*len(yc)/4] = pesoQ
            peso[2*len(yc)/4:3*len(yc)/4] = pesoU
            peso[3*len(yc)/4:] = pesoV
p0 = Parameters()
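            # Bounds for the constrained fit (B presumably in gauss, angles in radians)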
p0.add('B', value=p[0], min=1.0, max=2000.)
p0.add('gamma', value=p[1], min=0., max=pi)
p0.add('xi', value=p[2], min=0., max=pi)
p0.add('vlos', value=p[3], min=-5., max=+5.)
p0.add('eta0', value=p[4], min=0., max=15.)
p0.add('a', value=p[5], min=0., max=0.8)
p0.add('ddop', value=p[6], min=0.0, max=0.1)
p0.add('S_0', value=p[7], min=0.0, max=1.5)
p0.add('S_1', value=p[8], min=0.0, max=0.7)
# stokes0 = stokesSyn(param, x, B, gamma, xi, vlos, eta0, a, ddop, S_0, S_1)
[ysync, out] = inversionStokes(p0, x, yc, param, Chitol, int(Maxifeviter[niter]), peso)
# print('Time: {0:2.4f} s'.format(time.time()-time0))
# print(fit_report(out, show_correl=False))
vals = out.params
yinver[i, j, 0] = vals['B'].value
yinver[i, j, 1] = vals['gamma'].value
yinver[i, j, 2] = vals['xi'].value
yinver[i, j, 3] = vals['vlos'].value
yinver[i, j, 4] = vals['eta0'].value
yinver[i, j, 5] = vals['a'].value
yinver[i, j, 6] = vals['ddop'].value
yinver[i, j, 7] = vals['S_0'].value
yinver[i, j, 8] = vals['S_1'].value
yinver[i, j, 9] = out.chisqr
yinver[i, j, 10] = out.nfev
# print('nfev: {0}'.format(out.nfev))
for mag in range(9):
yinver[:, :, mag] = ndimage.gaussian_filter(yinver[:, :, mag], sigma=sigmaiter[niter])
    # Convert to degrees:
for i in range(obs.shape[0]):
for j in range(obs.shape[2]):
yinver[i, j, 1] = grad(yinver[i, j, 1])
yinver[i, j, 2] = grad(yinver[i, j, 2])
print('Time: {0:2.4f} s'.format(time.time()-time0))
print('Time_per_pixel: {0:2.4f} s'.format((time.time()-time0)/npix))
np.save(finalsave, yinver)
# Desktop notification (notify-send, 20 s timeout)
os.system('notify-send -i face-cool "===> MPyHazel <===" --expire-time=20000')
# PLOT
titulos = ['B', 'thetaB', 'phiB', 'vlos', 'eta0', 'a', 'ddop', 'S_0', 'S_1', 'chi2']
# plt.figure(1, figsize(18,9))
for i in range(9):
plt.subplot(3, 3, i+1)
plt.imshow(yinver[:, :, i], cmap='cubehelix', origin='lower', interpolation='none')
plt.title(titulos[i])
plt.colorbar()
plt.tight_layout()
plt.show()
| mit |
maminian/skewtools | scripts/animate_channel_stats_mc.py | 1 | 3485 | #!/usr/bin/python
import sys
import numpy as np
from numpy import *
import matplotlib.pyplot as pyplot
import matplotlib.animation as anim
import h5py
import time
from matplotlib.colors import LogNorm
from matplotlib import gridspec
# ------------------------------------------------
def update(i,fig,axMe,axVar,axSk,axKu,Me,Var,Sk,Ku,mtext,nbins,Pe,t):
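	# Redraw the four statistic panels (mean, variance, skewness, kurtosis)
	# for frame i and refresh the title with the current dimensionless time tau.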
# fig.clear()
# gs = gridspec.GridSpec(2,2)
# axMe = fig.add_subplot(gs[0])
# axVar = fig.add_subplot(gs[1])
# axSk = fig.add_subplot(gs[2])
# axKu = fig.add_subplot(gs[3])
# mtext.remove()
mtext.set_text(r'Channel stats on slices; $\tau = %.2e$'%t[i])
# fig.canvas.draw()
ya = np.linspace(-1.,1.,nbins)
pyplot.sca(axMe)
pyplot.cla()
pyplot.step(ya,Me[i,:],where='mid')
# axMe.set_xticks([])
# pyplot.xlabel(r'$y$')
memin = Me[:(i+1),:].min()
memax = Me[:(i+1),:].max()
axMe.set_ylim([1.1*memin,1.1*memax])
pyplot.title(r'Mean')
# axMe.set_ylim([-2./3*Pe*t[i],1./3*Pe*t[i]])
pyplot.sca(axVar)
pyplot.cla()
pyplot.step(ya,Var[i,:]/(2.*t[i]),where='mid')
# axVar.set_xticks([])
# pyplot.xlabel(r'$y$')
merp=max(1.,Var[:(i+1),:].max()/(2.*t[i]))
axVar.set_ylim([0.,1.1*merp])
pyplot.title(r'Variance/$2 \tau$')
# axVar.set_ylim([2.*t[i]])
pyplot.sca(axSk)
pyplot.cla()
pyplot.step(ya,Sk[i,:],where='mid')
# axSk.set_xticks([])
pyplot.xlabel(r'$y$')
pyplot.title(r'Skewness')
axSk.set_ylim([-3.,2.])
pyplot.sca(axKu)
pyplot.cla()
pyplot.step(ya,Ku[i,:],where='mid')
pyplot.title(r'Kurtosis')
pyplot.xlabel(r'$y$')
axKu.set_ylim([-2.,5.])
return axMe,axVar,axSk,axKu
def animate_stats(Me,Var,Sk,Ku,nbins,Pe,t):
# We're going to have four plots, so need to create space
# accordingly.
fig = pyplot.figure(figsize=(12,8))
gs = gridspec.GridSpec(2,2)
axMe = fig.add_subplot(gs[0])
axVar = fig.add_subplot(gs[1])
axSk = fig.add_subplot(gs[2])
axKu = fig.add_subplot(gs[3])
	mtext = fig.suptitle(r'Channel stats on slices; $\tau = %.2e$'%t[0],fontsize=20)
# mmap = pyplot.cm.hot
# mmap.set_under(color='white')
# pyplot.tight_layout()
ani = anim.FuncAnimation( fig, update, frames=size(t),
fargs=(fig,axMe,axVar,axSk,axKu,Me,Var,Sk,Ku,mtext,nbins,Pe,t),repeat=False)
return ani
# --------------------------------------------
print "Reading data..."
walks = h5py.File(sys.argv[1])
print "Constructing animation..."
nbins = int(walks['nBins'].value)
Me = transpose(walks['Mean'].value)
Var = transpose(walks['Variance'].value)
Sk = transpose(walks['Skewness'].value)
Ku = transpose(walks['Kurtosis'].value)
t = transpose(walks['Time'].value)
Pe = walks['Peclet'].value
walks.close()
ani = animate_stats(Me,Var,Sk,Ku,nbins,Pe,t)
if (True):
print "Saving animation to disk (may take quite a while)..."
#
	# dpi=120 gives decent quality for the file size. Takes about 5-10 minutes to make.
	# the output filename (e.g. '***.mp4') is taken directly from the second command-line argument.
fname = sys.argv[2]
# mywriter = anim.ImageMagickFileWriter()
mywriter = anim.FFMpegWriter()
ani.save(fname,dpi=120,fps=12,writer=mywriter)
else:
pyplot.show(block=False)
# end if
print "Done."
| gpl-3.0 |
dsm054/pandas | pandas/tests/frame/test_alter_axes.py | 1 | 53206 | # -*- coding: utf-8 -*-
from __future__ import print_function
import inspect
import pytest
from datetime import datetime, timedelta
import numpy as np
from pandas.compat import lrange, PY2
from pandas import (DataFrame, Series, Index, MultiIndex, RangeIndex,
IntervalIndex, DatetimeIndex, Categorical, cut,
Timestamp, date_range, to_datetime)
from pandas.core.dtypes.common import (
is_object_dtype,
is_categorical_dtype,
is_interval_dtype)
import pandas.util.testing as tm
class TestDataFrameAlterAxes():
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match='Length mismatch'):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match='Length mismatch'):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]},
index=[2010, 2011, 2012])
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('inplace', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols,
drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays([df.index] + [df[x] for x in keys],
names=[None] + keys)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize('keys', ['A', 'C', ['A', 'B'],
('tuple', 'as', 'label')])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols,
drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(['D'], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(['D'] + keys,
drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})
expected = DataFrame({'val': [1, 2]},
Index(['b', 'c'], name='key'))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index('key')
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize('box', [Series, Index, np.array,
list, tuple, iter, lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x])])
@pytest.mark.parametrize('append, index_name', [(True, None),
(True, 'B'), (True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_single_array(self, frame_of_index_cols,
drop, append, index_name, box):
df = frame_of_index_cols
df.index.name = index_name
key = box(df['B'])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/tuple/iter/list-of-list "forget" the name of B
name_mi = getattr(key, 'names', None)
name = [getattr(key, 'name', None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(['B'], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize('box', [Series, Index, np.array,
list, tuple, iter,
lambda x: MultiIndex.from_arrays([x])])
@pytest.mark.parametrize('append, index_name',
[(True, None), (True, 'A'), (True, 'B'),
(True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_arrays(self, frame_of_index_cols,
drop, append, index_name, box):
df = frame_of_index_cols
df.index.name = index_name
keys = ['A', box(df['B'])]
# np.array/list/tuple/iter "forget" the name of B
names = ['A', None if box in [np.array, list, tuple, iter] else 'B']
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(['A', 'B'], drop=False, append=append)
expected = expected.drop('A', axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize('box2', [Series, Index, np.array,
list, tuple, iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name])
@pytest.mark.parametrize('box1', [Series, Index, np.array,
list, tuple, iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name])
@pytest.mark.parametrize('append, index_name', [(True, None),
(True, 'A'), (True, 'test'), (False, None)])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_arrays_duplicate(self, frame_of_index_cols, drop,
append, index_name, box1, box2):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df['A']), box2(df['A'])]
result = df.set_index(keys, drop=drop, append=append)
# if either box was iter, the content has been consumed; re-read it
keys = [box1(df['A']), box2(df['A'])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = False if (keys[0] is 'A' and keys[1] is 'A') else drop
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be illegal
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('append', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols,
drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(['A', 'B'], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match='Index has duplicate keys'):
df.set_index('A', verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match='Index has duplicate keys'):
df.set_index([df['A'], df['A']], verify_integrity=True)
@pytest.mark.parametrize('append', [True, False])
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_raise(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(['foo', 'bar', 'baz'], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match='X'):
df.set_index([df['A'], df['B'], 'X'], drop=drop, append=append)
msg = 'The parameter "keys" may only contain a combination of.*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(set(df['A']), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(['A', df['A'], set(df['A'])],
drop=drop, append=append)
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = 'B'
# with Categorical
df = DataFrame({'A': np.random.randn(10),
'B': ci.values})
idf = df.set_index('B')
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({'A': np.random.randn(10),
'B': ci})
idf = df.set_index('B')
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index('B')
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)],
'B': np.random.randn(1000)})
idf = df.set_index('A')
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(to_datetime(['2013-1-1 13:00',
'2013-1-2 14:00']),
name='B').tz_localize('US/Pacific')
df = DataFrame(np.random.randn(2, 1), columns=['A'])
expected = Series(np.array([Timestamp('2013-01-01 13:00:00-0800',
tz='US/Pacific'),
Timestamp('2013-01-02 14:00:00-0800',
tz='US/Pacific')],
dtype="object"), name='B')
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df['B'] = idx
result = df['B']
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
df['B'] = idx.to_series(index=[0, 1])
result = df['B']
comp = Series(DatetimeIndex(expected.values).tz_localize(None),
name='B')
tm.assert_series_equal(result, comp)
# list of datetimes with a tz
df['B'] = idx.to_pydatetime()
result = df['B']
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame(
[{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}])
expected = df.set_index('ts')
df.index = df['ts']
df.pop('ts')
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range('1/1/2011', periods=5,
freq='D', tz=tz, name='idx')
df = DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},
index=idx)
expected = DataFrame({'idx': [datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5)],
'a': range(5),
'b': ['A', 'B', 'C', 'D', 'E']},
columns=['idx', 'a', 'b'])
expected['idx'] = expected['idx'].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"],
utc=True).tz_convert('Europe/Rome')
df = DataFrame({'A': idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range('2006-10-29 00:00:00', periods=3,
freq='H', tz='US/Pacific')
df = DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=di).reset_index()
# single level
res = df.set_index('index')
exp = DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]},
index=Index(di, name='index'))
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(['index', 'a'])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]],
names=['index', 'a'])
exp = DataFrame({'b': [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name='x')
original = DataFrame({'x': idx, 'y': np.arange(10)})[['x', 'y']]
result = original.set_index('x')
expected = DataFrame({'y': np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame([
{'a': 1, 'p': 0},
{'a': 2, 'm': 10},
{'a': 3, 'm': 11, 'p': 20},
{'a': 4, 'm': 12, 'p': 21}
], columns=('a', 'm', 'p', 'x'))
result = df.set_index(['a', 'x'])
expected = df[['m', 'p']]
expected.index = MultiIndex.from_arrays([df['a'], df['x']],
names=['a', 'x'])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match='Length mismatch'):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range('2011/01/01', periods=6, freq='M', tz='US/Eastern')
idx2 = date_range('2013', periods=6, freq='A', tz='Asia/Tokyo')
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(datetime(2015, 10, 1),
datetime(2015, 10, 1, 23),
freq='H', tz='US/Eastern')
df = DataFrame(np.random.randn(24, 1), columns=['a'], index=index)
new_index = date_range(datetime(2015, 10, 2),
datetime(2015, 10, 2, 23),
freq='H', tz='US/Eastern')
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {
'A': 'a',
'B': 'b',
'C': 'c',
'D': 'd'
}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(renamed2.rename(columns=str.upper),
float_frame, check_names=False)
# index
data = {
'A': {'foo': 0, 'bar': 1}
}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index, Index(['foo', 'bar']))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(['BAR', 'FOO']))
# have to pass something
pytest.raises(TypeError, float_frame.rename)
# partial columns
renamed = float_frame.rename(columns={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.columns, Index(['A', 'B', 'foo', 'bar']))
# other axis
renamed = float_frame.T.rename(index={'C': 'foo', 'D': 'bar'})
tm.assert_index_equal(renamed.index, Index(['A', 'B', 'foo', 'bar']))
# index with name
index = Index(['foo', 'bar'], name='name')
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})
tm.assert_index_equal(renamed.index,
Index(['bar', 'foo'], name='name'))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis('foo')
result = float_frame.copy()
no_return = result.rename_axis('foo', inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis('bar', axis=1)
result = float_frame.copy()
no_return = result.rename_axis('bar', axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_warns(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis(id, axis=0)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis({0: 10, 1: 20}, axis=0)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df.rename_axis(id, axis=1)
assert 'rename' in str(w[0].message)
with tm.assert_produces_warning(FutureWarning) as w:
df['A'].rename_axis(id)
assert 'rename' in str(w[0].message)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([['a', 'b', 'c'], [1, 2]],
names=['ll', 'nn'])
df = DataFrame({'x': [i for i in range(len(mi))],
'y': [i * 10 for i in range(len(mi))]},
index=mi)
# Test for rename of the Index object of columns
result = df.rename_axis('cols', axis=1)
tm.assert_index_equal(result.columns,
Index(['x', 'y'], name='cols'))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={'cols': 'new'}, axis=1)
tm.assert_index_equal(result.columns,
Index(['x', 'y'], name='new'))
# Test for renaming index using dict
result = df.rename_axis(index={'ll': 'foo'})
assert result.index.names == ['foo', 'nn']
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ['LL', 'NN']
# Test for renaming index providing complete list
result = df.rename_axis(index=['foo', 'goo'])
assert result.index.names == ['foo', 'goo']
# Test for changing index and columns at same time
sdf = df.reset_index().set_index('nn').drop(columns=['ll', 'y'])
result = sdf.rename_axis(index='foo', columns='meh')
assert result.index.name == 'foo'
assert result.columns.name == 'meh'
# Test different error cases
with pytest.raises(TypeError, match='Must pass'):
df.rename_axis(index='wrong')
with pytest.raises(ValueError, match='Length of names'):
df.rename_axis(index=['wrong'])
with pytest.raises(TypeError, match='bogus'):
df.rename_axis(bogus=None)
def test_rename_multiindex(self):
tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]
tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]
index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])
columns = MultiIndex.from_tuples(
tuples_columns, names=['fizz', 'buzz'])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar3')],
names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'),
('fizz2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'buzz1'),
('fizz2', 'buzz3')],
names=['fizz', 'buzz'])
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'},
level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples([('FIZZ1', 'buzz1'),
('FIZZ2', 'buzz2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='fizz')
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples([('fizz1', 'BUZZ1'),
('fizz2', 'BUZZ2')],
names=['fizz', 'buzz'])
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level='buzz')
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples([('foo3', 'bar1'),
('foo2', 'bar2')],
names=['foo', 'bar'])
renamed = df.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},
level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={'C': 'foo'}, copy=False)
renamed['foo'] = 1.
assert (float_frame['C'] == 1.).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={'C': 'foo'})
assert 'C' in float_frame
assert 'foo' not in float_frame
c_id = id(float_frame['C'])
float_frame = float_frame.copy()
float_frame.rename(columns={'C': 'foo'}, inplace=True)
assert 'C' not in float_frame
assert 'foo' in float_frame
assert id(float_frame['foo']) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]})
df = df.rename(columns={0: 'a'})
df = df.rename(columns={1: 'b'})
df = df.set_index(['a', 'b'])
df.columns = ['2001-01-01']
expected = DataFrame([[1], [2]],
index=MultiIndex.from_tuples(
[('foo', 'bah'), ('bar', 'bas')],
names=['a', 'b']),
columns=['2001-01-01'])
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)],
columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)],
columns=["a"])
tm.assert_frame_equal(df, expected)
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(['L0', 'L1', 'L2'])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},
index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(['L0', 'L0', 'L0'])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({'foo': stacked, 'bar': stacked})
names = ['first', 'second']
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, lab) in enumerate(zip(stacked.index.levels,
stacked.index.labels)):
values = lev.take(lab)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(deleveled['first'], deleveled2['level_0'],
check_names=False)
tm.assert_series_equal(deleveled['second'], deleveled2['level_1'],
check_names=False)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name='index')
tm.assert_series_equal(rdf['index'], exp)
# default name assigned, corner case
df = float_frame.copy()
df['index'] = 'foo'
rdf = df.reset_index()
exp = Series(float_frame.index.values, name='level_0')
tm.assert_series_equal(rdf['level_0'], exp)
# but this is ok
float_frame.index.name = 'index'
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled['index'], Series(float_frame.index))
tm.assert_index_equal(deleveled.index,
Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = 'columns'
resetted = float_frame.reset_index()
assert resetted.columns.name == 'columns'
# only remove certain columns
df = float_frame.reset_index().set_index(['index', 'A', 'B'])
rs = df.reset_index(['A', 'B'])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(['index', 'A', 'B'])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(['index', 'A', 'B'])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index('A')
xp = float_frame.reset_index().set_index(['index', 'B'])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(['index', 'A', 'B'])
rs = df.reset_index('A', drop=True)
xp = float_frame.copy()
del xp['A']
xp = xp.set_index(['B'], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'C', 'D'],
index=Index(range(2), name='x'))
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
columns=['A', 'B', 'C', 'D'])
for levels in ['A', 'B'], [0, 1]:
# With MultiIndex
result = df.set_index(['A', 'B']).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index('B'))
result = df.set_index(['A', 'B']).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(['A', 'B']).reset_index(level=levels,
drop=True)
tm.assert_frame_equal(result, df[['C', 'D']])
# With single-level Index (GH 16263)
result = df.set_index('A').reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index('A').reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(['A']).reset_index(level=levels[0],
drop=True)
tm.assert_frame_equal(result, df[['B', 'C', 'D']])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ['A', 'B'], ['A']:
with pytest.raises(KeyError, match='Level E '):
df.set_index(idx_lev).reset_index(level=['A', 'E'])
with pytest.raises(IndexError, match='Too many levels'):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series((9.81 * time ** 2) / 2,
index=Index(time, name='time'),
name='speed')
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted['time'].dtype == np.float64
resetted = df.reset_index()
assert resetted['time'].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ['x', 'y', 'z']
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(vals, Index(idx, name='a'),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
rs = df.reset_index()
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill='blah')
xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
df = DataFrame(vals,
MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],
names=['d', 'a']),
columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])
        rs = df.reset_index('a')
xp = DataFrame(full, Index([0, 1, 2], name='d'),
columns=[['a', 'b', 'b', 'c'],
['', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill=None)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['a', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
rs = df.reset_index('a', col_fill='blah', col_level=1)
xp = DataFrame(full, Index(lrange(3), name='d'),
columns=[['blah', 'b', 'b', 'c'],
['a', 'mean', 'median', 'mean']])
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': [np.nan, 'b', 'c'],
'B': [0, 1, 2],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [0, 1, 2],
'C': [np.nan, 1.1, 2.2]})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({'A': ['a', 'b', 'c'],
'B': [np.nan, np.nan, np.nan],
'C': np.random.rand(3)})
rs = df.set_index(['A', 'B']).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = DataFrame([[1, 2], [3, 4]],
columns=date_range('1/1/2013', '1/2/2013'),
index=['A', 'B'])
result = df.reset_index()
expected = DataFrame([['A', 1, 2], ['B', 3, 4]],
columns=['index', datetime(2013, 1, 1),
datetime(2013, 1, 2)])
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = DataFrame([[0, 0], [1, 1]], columns=['A', 'B'],
index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame([[0, 0, 0], [1, 1, 1]],
columns=['index', 'A', 'B'],
index=RangeIndex(stop=2))
tm.assert_frame_equal(result, expected)
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = 'name'
assert df.set_index(df.index).index.names == ['name']
mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])
mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,
names=['A', 'B', 'C', 'D'])
df = df.set_index(['A', 'B'])
assert df.set_index(df.index).index.names == ['A', 'B']
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(['C', 'D'])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert 'FOO' in renamed
assert 'foo' not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['X', 'Y'])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y'])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis='columns')
tm.assert_frame_equal(result, expected)
result = df.rename({"A": 'a', 'B': 'b'}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": 'a', 'B': 'b'}, axis='columns')
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y'])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis='index')
tm.assert_frame_equal(result, expected)
result = df.rename({'X': 'x', 'Y': 'y'}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({'X': 'x', 'Y': 'y'}, axis='index')
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis='index')
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ['a', 'b'], "B": ['c', 'd'],
'C': [1, 2]}).set_index(["A", "B"])
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=['X', 'Y'])
result = df.rename(str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# see gh-12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
# Named target and axis
over_spec_msg = ("Cannot specify both 'axis' and "
"any of 'index' or 'columns'")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=1)
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(columns=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, axis="columns")
# Too many targets
over_spec_msg = "Cannot specify all of 'mapper', 'index', 'columns'."
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, str.lower)
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.rename(id, mapper=id)
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame([[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=['a', 'b', 'c'],
columns=['d', 'e', 'f'])
res1 = df.reindex(['b', 'a'])
res2 = df.reindex(index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'])
res4 = df.reindex(labels=['b', 'a'], axis=0)
res5 = df.reindex(['b', 'a'], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=['e', 'd'])
res2 = df.reindex(['e', 'd'], axis=1)
res3 = df.reindex(labels=['e', 'd'], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=['b', 'a'], columns=['e', 'd'])
res2 = df.reindex(columns=['e', 'd'], index=['b', 'a'])
res3 = df.reindex(labels=['b', 'a'], axis=0).reindex(labels=['e', 'd'],
axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_rename_positional(self):
df = DataFrame(columns=['A', 'B'])
with tm.assert_produces_warning(FutureWarning) as rec:
result = df.rename(None, str.lower)
expected = DataFrame(columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
assert len(rec) == 1
message = str(rec[0].message)
assert 'rename' in message
assert 'Use named arguments' in message
def test_assign_columns(self, float_frame):
float_frame['hi'] = 'there'
df = float_frame.copy()
df.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']
tm.assert_series_equal(float_frame['C'], df['baz'], check_names=False)
tm.assert_series_equal(float_frame['hi'], df['foo2'],
check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame({'A': [1, 2, 1, 1, 2],
'B': [10, 16, 22, 28, 34],
'C1': Categorical(list("abaab"),
categories=list("bac"),
ordered=False),
'C2': Categorical(list("abaab"),
categories=list("bac"),
ordered=True)})
for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_ambiguous_warns(self):
df = DataFrame({"A": [1, 2]})
with tm.assert_produces_warning(FutureWarning):
df.rename(id, id)
with tm.assert_produces_warning(FutureWarning):
df.rename({0: 10}, {"A": "B"})
@pytest.mark.skipif(PY2, reason="inspect.signature")
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {"self", "mapper", "index", "columns", "axis",
"inplace", "copy", "level"}
@pytest.mark.skipif(PY2, reason="inspect.signature")
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {"self", "labels", "index", "columns", "axis",
"limit", "copy", "level", "method",
"fill_value", "tolerance"}
def test_droplevel(self):
# GH20342
df = DataFrame([
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]
])
df = df.set_index([0, 1]).rename_axis(['a', 'b'])
df.columns = MultiIndex.from_tuples([('c', 'e'), ('d', 'f')],
names=['level_1', 'level_2'])
# test that dropping of a level in index works
expected = df.reset_index('a', drop=True)
result = df.droplevel('a', axis='index')
tm.assert_frame_equal(result, expected)
# test that dropping of a level in columns works
expected = df.copy()
expected.columns = Index(['c', 'd'], name='level_1')
result = df.droplevel('level_2', axis='columns')
tm.assert_frame_equal(result, expected)
class TestIntervalIndex(object):
def test_setitem(self):
df = DataFrame({'A': range(10)})
s = cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
df['B'] = s
df['C'] = np.array(s)
df['D'] = s.values
df['E'] = np.array(s.values)
assert is_categorical_dtype(df['B'])
assert is_interval_dtype(df['B'].cat.categories)
assert is_categorical_dtype(df['D'])
assert is_interval_dtype(df['D'].cat.categories)
assert is_object_dtype(df['C'])
assert is_object_dtype(df['E'])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B), check_names=False)
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df['B'], df['B'], check_names=False)
tm.assert_series_equal(df['B'], df['D'], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df['C'], df['C'], check_names=False)
tm.assert_series_equal(df['C'], df['E'], check_names=False)
def test_set_reset_index(self):
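        # smoke test: an interval-valued categorical column should survive a
        # set_index/reset_index round trip without raising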
df = DataFrame({'A': range(10)})
s = cut(df.A, 5)
df['B'] = s
df = df.set_index('B')
df = df.reset_index()
def test_set_axis_inplace(self):
# GH14636
df = DataFrame({'A': [1.1, 2.2, 3.3],
'B': [5.0, 6.1, 7.2],
'C': [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012])
expected = {0: df.copy(),
1: df.copy()}
expected[0].index = list('abc')
expected[1].columns = list('abc')
expected['index'] = expected[0]
expected['columns'] = expected[1]
for axis in expected:
# inplace=True
# The FutureWarning comes from the fact that we would like to have
# inplace default to False some day
for inplace, warn in (None, FutureWarning), (True, None):
kwargs = {'inplace': inplace}
result = df.copy()
with tm.assert_produces_warning(warn):
result.set_axis(list('abc'), axis=axis, **kwargs)
tm.assert_frame_equal(result, expected[axis])
# inplace=False
result = df.set_axis(list('abc'), axis=axis, inplace=False)
tm.assert_frame_equal(expected[axis], result)
# omitting the "axis" parameter
with tm.assert_produces_warning(None):
result = df.set_axis(list('abc'), inplace=False)
tm.assert_frame_equal(result, expected[0])
# wrong values for the "axis" parameter
for axis in 3, 'foo':
with pytest.raises(ValueError, match='No axis named'):
df.set_axis(list('abc'), axis=axis, inplace=False)
def test_set_axis_prior_to_deprecation_signature(self):
df = DataFrame({'A': [1.1, 2.2, 3.3],
'B': [5.0, 6.1, 7.2],
'C': [4.4, 5.5, 6.6]},
index=[2010, 2011, 2012])
expected = {0: df.copy(),
1: df.copy()}
expected[0].index = list('abc')
expected[1].columns = list('abc')
expected['index'] = expected[0]
expected['columns'] = expected[1]
# old signature
for axis in expected:
with tm.assert_produces_warning(FutureWarning):
result = df.set_axis(axis, list('abc'), inplace=False)
tm.assert_frame_equal(result, expected[axis])
| bsd-3-clause |
kylerbrown/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
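
The plots below compare the ARD and OLS weight estimates with the ground
truth, show the histogram of the estimated weights, and track the marginal
log-likelihood across iterations.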
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
nomadcube/scikit-learn | sklearn/metrics/tests/test_ranking.py | 75 | 40883 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
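    # diff_matrix[i, j] = pos[j] - neg[i]; a positive entry means that
    # (negative, positive) pair is ranked correctly by the score.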
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve starting at 0 and ending at
    # 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various label
    # space sizes in the absence of ties
    # Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
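                # A relevant label at absolute position pos + r (0-based) has
                # rank pos + r + 1 and exactly r + 1 relevant labels ranked at
                # or above it, so it contributes (r + 1) / (pos + r + 1); the
                # expected LRAP below is the mean over the n_relevant labels.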
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account
        # e.g. two labels tied for rank 1 (ex aequo) both get rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Let's count the number of relevant labels with a better
            # (smaller) rank.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
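# Illustrative sketch added for clarity (not part of the original test suite
# and not collected by nose): the tie-corrected ranks used in `_my_lrap` above.
# Both labels tied on the best score receive the corrected rank 2, so a single
# relevant label placed on one of them is scored 1 / 2.
def _my_lrap_tie_example():
    assert_almost_equal(_my_lrap(np.array([[1, 0, 0]]),
                                 np.array([[0.5, 0.5, 0.25]])), 0.5)
    assert_almost_equal(label_ranking_average_precision_score(
        [[1, 0, 0]], [[0.5, 0.5, 0.25]]), 0.5)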
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
gladk/woodem | examples/old/concrete/uniax.py | 3 | 7655 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import division
from woo import utils,plot,pack,timing,eudoxos
import time, sys, os, copy
#import matplotlib
#matplotlib.rc('text',usetex=True)
#matplotlib.rc('text.latex',preamble=r'\usepackage{concrete}\usepackage{euler}')
"""
A fairly complex script performing a uniaxial tension-compression test on a hyperboloid-shaped specimen.
Most parameters of the model (and of the setup) can be read from a table using woo-multi.
After the simulation setup, tension loading is run and stresses are periodically saved for plotting
as well as checked for dropping below the maximum value reached so far, which indicates failure (see the
stopIfDamaged function). After failure in tension, the original setup is loaded anew and the sense of loading
is reversed. After failure in compression, strain-stress curves are saved via plot.saveGnuplot and we exit,
giving some useful information like peak stresses in tension/compression.
Running this script for the first time can take a long time, as the specimen is prepared using triaxial
compression. Next time, however, an attempt is made to load a previously-generated packing
(from /tmp/triaxPackCache.sqlite) and this expensive procedure is avoided.
The specimen length can be specified; its diameter is half of the length and the skirt of the hyperboloid is
4/5 of the width.
The particle size is constant and can be specified using the sphereRadius parameter.
The 3d display has displacement scaling applied, so that the fracture looks more spectacular. The scale
is 1000 for tension and 100 for compression.
"""
# default parameters or from table
utils.readParamsFromTable(noTableOk=True, # unknownOk=True,
young=24e9,
poisson=.2,
G_over_E=.20,
sigmaT=3.5e6,
frictionAngle=atan(0.8),
epsCrackOnset=1e-4,
relDuctility=30,
intRadius=1.5,
dtSafety=.8,
damping=0.4,
strainRateTension=.05,
strainRateCompression=.5,
setSpeeds=True,
# 1=tension, 2=compression (ANDed; 3=both)
doModes=3,
specimenLength=.15,
sphereRadius=3.5e-3,
# isotropic confinement (should be negative)
isoPrestress=0,
# use the ScGeom variant
scGeom=False
)
from woo.params.table import *
if 'description' in O.tags.keys(): O.tags['id']=O.tags['id']+O.tags['description']
# make geom; the dimensions are hard-coded here; could be in param table if desired
# z-oriented hyperboloid, length 20cm, diameter 10cm, skirt 8cm
# using spheres 7mm of diameter
concreteId=O.materials.append(CpmMat(young=young,frictionAngle=frictionAngle,poisson=poisson,density=4800,sigmaT=sigmaT,relDuctility=relDuctility,epsCrackOnset=epsCrackOnset,G_over_E=G_over_E,isoPrestress=isoPrestress))
spheres=pack.randomDensePack(pack.inHyperboloid((0,0,-.5*specimenLength),(0,0,.5*specimenLength),.25*specimenLength,.17*specimenLength),spheresInCell=2000,radius=sphereRadius,memoizeDb='/tmp/triaxPackCache.sqlite',material=concreteId)
#spheres=pack.randomDensePack(pack.inAlignedBox((-.25*specimenLength,-.25*specimenLength,-.5*specimenLength),(.25*specimenLength,.25*specimenLength,.5*specimenLength)),spheresInCell=2000,radius=sphereRadius,memoizeDb='/tmp/triaxPackCache.sqlite')
O.bodies.append(spheres)
bb=utils.uniaxialTestFeatures()
negIds,posIds,axis,crossSectionArea=bb['negIds'],bb['posIds'],bb['axis'],bb['area']
O.dt=dtSafety*utils.PWaveTimeStep()
print 'Timestep',O.dt
mm,mx=[pt[axis] for pt in utils.aabbExtrema()]
coord_25,coord_50,coord_75=mm+.25*(mx-mm),mm+.5*(mx-mm),mm+.75*(mx-mm)
area_25,area_50,area_75=utils.approxSectionArea(coord_25,axis),utils.approxSectionArea(coord_50,axis),utils.approxSectionArea(coord_75,axis)
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(aabbEnlargeFactor=intRadius,label='is2aabb'),],sweepLength=.05*sphereRadius,nBins=5,binCoeff=5),
InteractionLoop(
[Ig2_Sphere_Sphere_Dem3DofGeom(distFactor=intRadius,label='ss2d3dg') if not scGeom else Ig2_Sphere_Sphere_ScGeom(interactionDetectionFactor=intRadius,label='ss2sc')],
[Ip2_CpmMat_CpmMat_CpmPhys()],
[Law2_Dem3DofGeom_CpmPhys_Cpm(epsSoft=0) if not scGeom else Law2_ScGeom_CpmPhys_Cpm()],
),
NewtonIntegrator(damping=damping,label='damper'),
CpmStateUpdater(realPeriod=1),
UniaxialStrainer(strainRate=strainRateTension,axis=axis,asymmetry=0,posIds=posIds,negIds=negIds,crossSectionArea=crossSectionArea,blockDisplacements=False,blockRotations=False,setSpeeds=setSpeeds,label='strainer'),
PyRunner(virtPeriod=1e-6/strainRateTension,realPeriod=1,command='addPlotData()',label='plotDataCollector',initRun=True),
PyRunner(realPeriod=4,command='stopIfDamaged()',label='damageChecker'),
]
#O.miscParams=[Gl1_CpmPhys(dmgLabel=False,colorStrain=False,epsNLabel=False,epsT=False,epsTAxes=False,normal=False,contactLine=True)]
# plot stresses in ¼, ½ and ¾ if desired as well; too crowded in the graph that includes confinement, though
plot.plots={'eps':('sigma',)} #,'sigma.50')},'t':('eps')} #'sigma.25','sigma.50','sigma.75')}
O.saveTmp('initial');
O.timingEnabled=False
global mode
mode='tension' if doModes & 1 else 'compression'
def initTest():
global mode
print "init"
if O.iter>0:
O.wait();
O.loadTmp('initial')
print "Reversing plot data"; plot.reverseData()
else: plot.plot()
strainer.strainRate=abs(strainRateTension) if mode=='tension' else -abs(strainRateCompression)
try:
from woo import qt
renderer=qt.Renderer()
renderer.dispScale=(1000,1000,1000) if mode=='tension' else (100,100,100)
except ImportError: pass
print "init done, will now run."
O.step(); # to create initial contacts
# now reset the interaction radius and go ahead
if not scGeom: ss2d3dg.distFactor=-1.
else: ss2sc.interactionDetectionFactor=1.
is2aabb.aabbEnlargeFactor=-1.
O.run()
def stopIfDamaged():
global mode
if O.iter<2 or not plot.data.has_key('sigma'): return # do nothing at the very beginning
sigma,eps=plot.data['sigma'],plot.data['eps']
extremum=max(sigma) if (strainer.strainRate>0) else min(sigma)
minMaxRatio=0.5 if mode=='tension' else 0.5
if extremum==0: return
# uncomment to get graph for the very first time stopIfDamaged() is called
#eudoxos.estimatePoissonYoung(principalAxis=axis,stress=strainer.avgStress,plot=True,cutoff=0.3)
print O.tags['id'],mode,strainer.strain,sigma[-1]
import sys; sys.stdout.flush()
if abs(sigma[-1]/extremum)<minMaxRatio or abs(strainer.strain)>(5e-3 if isoPrestress==0 else 5e-2):
if mode=='tension' and doModes & 2: # only if compression is enabled
mode='compression'
O.save('/tmp/uniax-tension.woo.gz')
print "Saved /tmp/uniax-tension.woo.gz (for use with interaction-histogram.py and uniax-post.py)"
print "Damaged, switching to compression... "; O.pause()
# important! initTest must be launched in a separate thread;
# otherwise O.load would wait for the iteration to finish,
# but it would wait for initTest to return and deadlock would result
import thread; thread.start_new_thread(initTest,())
return
else:
print "Damaged, stopping."
ft,fc=max(sigma),min(sigma)
print 'Strengths fc=%g, ft=%g, |fc/ft|=%g'%(fc,ft,abs(fc/ft))
title=O.tags['description'] if 'description' in O.tags.keys() else O.tags['params']
print 'gnuplot',plot.saveGnuplot(O.tags['id'],title=title)
print 'Bye.'
#O.pause()
sys.exit(0)
def addPlotData():
woo.plot.addData({'t':O.time,'i':O.iter,'eps':strainer.strain,'sigma':strainer.avgStress+isoPrestress,
'sigma.25':utils.forcesOnCoordPlane(coord_25,axis)[axis]/area_25+isoPrestress,
'sigma.50':utils.forcesOnCoordPlane(coord_50,axis)[axis]/area_50+isoPrestress,
'sigma.75':utils.forcesOnCoordPlane(coord_75,axis)[axis]/area_75+isoPrestress,
})
plot.plot()
O.run()
initTest()
utils.waitIfBatch()
| gpl-2.0 |
RPGOne/scikit-learn | sklearn/tests/test_random_projection.py | 141 | 14040 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.metrics import euclidean_distances
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import gaussian_random_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.random_projection import SparseRandomProjection
from sklearn.random_projection import GaussianRandomProjection
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.exceptions import DataDimensionalityWarning
all_sparse_random_matrix = [sparse_random_matrix]
all_dense_random_matrix = [gaussian_random_matrix]
all_random_matrix = set(all_sparse_random_matrix + all_dense_random_matrix)
all_SparseRandomProjection = [SparseRandomProjection]
all_DenseRandomProjection = [GaussianRandomProjection]
all_RandomProjection = set(all_SparseRandomProjection +
all_DenseRandomProjection)
# Make some random data with uniformly located non-zero entries with
# Gaussian-distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros):
rng = np.random.RandomState(0)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def densify(matrix):
if not sp.issparse(matrix):
return matrix
else:
return matrix.toarray()
n_samples, n_features = (10, 1000)
n_nonzeros = int(n_samples * n_features / 100.)
data, data_csr = make_sparse_random_data(n_samples, n_features, n_nonzeros)
###############################################################################
# test on JL lemma
###############################################################################
def test_invalid_jl_domain():
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 1.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, 0.0)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 100, -0.1)
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 0, 0.5)
def test_input_size_jl_min_dim():
assert_raises(ValueError, johnson_lindenstrauss_min_dim,
3 * [100], 2 * [0.9])
assert_raises(ValueError, johnson_lindenstrauss_min_dim, 3 * [100],
2 * [0.9])
johnson_lindenstrauss_min_dim(np.random.randint(1, 10, size=(10, 10)),
0.5 * np.ones((10, 10)))
###############################################################################
# tests random matrix generation
###############################################################################
def check_input_size_random_matrix(random_matrix):
assert_raises(ValueError, random_matrix, 0, 0)
assert_raises(ValueError, random_matrix, -1, 1)
assert_raises(ValueError, random_matrix, 1, -1)
assert_raises(ValueError, random_matrix, 1, 0)
assert_raises(ValueError, random_matrix, -1, 0)
def check_size_generated(random_matrix):
assert_equal(random_matrix(1, 5).shape, (1, 5))
assert_equal(random_matrix(5, 1).shape, (5, 1))
assert_equal(random_matrix(5, 5).shape, (5, 5))
assert_equal(random_matrix(1, 1).shape, (1, 1))
def check_zero_mean_and_unit_norm(random_matrix):
    # All random matrices should produce a transformation matrix
    # with zero mean and unit norm for each column
A = densify(random_matrix(10000, 1, random_state=0))
assert_array_almost_equal(0, np.mean(A), 3)
assert_array_almost_equal(1.0, np.linalg.norm(A), 1)
def check_input_with_sparse_random_matrix(random_matrix):
n_components, n_features = 5, 10
for density in [-1., 0.0, 1.1]:
assert_raises(ValueError,
random_matrix, n_components, n_features, density=density)
def test_basic_property_of_random_matrix():
# Check basic properties of random matrix generation
for random_matrix in all_random_matrix:
yield check_input_size_random_matrix, random_matrix
yield check_size_generated, random_matrix
yield check_zero_mean_and_unit_norm, random_matrix
for random_matrix in all_sparse_random_matrix:
yield check_input_with_sparse_random_matrix, random_matrix
random_matrix_dense = \
lambda n_components, n_features, random_state: random_matrix(
n_components, n_features, random_state=random_state,
density=1.0)
yield check_zero_mean_and_unit_norm, random_matrix_dense
def test_gaussian_random_matrix():
    # Check some statistical properties of Gaussian random matrix
    # Check that the random matrix follows the proper distribution.
    # Let's say that each element a_ij of A is taken from
    # a_ij ~ N(0.0, 1 / n_components).
#
n_components = 100
n_features = 1000
A = gaussian_random_matrix(n_components, n_features, random_state=0)
assert_array_almost_equal(0.0, np.mean(A), 2)
assert_array_almost_equal(np.var(A, ddof=1), 1 / n_components, 1)
def test_sparse_random_matrix():
    # Check some statistical properties of sparse random matrix
n_components = 100
n_features = 500
for density in [0.3, 1.]:
s = 1 / density
A = sparse_random_matrix(n_components,
n_features,
density=density,
random_state=0)
A = densify(A)
# Check possible values
values = np.unique(A)
assert_in(np.sqrt(s) / np.sqrt(n_components), values)
assert_in(- np.sqrt(s) / np.sqrt(n_components), values)
if density == 1.0:
assert_equal(np.size(values), 2)
else:
assert_in(0., values)
assert_equal(np.size(values), 3)
        # Check that the random matrix follows the proper distribution.
        # Let's say that each element a_ij of A is taken from
#
# - -sqrt(s) / sqrt(n_components) with probability 1 / 2s
# - 0 with probability 1 - 1 / s
# - +sqrt(s) / sqrt(n_components) with probability 1 / 2s
#
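        # Each indicator event {a_ij == v} is a Bernoulli variable with the
        # probability p listed above, so its sample mean should be close to p
        # and its sample variance close to p * (1 - p), which is what the
        # assertions below check.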
assert_almost_equal(np.mean(A == 0.0),
1 - 1 / s, decimal=2)
assert_almost_equal(np.mean(A == np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.mean(A == - np.sqrt(s) / np.sqrt(n_components)),
1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == 0.0, ddof=1),
(1 - 1 / s) * 1 / s, decimal=2)
assert_almost_equal(np.var(A == np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
assert_almost_equal(np.var(A == - np.sqrt(s) / np.sqrt(n_components),
ddof=1),
(1 - 1 / (2 * s)) * 1 / (2 * s), decimal=2)
###############################################################################
# tests on random projection transformer
###############################################################################
def test_sparse_random_projection_transformer_invalid_density():
for RandomProjection in all_SparseRandomProjection:
assert_raises(ValueError,
RandomProjection(density=1.1).fit, data)
assert_raises(ValueError,
RandomProjection(density=0).fit, data)
assert_raises(ValueError,
RandomProjection(density=-0.1).fit, data)
def test_random_projection_transformer_invalid_input():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').fit, [[0, 1, 2]])
assert_raises(ValueError,
RandomProjection(n_components=-10).fit, data)
def test_try_to_transform_before_fit():
for RandomProjection in all_RandomProjection:
assert_raises(ValueError,
RandomProjection(n_components='auto').transform, data)
def test_too_many_samples_to_find_a_safe_embedding():
data, _ = make_sparse_random_data(1000, 100, 1000)
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=0.1)
expected_msg = (
'eps=0.100000 and n_samples=1000 lead to a target dimension'
' of 5920 which is larger than the original space with'
' n_features=100')
assert_raise_message(ValueError, expected_msg, rp.fit, data)
def test_random_projection_embedding_quality():
data, _ = make_sparse_random_data(8, 5000, 15000)
eps = 0.2
original_distances = euclidean_distances(data, squared=True)
original_distances = original_distances.ravel()
non_identical = original_distances != 0.0
# remove 0 distances to avoid division by 0
original_distances = original_distances[non_identical]
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto', eps=eps, random_state=0)
projected = rp.fit_transform(data)
projected_distances = euclidean_distances(projected, squared=True)
projected_distances = projected_distances.ravel()
# remove 0 distances to avoid division by 0
projected_distances = projected_distances[non_identical]
distances_ratio = projected_distances / original_distances
# check that the automatically tuned values for the density respect the
# contract for eps: pairwise distances are preserved according to the
# Johnson-Lindenstrauss lemma
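        # i.e. (1 - eps) * ||u - v||^2 <= ||p(u) - p(v)||^2 <= (1 + eps) * ||u - v||^2
        # for all sample pairs (u, v), which is what the two bounds on
        # distances_ratio below verify.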
assert_less(distances_ratio.max(), 1 + eps)
assert_less(1 - eps, distances_ratio.min())
def test_SparseRandomProjection_output_representation():
for SparseRandomProjection in all_SparseRandomProjection:
# when using sparse input, the projected data can be forced to be a
# dense numpy array
rp = SparseRandomProjection(n_components=10, dense_output=True,
random_state=0)
rp.fit(data)
assert isinstance(rp.transform(data), np.ndarray)
sparse_data = sp.csr_matrix(data)
assert isinstance(rp.transform(sparse_data), np.ndarray)
# the output can be left to a sparse matrix instead
rp = SparseRandomProjection(n_components=10, dense_output=False,
random_state=0)
rp = rp.fit(data)
# output for dense input will stay dense:
assert isinstance(rp.transform(data), np.ndarray)
# output for sparse output will be sparse:
assert sp.issparse(rp.transform(sparse_data))
def test_correct_RandomProjection_dimensions_embedding():
for RandomProjection in all_RandomProjection:
rp = RandomProjection(n_components='auto',
random_state=0,
eps=0.5).fit(data)
# the number of components is adjusted from the shape of the training
# set
assert_equal(rp.n_components, 'auto')
assert_equal(rp.n_components_, 110)
if RandomProjection in all_SparseRandomProjection:
assert_equal(rp.density, 'auto')
assert_almost_equal(rp.density_, 0.03, 2)
assert_equal(rp.components_.shape, (110, n_features))
projected_1 = rp.transform(data)
assert_equal(projected_1.shape, (n_samples, 110))
# once the RP is 'fitted' the projection is always the same
projected_2 = rp.transform(data)
assert_array_equal(projected_1, projected_2)
# fit transform with same random seed will lead to the same results
rp2 = RandomProjection(random_state=0, eps=0.5)
projected_3 = rp2.fit_transform(data)
assert_array_equal(projected_1, projected_3)
# Try to transform with an input X of size different from fitted.
assert_raises(ValueError, rp.transform, data[:, 1:5])
# it is also possible to fix the number of components and the density
# level
if RandomProjection in all_SparseRandomProjection:
rp = RandomProjection(n_components=100, density=0.001,
random_state=0)
projected = rp.fit_transform(data)
assert_equal(projected.shape, (n_samples, 100))
assert_equal(rp.components_.shape, (100, n_features))
assert_less(rp.components_.nnz, 115) # close to 1% density
assert_less(85, rp.components_.nnz) # close to 1% density
def test_warning_n_components_greater_than_n_features():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
assert_warns(DataDimensionalityWarning,
RandomProjection(n_components=n_features + 1).fit, data)
def test_works_with_sparse_data():
n_features = 20
data, _ = make_sparse_random_data(5, n_features, int(n_features / 4))
for RandomProjection in all_RandomProjection:
rp_dense = RandomProjection(n_components=3,
random_state=1).fit(data)
rp_sparse = RandomProjection(n_components=3,
random_state=1).fit(sp.csr_matrix(data))
assert_array_almost_equal(densify(rp_dense.components_),
densify(rp_sparse.components_))
| bsd-3-clause |
ipashchenko/ml4vs | ml4vs/early_stopping.py | 1 | 8867 | import os
import numpy as np
from sklearn.cross_validation import StratifiedShuffleSplit, cross_val_score, \
StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from xgboost import XGBClassifier
from data_load import load_data, load_data_tgt
from plotting import plot_importance
from matplotlib import pyplot
# Load data
data_dir = '/home/ilya/code/ml4vs/data/dataset_OGLE/indexes_normalized'
file_1 = 'vast_lightcurve_statistics_normalized_variables_only.log'
file_0 = 'vast_lightcurve_statistics_normalized_constant_only.log'
file_0 = os.path.join(data_dir, file_0)
file_1 = os.path.join(data_dir, file_1)
names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2',
'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L',
'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp',
'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR']
names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID',
'Npts']
X, y, df, feature_names, delta = load_data([file_0, file_1], names,
names_to_delete)
imp = Imputer(missing_values='NaN', strategy='median', axis=0, verbose=2)
# This one is good
# model = XGBClassifier(max_depth=5, min_child_weight=1, gamma=0, subsample=0.8,
# colsample_bytree=0.8, scale_pos_weight=150,
# learning_rate=0.03, n_estimators=5000)
model = XGBClassifier(max_depth=5, min_child_weight=3, gamma=0.2, subsample=0.8,
colsample_bytree=0.8, scale_pos_weight=150,
learning_rate=0.01, n_estimators=5000,
reg_alpha=10.**(-5), reg_lambda=10.**(-5))
sss = StratifiedShuffleSplit(y, n_iter=1, test_size=0.25, random_state=7)
for train_index, test_index in sss:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
imp.fit(X_train)
X_trained_scaled = imp.transform(X_train)
# Use the same transformation & imputation for testing data!
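    # (the imputer is fit on the training fold only, so the test fold is
    # transformed with the training medians and no information leaks from it)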
X_test_scaled = imp.transform(X_test)
eval_set = [(X_trained_scaled, y_train), (X_test_scaled, y_test)]
model.fit(X_trained_scaled, y_train, eval_metric=["error", "auc", "logloss"],
eval_set=eval_set, verbose=True, early_stopping_rounds=50)
y_pred = model.predict(X_test_scaled)
    y_predproba = model.predict_proba(X_test_scaled)
accuracy = accuracy_score(y_test, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
f1 = f1_score(y_test, y_pred)
print("F1-score: %.2f%%" % (f1 * 100.0))
precision = precision_score(y_test, y_pred)
print("Precision: %.2f%%" % (precision * 100.0))
recall = recall_score(y_test, y_pred)
print("Recall: %.2f%%" % (recall * 100.0))
precisions, recalls, _ = precision_recall_curve(y_test, y_predproba[:, 1])
# retrieve performance metrics
results = model.evals_result()
epochs = len(results['validation_0']['error'])
x_axis = range(0, epochs)
# plot log loss
fig, ax = pyplot.subplots()
ax.plot(x_axis, results['validation_0']['logloss'], label='Train')
ax.plot(x_axis, results['validation_1']['logloss'], label='Test')
ax.legend()
pyplot.ylabel('Log Loss')
pyplot.title('XGBoost Log Loss')
pyplot.show()
# plot classification error
fig, ax = pyplot.subplots()
ax.plot(x_axis, results['validation_0']['error'], label='Train')
ax.plot(x_axis, results['validation_1']['error'], label='Test')
ax.legend()
pyplot.ylabel('Classification Error')
pyplot.title('XGBoost Classification Error')
pyplot.show()
# plot auc
fig, ax = pyplot.subplots()
ax.plot(x_axis, results['validation_0']['auc'], label='Train')
ax.plot(x_axis, results['validation_1']['auc'], label='Test')
ax.legend(loc='lower left')
pyplot.ylabel('AUC')
pyplot.title('XGBoost AUC')
pyplot.show()
plot_importance(model, feature_names)
# Plot Precision-Recall curve
fig, ax = pyplot.subplots()
ax.plot(recalls, precisions, label='Baseline')
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.set_ylim([0.0, 1.05])
ax.set_xlim([0.0, 1.0])
ax.set_title('XGBoost P-R curve')
ax.legend(loc='lower left')
fig.show()
do_cv = False
if do_cv:
imp = Imputer(missing_values='NaN', strategy='median', axis=0, verbose=2)
steps = [('imputation', imp),
('classification', XGBClassifier(max_depth=5, min_child_weight=1,
gamma=0.0, subsample=0.8,
colsample_bytree=0.8,
scale_pos_weight=150,
learning_rate=0.1,
n_estimators=model.best_ntree_limit))]
pipeline = Pipeline(steps)
kfold = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=7)
# results = cross_val_score(pipeline, X, y, scoring='f1', n_jobs=1)
# print results
# Using grdi search for tuning
param_grid = {#'classification__learning_rate': [0.1, 0.6, 0.01],
#'classification__max_depth': [4, 5, 6],
#'classification__min_child_weight': [2, 3, 4]
#'classification__gamma': [i/10.0 for i in range(0, 5)]
'classification__reg_alpha': [1e-5, 1e-2, 0.1, 1, 100],
'classification__reg_lambda': [1e-5, 1e-2, 0.1, 1, 100]
# 'classification__subsample': [i/10.0 for i in range(6, 10)],
# 'classification__colsample_bytree': [i/10.0 for i in range(6, 10)]
# 'classification__scale_pos_weight': [0., 1., 10, 100, 300]
# 'classification__subsample': [0.5, 0.75, 1]
}
print "Grid search CV..."
gs_cv = GridSearchCV(pipeline, param_grid, scoring="f1", n_jobs=1,
cv=kfold, verbose=1).fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (gs_cv.best_params_, gs_cv.best_score_))
plot_importance(gs_cv.best_estimator_.named_steps['classification'],
feature_names)
# Best parameters
best_xgb_params = gs_cv.best_estimator_.named_steps['classification'].get_params()
best_xgb_params['n_estimators'] = 5000
best_model = XGBClassifier(**best_xgb_params)
sss = StratifiedShuffleSplit(y, n_iter=1, test_size=0.25, random_state=7)
# Working with the same data as baseline model
best_model.fit(X_trained_scaled, y_train, eval_metric=["logloss"],
eval_set=eval_set, verbose=True, early_stopping_rounds=50)
y_predproba_ = best_model.predict_proba(X_test_scaled)
y_pred_ = best_model.predict(X_test_scaled)
precisions_, recalls_, _ = precision_recall_curve(y_test, y_predproba_[:, 1])
accuracy = accuracy_score(y_test, y_pred_)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
f1 = f1_score(y_test, y_pred_)
print("F1-score: %.2f%%" % (f1 * 100.0))
precision = precision_score(y_test, y_pred_)
print("Precision: %.2f%%" % (precision * 100.0))
recall = recall_score(y_test, y_pred_)
print("Recall: %.2f%%" % (recall * 100.0))
# Plot Precision-Recall curve
fig, ax = pyplot.subplots()
ax.plot(recalls, precisions, label='Baseline')
ax.plot(recalls_, precisions_, label='Best CV')
ax.set_xlabel('Recall')
ax.set_ylabel('Precision')
ax.set_ylim([0.0, 1.05])
ax.set_xlim([0.0, 1.0])
ax.set_title('XGBoost P-R curve')
ax.legend(loc='lower left')
fig.show()
predict_target = True
if predict_target:
imp = Imputer(missing_values='NaN', strategy='median', axis=0, verbose=2)
imp.fit(X)
X_scaled = imp.transform(X)
# Use the same transformation & imputation for testing data!
n = model.best_ntree_limit
model = XGBClassifier(max_depth=5, min_child_weight=3, gamma=0.2,
subsample=0.8, colsample_bytree=0.8,
scale_pos_weight=150, learning_rate=0.01,
n_estimators=n, reg_alpha=10.**(-5),
reg_lambda=10.**(-5))
model.fit(X_scaled, y)
file_tgt = 'LMC_SC19_PSF_Pgood98__vast_lightcurve_statistics_normalized.log'
file_tgt = os.path.join(data_dir, file_tgt)
X_tgt, feature_names, df = load_data_tgt(file_tgt, names, names_to_delete, delta)
X_tgt_scaled = imp.transform(X_tgt)
proba_pred = model.predict_proba(X_tgt_scaled)
idx = proba_pred[:, 1] > 0.25
print("Found {} variables".format(np.count_nonzero(idx)))
with open('target_variables.txt', 'w') as fo:
for line in list(df['star_ID'][idx]):
fo.write(line + '\n')
| mit |
ChinaQuants/blaze | blaze/server/tests/test_spider.py | 13 | 1805 | import sys
import os
import json
import pytest
import h5py
import numpy as np
import pandas as pd
from datashape import dshape
from blaze import spider, discover
@pytest.fixture
def data(tmpdir):
csvf = tmpdir.join('foo.csv')
csvf.write('a,b\n1,2\n3,4')
h5f = tmpdir.join('foo.hdf5')
data = np.random.randn(10, 2)
with h5py.File(str(h5f)) as f:
f.create_dataset(name='fooh5', shape=data.shape,
dtype=data.dtype, data=data)
jsonf = tmpdir.mkdir('sub').join('foo.json')
jsonf.write(json.dumps([{'a': 2,
'b': 3.14,
'c': str(pd.Timestamp('now'))},
{'a': 2,
'b': 4.2,
'c': None,
'd': 'foobar'}]))
return tmpdir
@pytest.fixture
def data_with_cycle(data):
data.join('cycle').mksymlinkto(data)
return data
def test_spider(data):
result = spider(str(data))
ss = """{
%r: {
'foo.csv': var * {a: int64, b: int64},
'foo.hdf5': {fooh5: 10 * 2 * float64},
sub: {'foo.json': 2 * {a: int64, b: float64, c: ?datetime, d: ?string}}
}
}""" % os.path.basename(str(data))
assert dshape(discover(result)) == dshape(ss)
@pytest.mark.skipif(sys.platform == 'win32',
reason='Windows does not have symlinks')
def test_spider_cycle(data_with_cycle):
result = spider(str(data_with_cycle), followlinks=True)
ss = """{
%r: {
'foo.csv': var * {a: int64, b: int64},
'foo.hdf5': {fooh5: 10 * 2 * float64},
sub: {'foo.json': 2 * {a: int64, b: float64, c: ?datetime, d: ?string}}
}
}""" % os.path.basename(str(data_with_cycle))
assert dshape(discover(result)) != dshape(ss)
| bsd-3-clause |
andybrnr/QuantEcon.py | quantecon/tests/tests_models/tests_solow/test_impulse_response.py | 7 | 5263 | """
Test suite for the impulse_response.py module.
@author : David R. Pugh
"""
from __future__ import division
import nose
import matplotlib.pyplot as plt
import numpy as np
from .... models.solow import cobb_douglas
params = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15,
'alpha': 0.33, 'delta': 0.05}
model = cobb_douglas.CobbDouglasModel(params)
def test_valid_impulse():
"""Testing validation of impulse attribute."""
# impulse attribute must be a dict
with nose.tools.assert_raises(AttributeError):
model.irf.impulse = (('alpha', 0.75), ('g', 0.04))
    # impulse attribute must have valid keys
with nose.tools.assert_raises(AttributeError):
model.irf.impulse = {'alpha': 0.56, 'bad_key': 0.55}
def test_impulse_response():
"""Testing computation of impulse response."""
original_params = {'A0': 1.0, 'g': 0.01, 'L0': 1.0, 'n': 0.02, 's': 0.15,
'alpha': 0.33, 'delta': 0.05}
model = cobb_douglas.CobbDouglasModel(original_params)
# generate the impulse response
impulse = {'s': 0.30}
model.irf.impulse = impulse
model.irf.kind = 'efficiency_units'
model.irf.T = 500 # need to get "close" to new BGP
actual_ss = model.irf.impulse_response[-1, 1]
# compute steady state following the impulse
model.params.update(impulse)
expected_ss = model.steady_state
nose.tools.assert_almost_equals(actual_ss, expected_ss)
def test_per_capita_impulse_response():
"""Testing computation of per capita impulse response."""
original_params = {'A0': 1.0, 'g': 0.01, 'L0': 1.0, 'n': 0.02, 's': 0.15,
'alpha': 0.33, 'delta': 0.05}
model = cobb_douglas.CobbDouglasModel(original_params)
# generate the per capita impulse response
impulse = {'alpha': 0.15}
model.irf.impulse = impulse
model.irf.kind = 'per_capita'
model.irf.T = 500 # need to get "close" to new BGP
actual_c = model.irf.impulse_response[-1, 3]
# compute steady state following the impulse
model.params.update(impulse)
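    # on the new balanced growth path, per capita consumption is the steady
    # state value in efficiency units scaled up by technology:
    # c_per_capita(T) = A0 * exp(g * T) * c_ss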
A0, g = model.params['A0'], model.params['g']
scaling_factor = A0 * np.exp(g * model.irf.T)
c_ss = model.evaluate_consumption(model.steady_state)
expected_c = c_ss * scaling_factor
nose.tools.assert_almost_equals(actual_c, expected_c)
def test_levels_impulse_response():
"""Testing computation of levels impulse response."""
original_params = {'A0': 1.0, 'g': 0.01, 'L0': 1.0, 'n': 0.02, 's': 0.15,
'alpha': 0.33, 'delta': 0.05}
model = cobb_douglas.CobbDouglasModel(original_params)
# generate the per capita impulse response
impulse = {'delta': 0.15}
model.irf.impulse = impulse
model.irf.kind = 'levels'
model.irf.T = 500 # need to get "close" to new BGP
actual_y = model.irf.impulse_response[-1, 2]
# compute steady state following the impulse
model.params.update(impulse)
A0, g = model.params['A0'], model.params['g']
L0, n = model.params['L0'], model.params['n']
scaling_factor = A0 * L0 * np.exp((g + n) * model.irf.T)
y_ss = model.evaluate_intensive_output(model.steady_state)
expected_y = y_ss * scaling_factor
nose.tools.assert_almost_equals(actual_y, expected_y)
def test_plot_efficiency_units_impulse_response():
"""Testing return type for plot_impulse_response."""
original_params = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15,
'alpha': 0.33, 'delta': 0.05}
model = cobb_douglas.CobbDouglasModel(original_params)
# initialize the impulse
model.irf.impulse = {'delta': 0.25}
model.irf.kind = 'efficiency_units'
fig, ax = plt.subplots(1, 1)
tmp_lines = model.irf.plot_impulse_response(ax, variable='output')
nose.tools.assert_is_instance(tmp_lines, list)
def test_plot_levels_impulse_response():
"""Testing return type for plot_impulse_response."""
original_params = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15,
'alpha': 0.33, 'delta': 0.05}
model = cobb_douglas.CobbDouglasModel(original_params)
# initialize the impulse
model.irf.impulse = {'alpha': 0.25}
model.irf.kind = 'levels'
fig, ax = plt.subplots(1, 1)
tmp_lines = model.irf.plot_impulse_response(ax, variable='output',
log=False)
nose.tools.assert_is_instance(tmp_lines, list)
def test_plot_per_capita_impulse_response():
"""Testing return type for plot_impulse_response."""
original_params = {'A0': 1.0, 'g': 0.02, 'L0': 1.0, 'n': 0.02, 's': 0.15,
'alpha': 0.33, 'delta': 0.05}
model = cobb_douglas.CobbDouglasModel(original_params)
# initialize the impulse
model.irf.impulse = {'g': 0.05}
model.irf.kind = 'per_capita'
fig, ax = plt.subplots(1, 1)
tmp_lines = model.irf.plot_impulse_response(ax, variable='output',
log=True)
nose.tools.assert_is_instance(tmp_lines, list)
def test_valid_kind():
"""Testing validation of the kind attribute."""
    # kind attribute must be a valid string
with nose.tools.assert_raises(AttributeError):
model.irf.kind = 'invalid_kind'
| bsd-3-clause |
tosolveit/scikit-learn | examples/text/hashing_vs_dict_vectorizer.py | 284 | 3265 | """
===========================================
FeatureHasher and DictVectorizer Comparison
===========================================
Compares FeatureHasher and DictVectorizer by using both to vectorize
text documents.
The example demonstrates syntax and speed only; it doesn't actually do
anything useful with the extracted vectors. See the example scripts
{document_classification_20newsgroups,clustering}.py for actual learning
on text documents.
A discrepancy between the number of terms reported for DictVectorizer and
for FeatureHasher is to be expected due to hash collisions.
"""
# Author: Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import re
import sys
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction import DictVectorizer, FeatureHasher
def n_nonzero_columns(X):
"""Returns the number of non-zero columns in a CSR matrix X."""
return len(np.unique(X.nonzero()[1]))
def tokens(doc):
"""Extract tokens from doc.
This uses a simple regex to break strings into tokens. For a more
principled approach, see CountVectorizer or TfidfVectorizer.
"""
return (tok.lower() for tok in re.findall(r"\w+", doc))
def token_freqs(doc):
"""Extract a dict mapping tokens from doc to their frequencies."""
freq = defaultdict(int)
for tok in tokens(doc):
freq[tok] += 1
return freq
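# For example (illustrative): token_freqs("To be or not to be")
# yields the mapping {'to': 2, 'be': 2, 'or': 1, 'not': 1}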
categories = [
'alt.atheism',
'comp.graphics',
'comp.sys.ibm.pc.hardware',
'misc.forsale',
'rec.autos',
'sci.space',
'talk.religion.misc',
]
# Uncomment the following line to use a larger set (11k+ documents)
#categories = None
print(__doc__)
print("Usage: %s [n_features_for_hashing]" % sys.argv[0])
print(" The default number of features is 2**18.")
print()
try:
n_features = int(sys.argv[1])
except IndexError:
n_features = 2 ** 18
except ValueError:
print("not a valid number of features: %r" % sys.argv[1])
sys.exit(1)
print("Loading 20 newsgroups training data")
raw_data = fetch_20newsgroups(subset='train', categories=categories).data
data_size_mb = sum(len(s.encode('utf-8')) for s in raw_data) / 1e6
print("%d documents - %0.3fMB" % (len(raw_data), data_size_mb))
print()
print("DictVectorizer")
t0 = time()
vectorizer = DictVectorizer()
vectorizer.fit_transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % len(vectorizer.get_feature_names()))
print()
print("FeatureHasher on frequency dicts")
t0 = time()
hasher = FeatureHasher(n_features=n_features)
X = hasher.transform(token_freqs(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
print()
print("FeatureHasher on raw tokens")
t0 = time()
hasher = FeatureHasher(n_features=n_features, input_type="string")
X = hasher.transform(tokens(d) for d in raw_data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_size_mb / duration))
print("Found %d unique terms" % n_nonzero_columns(X))
| bsd-3-clause |
alisidd/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 137 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
yydxlv/data-science-from-scratch | code/working_with_data.py | 61 | 16549 | from __future__ import division
from collections import Counter, defaultdict
from functools import partial
from linear_algebra import shape, get_row, get_column, make_matrix, \
vector_mean, vector_sum, dot, magnitude, vector_subtract, scalar_multiply
from statistics import correlation, standard_deviation, mean
from probability import inverse_normal_cdf
from gradient_descent import maximize_batch, maximize_stochastic
import math, random, csv
import matplotlib.pyplot as plt
import dateutil.parser
def bucketize(point, bucket_size):
"""floor the point to the next lower multiple of bucket_size"""
return bucket_size * math.floor(point / bucket_size)
def make_histogram(points, bucket_size):
"""buckets the points and counts how many in each bucket"""
return Counter(bucketize(point, bucket_size) for point in points)
def plot_histogram(points, bucket_size, title=""):
histogram = make_histogram(points, bucket_size)
plt.bar(histogram.keys(), histogram.values(), width=bucket_size)
plt.title(title)
plt.show()
def compare_two_distributions():
random.seed(0)
uniform = [random.randrange(-100,101) for _ in range(200)]
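    # inverse_normal_cdf(random.random()) is a standard normal draw, so
    # scaling by 57 gives draws with mean 0 and standard deviation 57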
normal = [57 * inverse_normal_cdf(random.random())
for _ in range(200)]
plot_histogram(uniform, 10, "Uniform Histogram")
plot_histogram(normal, 10, "Normal Histogram")
def random_normal():
"""returns a random draw from a standard normal distribution"""
return inverse_normal_cdf(random.random())
xs = [random_normal() for _ in range(1000)]
ys1 = [ x + random_normal() / 2 for x in xs]
ys2 = [-x + random_normal() / 2 for x in xs]
def scatter():
plt.scatter(xs, ys1, marker='.', color='black', label='ys1')
plt.scatter(xs, ys2, marker='.', color='gray', label='ys2')
plt.xlabel('xs')
plt.ylabel('ys')
plt.legend(loc=9)
plt.show()
def correlation_matrix(data):
"""returns the num_columns x num_columns matrix whose (i, j)th entry
is the correlation between columns i and j of data"""
_, num_columns = shape(data)
def matrix_entry(i, j):
return correlation(get_column(data, i), get_column(data, j))
return make_matrix(num_columns, num_columns, matrix_entry)
def make_scatterplot_matrix():
# first, generate some random data
num_points = 100
def random_row():
row = [None, None, None, None]
row[0] = random_normal()
row[1] = -5 * row[0] + random_normal()
row[2] = row[0] + row[1] + 5 * random_normal()
row[3] = 6 if row[2] > -2 else 0
return row
random.seed(0)
data = [random_row()
for _ in range(num_points)]
# then plot it
_, num_columns = shape(data)
fig, ax = plt.subplots(num_columns, num_columns)
for i in range(num_columns):
for j in range(num_columns):
# scatter column_j on the x-axis vs column_i on the y-axis
if i != j: ax[i][j].scatter(get_column(data, j), get_column(data, i))
# unless i == j, in which case show the series name
else: ax[i][j].annotate("series " + str(i), (0.5, 0.5),
xycoords='axes fraction',
ha="center", va="center")
# then hide axis labels except left and bottom charts
if i < num_columns - 1: ax[i][j].xaxis.set_visible(False)
if j > 0: ax[i][j].yaxis.set_visible(False)
# fix the bottom right and top left axis labels, which are wrong because
# their charts only have text in them
ax[-1][-1].set_xlim(ax[0][-1].get_xlim())
ax[0][0].set_ylim(ax[0][1].get_ylim())
plt.show()
def parse_row(input_row, parsers):
"""given a list of parsers (some of which may be None)
apply the appropriate one to each element of the input_row"""
return [parser(value) if parser is not None else value
for value, parser in zip(input_row, parsers)]
def parse_rows_with(reader, parsers):
"""wrap a reader to apply the parsers to each of its rows"""
for row in reader:
yield parse_row(row, parsers)
def try_or_none(f):
"""wraps f to return None if f raises an exception
assumes f takes only one input"""
def f_or_none(x):
try: return f(x)
except: return None
return f_or_none
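# redefine parse_row so that a failing parser yields None instead of raising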
def parse_row(input_row, parsers):
return [try_or_none(parser)(value) if parser is not None else value
for value, parser in zip(input_row, parsers)]
def try_parse_field(field_name, value, parser_dict):
"""try to parse value using the appropriate function from parser_dict"""
parser = parser_dict.get(field_name) # None if no such entry
if parser is not None:
return try_or_none(parser)(value)
else:
return value
def parse_dict(input_dict, parser_dict):
return { field_name : try_parse_field(field_name, value, parser_dict)
for field_name, value in input_dict.iteritems() }
#
#
# MANIPULATING DATA
#
#
def picker(field_name):
"""returns a function that picks a field out of a dict"""
return lambda row: row[field_name]
def pluck(field_name, rows):
"""turn a list of dicts into the list of field_name values"""
return map(picker(field_name), rows)
def group_by(grouper, rows, value_transform=None):
# key is output of grouper, value is list of rows
grouped = defaultdict(list)
for row in rows:
grouped[grouper(row)].append(row)
if value_transform is None:
return grouped
else:
return { key : value_transform(rows)
for key, rows in grouped.iteritems() }
def percent_price_change(yesterday, today):
return today["closing_price"] / yesterday["closing_price"] - 1
def day_over_day_changes(grouped_rows):
# sort the rows by date
ordered = sorted(grouped_rows, key=picker("date"))
# zip with an offset to get pairs of consecutive days
return [{ "symbol" : today["symbol"],
"date" : today["date"],
"change" : percent_price_change(yesterday, today) }
for yesterday, today in zip(ordered, ordered[1:])]
#
#
# RESCALING DATA
#
#
def scale(data_matrix):
num_rows, num_cols = shape(data_matrix)
means = [mean(get_column(data_matrix,j))
for j in range(num_cols)]
stdevs = [standard_deviation(get_column(data_matrix,j))
for j in range(num_cols)]
return means, stdevs
def rescale(data_matrix):
"""rescales the input data so that each column
has mean 0 and standard deviation 1
ignores columns with no deviation"""
means, stdevs = scale(data_matrix)
def rescaled(i, j):
if stdevs[j] > 0:
return (data_matrix[i][j] - means[j]) / stdevs[j]
else:
return data_matrix[i][j]
num_rows, num_cols = shape(data_matrix)
return make_matrix(num_rows, num_cols, rescaled)
#
# DIMENSIONALITY REDUCTION
#
X = [
[20.9666776351559,-13.1138080189357],
[22.7719907680008,-19.8890894944696],
[25.6687103160153,-11.9956004517219],
[18.0019794950564,-18.1989191165133],
[21.3967402102156,-10.8893126308196],
[0.443696899177716,-19.7221132386308],
[29.9198322142127,-14.0958668502427],
[19.0805843080126,-13.7888747608312],
[16.4685063521314,-11.2612927034291],
[21.4597664701884,-12.4740034586705],
[3.87655283720532,-17.575162461771],
[34.5713920556787,-10.705185165378],
[13.3732115747722,-16.7270274494424],
[20.7281704141919,-8.81165591556553],
[24.839851437942,-12.1240962157419],
[20.3019544741252,-12.8725060780898],
[21.9021426929599,-17.3225432396452],
[23.2285885715486,-12.2676568419045],
[28.5749111681851,-13.2616470619453],
[29.2957424128701,-14.6299928678996],
[15.2495527798625,-18.4649714274207],
[26.5567257400476,-9.19794350561966],
[30.1934232346361,-12.6272709845971],
[36.8267446011057,-7.25409849336718],
[32.157416823084,-10.4729534347553],
[5.85964365291694,-22.6573731626132],
[25.7426190674693,-14.8055803854566],
[16.237602636139,-16.5920595763719],
[14.7408608850568,-20.0537715298403],
[6.85907008242544,-18.3965586884781],
[26.5918329233128,-8.92664811750842],
[-11.2216019958228,-27.0519081982856],
[8.93593745011035,-20.8261235122575],
[24.4481258671796,-18.0324012215159],
[2.82048515404903,-22.4208457598703],
[30.8803004755948,-11.455358009593],
[15.4586738236098,-11.1242825084309],
[28.5332537090494,-14.7898744423126],
[40.4830293441052,-2.41946428697183],
[15.7563759125684,-13.5771266003795],
[19.3635588851727,-20.6224770470434],
[13.4212840786467,-19.0238227375766],
[7.77570680426702,-16.6385739839089],
[21.4865983854408,-15.290799330002],
[12.6392705930724,-23.6433305964301],
[12.4746151388128,-17.9720169566614],
[23.4572410437998,-14.602080545086],
[13.6878189833565,-18.9687408182414],
[15.4077465943441,-14.5352487124086],
[20.3356581548895,-10.0883159703702],
[20.7093833689359,-12.6939091236766],
[11.1032293684441,-14.1383848928755],
[17.5048321498308,-9.2338593361801],
[16.3303688220188,-15.1054735529158],
[26.6929062710726,-13.306030567991],
[34.4985678099711,-9.86199941278607],
[39.1374291499406,-10.5621430853401],
[21.9088956482146,-9.95198845621849],
[22.2367457578087,-17.2200123442707],
[10.0032784145577,-19.3557700653426],
[14.045833906665,-15.871937521131],
[15.5640911917607,-18.3396956121887],
[24.4771926581586,-14.8715313479137],
[26.533415556629,-14.693883922494],
[12.8722580202544,-21.2750596021509],
[24.4768291376862,-15.9592080959207],
[18.2230748567433,-14.6541444069985],
[4.1902148367447,-20.6144032528762],
[12.4332594022086,-16.6079789231489],
[20.5483758651873,-18.8512560786321],
[17.8180560451358,-12.5451990696752],
[11.0071081078049,-20.3938092335862],
[8.30560561422449,-22.9503944138682],
[33.9857852657284,-4.8371294974382],
[17.4376502239652,-14.5095976075022],
[29.0379635148943,-14.8461553663227],
[29.1344666599319,-7.70862921632672],
[32.9730697624544,-15.5839178785654],
[13.4211493998212,-20.150199857584],
[11.380538260355,-12.8619410359766],
[28.672631499186,-8.51866271785711],
[16.4296061111902,-23.3326051279759],
[25.7168371582585,-13.8899296143829],
[13.3185154732595,-17.8959160024249],
[3.60832478605376,-25.4023343597712],
[39.5445949652652,-11.466377647931],
[25.1693484426101,-12.2752652925707],
[25.2884257196471,-7.06710309184533],
[6.77665715793125,-22.3947299635571],
[20.1844223778907,-16.0427471125407],
[25.5506805272535,-9.33856532270204],
[25.1495682602477,-7.17350567090738],
[15.6978431006492,-17.5979197162642],
[37.42780451491,-10.843637288504],
[22.974620174842,-10.6171162611686],
[34.6327117468934,-9.26182440487384],
[34.7042513789061,-6.9630753351114],
[15.6563953929008,-17.2196961218915],
[25.2049825789225,-14.1592086208169]
]
def de_mean_matrix(A):
"""returns the result of subtracting from every value in A the mean
value of its column. the resulting matrix has mean 0 in every column"""
nr, nc = shape(A)
column_means, _ = scale(A)
return make_matrix(nr, nc, lambda i, j: A[i][j] - column_means[j])
def direction(w):
mag = magnitude(w)
return [w_i / mag for w_i in w]
def directional_variance_i(x_i, w):
"""the variance of the row x_i in the direction w"""
return dot(x_i, direction(w)) ** 2
def directional_variance(X, w):
"""the variance of the data in the direction w"""
return sum(directional_variance_i(x_i, w) for x_i in X)
def directional_variance_gradient_i(x_i, w):
"""the contribution of row x_i to the gradient of
the direction-w variance"""
projection_length = dot(x_i, direction(w))
return [2 * projection_length * x_ij for x_ij in x_i]
def directional_variance_gradient(X, w):
return vector_sum(directional_variance_gradient_i(x_i,w) for x_i in X)
def first_principal_component(X):
guess = [1 for _ in X[0]]
unscaled_maximizer = maximize_batch(
partial(directional_variance, X), # is now a function of w
partial(directional_variance_gradient, X), # is now a function of w
guess)
return direction(unscaled_maximizer)
def first_principal_component_sgd(X):
guess = [1 for _ in X[0]]
unscaled_maximizer = maximize_stochastic(
lambda x, _, w: directional_variance_i(x, w),
lambda x, _, w: directional_variance_gradient_i(x, w),
X, [None for _ in X], guess)
return direction(unscaled_maximizer)
def project(v, w):
"""return the projection of v onto w"""
coefficient = dot(v, w)
return scalar_multiply(coefficient, w)
def remove_projection_from_vector(v, w):
"""projects v onto w and subtracts the result from v"""
return vector_subtract(v, project(v, w))
def remove_projection(X, w):
"""for each row of X
projects the row onto w, and subtracts the result from the row"""
return [remove_projection_from_vector(x_i, w) for x_i in X]
def principal_component_analysis(X, num_components):
components = []
for _ in range(num_components):
component = first_principal_component(X)
components.append(component)
X = remove_projection(X, component)
return components
def transform_vector(v, components):
return [dot(v, w) for w in components]
def transform(X, components):
return [transform_vector(x_i, components) for x_i in X]
if __name__ == "__main__":
print "correlation(xs, ys1)", correlation(xs, ys1)
print "correlation(xs, ys2)", correlation(xs, ys2)
# safe parsing
data = []
with open("comma_delimited_stock_prices.csv", "rb") as f:
reader = csv.reader(f)
for line in parse_rows_with(reader, [dateutil.parser.parse, None, float]):
data.append(line)
for row in data:
if any(x is None for x in row):
print row
print "stocks"
with open("stocks.txt", "rb") as f:
reader = csv.DictReader(f, delimiter="\t")
data = [parse_dict(row, { 'date' : dateutil.parser.parse,
'closing_price' : float })
for row in reader]
max_aapl_price = max(row["closing_price"]
for row in data
if row["symbol"] == "AAPL")
print "max aapl price", max_aapl_price
# group rows by symbol
by_symbol = defaultdict(list)
for row in data:
by_symbol[row["symbol"]].append(row)
# use a dict comprehension to find the max for each symbol
max_price_by_symbol = { symbol : max(row["closing_price"]
for row in grouped_rows)
for symbol, grouped_rows in by_symbol.iteritems() }
print "max price by symbol"
print max_price_by_symbol
# key is symbol, value is list of "change" dicts
changes_by_symbol = group_by(picker("symbol"), data, day_over_day_changes)
# collect all "change" dicts into one big list
all_changes = [change
for changes in changes_by_symbol.values()
for change in changes]
print "max change", max(all_changes, key=picker("change"))
print "min change", min(all_changes, key=picker("change"))
# to combine percent changes, we add 1 to each, multiply them, and subtract 1
# for instance, if we combine +10% and -20%, the overall change is
# (1 + 10%) * (1 - 20%) - 1 = 1.1 * .8 - 1 = -12%
def combine_pct_changes(pct_change1, pct_change2):
return (1 + pct_change1) * (1 + pct_change2) - 1
def overall_change(changes):
return reduce(combine_pct_changes, pluck("change", changes))
overall_change_by_month = group_by(lambda row: row['date'].month,
all_changes,
overall_change)
print "overall change by month"
print overall_change_by_month
print "rescaling"
data = [[1, 20, 2],
[1, 30, 3],
[1, 40, 4]]
print "original: ", data
print "scale: ", scale(data)
print "rescaled: ", rescale(data)
print
print "PCA"
Y = de_mean_matrix(X)
components = principal_component_analysis(Y, 2)
print "principal components", components
print "first point", Y[0]
print "first point transformed", transform_vector(Y[0], components)
| unlicense |
stefanv/register_gui | viewer/canvastools/linetool.py | 1 | 6910 | import numpy as np
try:
from matplotlib import lines
except ImportError:
print("Could not import matplotlib -- skimage.viewer not available.")
from .base import CanvasToolBase, ToolHandles
__all__ = ['LineTool', 'ThickLineTool']
class LineTool(CanvasToolBase):
"""Widget for line selection in a plot.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool is displayed.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
nograb_draw : bool
If a mouse click is detected, but it does not grab a handle,
redraw the line from scratch. True by default.
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, ax, on_move=None, on_release=None, on_enter=None,
maxdist=10, line_props=None, nograb_draw=True,
**kwargs):
super(LineTool, self).__init__(ax, on_move=on_move, on_enter=on_enter,
on_release=on_release, **kwargs)
props = dict(color='r', linewidth=1, alpha=0.4, solid_capstyle='butt')
props.update(line_props if line_props is not None else {})
self.linewidth = props['linewidth']
self.maxdist = maxdist
self._active_pt = None
self._nograb_draw = nograb_draw
x = (0, 0)
y = (0, 0)
self._end_pts = np.transpose([x, y])
self._line = lines.Line2D(x, y, visible=False, animated=True, **props)
ax.add_line(self._line)
self._handles = ToolHandles(ax, x, y)
self._handles.set_visible(False)
self._artists = [self._line, self._handles.artist]
if on_enter is None:
def on_enter(pts):
x, y = np.transpose(pts)
print("length = %0.2f" % np.sqrt(np.diff(x)**2 + np.diff(y)**2))
self.callback_on_enter = on_enter
self.connect_event('button_press_event', self.on_mouse_press)
self.connect_event('button_release_event', self.on_mouse_release)
self.connect_event('motion_notify_event', self.on_move)
@property
def end_points(self):
return self._end_pts
@end_points.setter
def end_points(self, pts):
self._end_pts = np.asarray(pts)
self._line.set_data(np.transpose(pts))
self._handles.set_data(np.transpose(pts))
self.set_visible(True)
self.redraw()
def on_mouse_press(self, event):
if event.button != 1 or not self.ax.in_axes(event):
return
self.set_visible(True)
idx, px_dist = self._handles.closest(event.x, event.y)
if px_dist < self.maxdist:
self._active_pt = idx
elif self._nograb_draw:
self._active_pt = 0
x, y = event.xdata, event.ydata
self._end_pts = np.array([[x, y], [x, y]])
def on_mouse_release(self, event):
if event.button != 1:
return
if self._active_pt is not None:
self._active_pt = None
self.callback_on_release(self.geometry)
self.redraw()
def on_move(self, event):
if event.button != 1 or self._active_pt is None:
return
if not self.ax.in_axes(event):
return
self.update(event.xdata, event.ydata)
self.callback_on_move(self.geometry)
def update(self, x=None, y=None):
if x is not None:
self._end_pts[self._active_pt, :] = x, y
self.end_points = self._end_pts
@property
def geometry(self):
return self.end_points
class ThickLineTool(LineTool):
"""Widget for line selection in a plot.
The thickness of the line can be varied using the mouse scroll wheel, or
with the '+' and '-' keys.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool is displayed.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
on_change : function
Function called whenever the line thickness is changed.
maxdist : float
Maximum pixel distance allowed when selecting control handle.
line_props : dict
Properties for :class:`matplotlib.lines.Line2D`.
Attributes
----------
end_points : 2D array
End points of line ((x1, y1), (x2, y2)).
"""
def __init__(self, ax, on_move=None, on_enter=None, on_release=None,
on_change=None, maxdist=10, line_props=None):
super(ThickLineTool, self).__init__(ax,
on_move=on_move,
on_enter=on_enter,
on_release=on_release,
maxdist=maxdist,
line_props=line_props)
if on_change is None:
def on_change(*args):
pass
self.callback_on_change = on_change
self.connect_event('scroll_event', self.on_scroll)
self.connect_event('key_press_event', self.on_key_press)
def on_scroll(self, event):
if not event.inaxes:
return
if event.button == 'up':
self._thicken_scan_line()
elif event.button == 'down':
self._shrink_scan_line()
def on_key_press(self, event):
if event.key == '+':
self._thicken_scan_line()
elif event.key == '-':
self._shrink_scan_line()
def _thicken_scan_line(self):
self.linewidth += 1
self.update()
self.callback_on_change(self.geometry)
def _shrink_scan_line(self):
if self.linewidth > 1:
self.linewidth -= 1
self.update()
self.callback_on_change(self.geometry)
if __name__ == '__main__':
import matplotlib.pyplot as plt
from skimage import data
image = data.camera()
f, ax = plt.subplots()
ax.imshow(image, interpolation='nearest')
h, w = image.shape
# line_tool = LineTool(ax)
line_tool = ThickLineTool(ax)
line_tool.end_points = ([w/3, h/2], [2*w/3, h/2])
plt.show()
| bsd-3-clause |
billy-inn/scikit-learn | sklearn/tree/export.py | 53 | 15772 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
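# Illustrative use of this private helper: _color_brew(3) returns three
# (R, G, B) lists with hues spaced 120 degrees apart; export_graphviz uses
# them to colour nodes by majority class when ``filled=True``.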
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
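            # alpha encodes how dominant the majority class is: 0 when the two
            # largest class fractions are tied, 255 when the node is pure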
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _tree.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
Tomires/messages | messages.py | 1 | 8745 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import json
import os
import datetime
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from time import mktime
# config
MIN_MESSAGES = 100 # minimum number of messages for a person to appear in our userlist
DELAY_FLOOR = 5 # replies faster than this many minutes are not counted as a delay
IGNORE_GROUP_CONVERSATIONS = True # when True, skip threads with more than two participants
DEBUG = False # displays useful debugging information
names = {}
with open('data/names.csv', 'rb') as names_csv:
names_file = csv.reader(names_csv, delimiter=',', quotechar='|')
for row in names_file:
names[row[0].replace('@facebook.com','')] = row[1]
json_data = open('data/messages.json')
threads = json.load(json_data)['threads']
sender_pop = {}
sender_pos = {}
sender_neg = {}
messages = []
ts_hourly = []
ts_weekday = []
unknown_ids = []
delay_total = []
delay_msgs = []
emoji_pos = ['😀', '😁', '😂', '😃', '😄', '😅',
'😆', '😉', '😊', '😋', '😎', '😍',
'😘', '😗', '😙', '😚', '🙂', '☺',
'😌', '😛', '😜', '😝', '😏',
':)', ':-)', ';)', ';-)', ':P',
':-P', '^^', '^.^', '^_^', ':o',
'xD', 'XD', ':D', ':-D']
emoji_neg = ['☹', '🙁', '😞', '😢', '😭', '😦',
'😧', '😰', '😡', '😠',
':(', ':-(', ':/', ':-/', 'D:', ':$',
'-_-', '-.-',
'X.x', 'X.X', 'x.x', 'x_x', 'X_x', 'X_X',
'>.>', '>.<', '<.<', '>_>', '>_<', '<_<']
for t in range(len(threads)):
if IGNORE_GROUP_CONVERSATIONS and len(threads[t]['participants']) > 2:
continue
for m in range(len(threads[t]['messages'])):
sender = threads[t]['messages'][m]['sender'].encode('utf-8')
message = threads[t]['messages'][m]['message'].encode('utf-8')
if '@' in sender:
try:
sender = names[sender.replace('@facebook.com','')]
except KeyError:
if sender not in unknown_ids:
unknown_ids.append(sender)
if sender in sender_pop:
sender_pop[sender] += 1
else:
sender_pop[sender] = 1
sender_pos[sender] = 0
sender_neg[sender] = 0
for e in emoji_pos:
if e in message:
sender_pos[sender] += 1
break
for e in emoji_neg:
if e in message:
sender_neg[sender] += 1
break
# normalize positivity / negativity
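# (each score becomes the fraction of that user's messages that contain at
# least one matching emoticon, since at most one hit is counted per message)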
for sender in sender_pop:
sender_pos[sender] = sender_pos[sender] / sender_pop[sender]
sender_neg[sender] = sender_neg[sender] / sender_pop[sender]
sorted_pop = sorted(sender_pop.iteritems(), key=lambda x:-x[1])[:len(sender_pop)]
# friend ranking based on number of messages sent
total_messages = 0
rest_messages = 0
current_user = ''
owner_ratio = 0
user_threshold = 0
for sender in sorted_pop:
total_messages += sender[1]
if sender[1] > MIN_MESSAGES:
print sender[0] + " " + str(sender[1])
user_threshold += 1
else:
rest_messages += sender[1]
del(sender_pos[sender[0]])
del(sender_neg[sender[0]])
if current_user == '':
current_user = sender[0]
owner_messages = sender[1]
del(sender_pos[sender[0]])
del(sender_neg[sender[0]])
print '--------------------------'
print 'TOTAL MESSAGES: ' + str(total_messages)
print 'TOTAL USERS: ' + str(len(sorted_pop))
# personal message statistics
print 'Considering ' + current_user + ' as your account name. Hope it\'s correct!'
print 'Judging by the stats above, your messages make up for ' + str(int(owner_messages * 100 / total_messages)) + '% of total count.'
for i in range(24):
ts_hourly.append(0)
delay_total.append(0)
delay_msgs.append(0)
for i in range(7):
ts_weekday.append(0)
awaiting_reply = False
last_timestamp = 0
for t in range(len(threads)):
for m in range(len(threads[t]['messages'])):
sender = threads[t]['messages'][m]['sender'].encode('utf-8')
if '@' in sender:
try:
sender = names[sender.replace('@facebook.com','')]
except KeyError:
pass
if sender == current_user:
messages.append(threads[t]['messages'][m]['message'].encode('utf-8'))
timestamp = datetime.datetime.strptime(threads[t]['messages'][m]['date'].split('+')[0], '%Y-%m-%dT%H:%M')
ts_hourly[timestamp.hour] += 1
ts_weekday[timestamp.weekday()] += 1
if awaiting_reply:
hour = datetime.datetime.fromtimestamp(last_timestamp).hour
current_delay = (mktime(timestamp.timetuple()) - last_timestamp) / 60 # in minutes
if DELAY_FLOOR < current_delay < 1440: # it doesn't make sense for us to count past a day
delay_total[hour] += current_delay
delay_msgs[hour] += 1
awaiting_reply = False
else:
awaiting_reply = True
last_timestamp = mktime(datetime.datetime.strptime(threads[t]['messages'][m]['date'].split('+')[0], '%Y-%m-%dT%H:%M').timetuple())
awaiting_reply = False # move onto next thread
combo = ' '.join(messages)
word_count = len(combo.split())
word_average = int(word_count / owner_messages)
print 'You have written ' + str(word_count) + ' words across ' + str(owner_messages) + ' messages. Your average message contains ' + str(word_average) + ' words.'
if DEBUG:
print '> DEBUG MODE ON'
print '> PLEASE SUPPLY NAMES FOR THE FOLLOWING IDS:'
for id in unknown_ids:
print '> ' + id
# message distribution over time of day
plt.title('Message distribution')
plt.bar(range(24), ts_hourly, align='center', color='k', alpha=1)
plt.xticks([0,6,12,18], ['12 AM','6 AM', '12 PM', '6 PM'], fontsize=9)
plt.xlabel('Time of day', fontsize=12)
plt.ylabel('Messages', fontsize=12)
plt.savefig('output/ts_hourly.png')
plt.close()
# message distribution over days of week
plt.title('Message distribution')
plt.bar(range(7), ts_weekday, align='center', color='k', alpha=1)
plt.xticks([0,1,2,3,4,5,6], ['Mon','Tue','Wed','Thu','Fri','Sat','Sun'], fontsize=9)
plt.xlabel('Weekday', fontsize=12)
plt.ylabel('Messages', fontsize=12)
plt.savefig('output/ts_weekday.png')
plt.close()
# user popularity
fig, ax = plt.subplots()
sorted_pop_messages = [item[1] for item in sorted_pop[1:user_threshold]]
sorted_pop_messages.append(rest_messages)
sorted_pop_users = [item[0] for item in sorted_pop[1:user_threshold]]
sorted_pop_users.append('Other users')
for i in range(len(sorted_pop_users)):
sorted_pop_users[i] = unicode(sorted_pop_users[i], "utf-8")
plt.title('User popularity')
ax.pie(sorted_pop_messages, labels=sorted_pop_users, shadow=True, startangle=90)
ax.axis('equal')
plt.savefig('output/user_pop.png')
plt.close()
# average delay in replying to messages
average_delay = []
for hour in range(24):
if delay_msgs[hour] == 0:
average_delay.append(0)
else:
average_delay.append(delay_total[hour] / delay_msgs[hour])
plt.title('Average delay')
plt.bar(range(24), average_delay, align='center', color='k', alpha=1)
plt.xticks([0,6,12,18], ['12 AM','6 AM', '12 PM', '6 PM'], fontsize=9)
plt.xlabel('Time of day', fontsize=12)
plt.ylabel('Minutes', fontsize=12)
plt.savefig('output/avg_delay.png')
plt.close()
# positivity per user
sorted_pos = sorted(sender_pos.iteritems(), key=lambda x:-x[1])[:len(sender_pos)]
sorted_pos_value = [item[1] for item in sorted_pos]
sorted_pos_users = [item[0] for item in sorted_pos]
for i in range(len(sorted_pos_users)):
sorted_pos_users[i] = unicode(sorted_pos_users[i], "utf-8")
plt.bar(range(len(sorted_pos)), sorted_pos_value, align='center', color='k', alpha=1)
plt.xticks(range(len(sorted_pos)), sorted_pos_users, fontsize=9, rotation=90)
plt.xlabel('User', fontsize=12)
plt.ylabel('Positivity', fontsize=12)
plt.tight_layout()
plt.savefig('output/user_pos.png')
plt.close()
# negativity per user
sorted_neg = sorted(sender_neg.iteritems(), key=lambda x:-x[1])[:len(sender_neg)]
sorted_neg_value = [item[1] for item in sorted_neg]
sorted_neg_users = [item[0] for item in sorted_neg]
for i in range(len(sorted_neg_users)):
sorted_neg_users[i] = unicode(sorted_neg_users[i], "utf-8")
plt.bar(range(len(sorted_neg)), sorted_neg_value, align='center', color='k', alpha=1)
plt.xticks(range(len(sorted_neg)), sorted_neg_users, fontsize=9, rotation=90)
plt.xlabel('User', fontsize=12)
plt.ylabel('Negativity', fontsize=12)
plt.tight_layout()
plt.savefig('output/user_neg.png')
plt.close()
| gpl-3.0 |
q1ang/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
dboyliao/ibis | ibis/expr/api.py | 2 | 43161 | # Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.expr.datatypes import Schema # noqa
from ibis.expr.types import (Expr, # noqa
ValueExpr, ScalarExpr, ArrayExpr,
TableExpr,
NumericValue, NumericArray,
IntegerValue,
Int8Value, Int8Scalar, Int8Array,
Int16Value, Int16Scalar, Int16Array,
Int32Value, Int32Scalar, Int32Array,
Int64Value, Int64Scalar, Int64Array,
NullScalar,
BooleanValue, BooleanScalar, BooleanArray,
FloatValue, FloatScalar, FloatArray,
DoubleValue, DoubleScalar, DoubleArray,
StringValue, StringScalar, StringArray,
DecimalValue, DecimalScalar, DecimalArray,
TimestampValue, TimestampScalar, TimestampArray,
CategoryValue, unnamed, as_value_expr, literal,
null, sequence)
# __all__ is defined
from ibis.expr.temporal import * # noqa
import ibis.common as _com
from ibis.compat import py_string
from ibis.expr.analytics import bucket, histogram
from ibis.expr.groupby import GroupedTableExpr # noqa
from ibis.expr.window import window, trailing_window, cumulative_window
import ibis.expr.analytics as _analytics
import ibis.expr.analysis as _L
import ibis.expr.types as ir
import ibis.expr.operations as _ops
import ibis.expr.temporal as _T
import ibis.util as util
__all__ = [
'schema', 'table', 'literal', 'expr_list', 'timestamp',
'case', 'where', 'sequence',
'now', 'desc', 'null', 'NA',
'cast', 'coalesce', 'greatest', 'least',
'cross_join', 'join',
'aggregate',
'row_number',
'negate', 'ifelse',
'Expr', 'Schema',
'window', 'trailing_window', 'cumulative_window'
]
__all__ += _T.__all__
NA = null()
_data_type_docs = """\
Ibis uses its own type aliases that map onto database types. See, for
example, the correspondence between Ibis type names and Impala type names:
Ibis type Impala Type
~~~~~~~~~ ~~~~~~~~~~~
int8 TINYINT
int16 SMALLINT
int32 INT
int64 BIGINT
float FLOAT
double DOUBLE
boolean BOOLEAN
string STRING
timestamp TIMESTAMP
decimal(p, s) DECIMAL(p,s)"""
def schema(pairs=None, names=None, types=None):
if pairs is not None:
return Schema.from_tuples(pairs)
else:
return Schema(names, types)
def table(schema, name=None):
"""
Create an unbound Ibis table for creating expressions. Cannot be executed
without being bound to some physical table.
Useful for testing
Parameters
----------
schema : ibis Schema
name : string, default None
Name for table
Returns
-------
table : TableExpr
"""
if not isinstance(schema, Schema):
if isinstance(schema, list):
schema = Schema.from_tuples(schema)
else:
schema = Schema.from_dict(schema)
node = _ops.UnboundTable(schema, name=name)
return TableExpr(node)
def desc(expr):
"""
Create a sort key (when used in sort_by) by the passed array expression or
column name.
Parameters
----------
expr : array expression or string
Can be a column name in the table being sorted
Examples
--------
result = (self.table.group_by('g')
.size('count')
.sort_by(ibis.desc('count')))
"""
if not isinstance(expr, Expr):
return _ops.DeferredSortKey(expr, ascending=False)
else:
return _ops.SortKey(expr, ascending=False)
def timestamp(value):
"""
Returns a timestamp literal if value is likely coercible to a timestamp
"""
if isinstance(value, py_string):
from pandas import Timestamp
value = Timestamp(value)
op = ir.Literal(value)
return ir.TimestampScalar(op)
schema.__doc__ = """\
Validate and return an Ibis Schema object
{0}
Parameters
----------
pairs : list of (name, type) tuples
Mutually exclusive with names/types
names : list of string
Field names
types : list of string
Field types
Examples
--------
sc = schema([('foo', 'string'),
('bar', 'int64'),
('baz', 'boolean')])
sc2 = schema(names=['foo', 'bar', 'baz'],
types=['string', 'int64', 'boolean'])
Returns
-------
schema : Schema
""".format(_data_type_docs)
def case():
"""
Similar to the .case method on array expressions, create a case builder
that accepts self-contained boolean expressions (as opposed to expressions
which are to be equality-compared with a fixed value expression)
Use the .when method on the resulting object followed by .end to create a
complete case.
Examples
--------
expr = (ibis.case()
.when(cond1, result1)
.when(cond2, result2).end())
Returns
-------
case : CaseBuilder
"""
return _ops.SearchedCaseBuilder()
def now():
"""
Compute the current timestamp
Returns
-------
now : Timestamp scalar
"""
return _ops.TimestampNow().to_expr()
def row_number():
"""
Analytic function for the current row number, starting at 0
Returns
-------
row_number : IntArray
"""
return _ops.RowNumber().to_expr()
e = _ops.E().to_expr()
def _add_methods(klass, method_table):
for k, v in method_table.items():
setattr(klass, k, v)
def _unary_op(name, klass, doc=None):
def f(arg):
return klass(arg).to_expr()
f.__name__ = name
if doc is not None:
f.__doc__ = doc
else:
f.__doc__ = klass.__doc__
return f
def negate(arg):
"""
Negate a numeric expression
Parameters
----------
arg : numeric value expression
Returns
-------
negated : type of caller
"""
op = arg.op()
if hasattr(op, 'negate'):
result = op.negate()
else:
result = _ops.Negate(arg)
return result.to_expr()
def count(expr, where=None):
"""
    Compute cardinality / sequence size of expression. For array expressions,
    the count excludes nulls. For tables, it is the size of the entire
table.
Returns
-------
counts : int64 type
"""
op = expr.op()
if isinstance(op, _ops.DistinctArray):
if where is not None:
raise NotImplementedError
result = op.count().to_expr()
else:
result = _ops.Count(expr, where).to_expr()
return result.name('count')
def group_concat(arg, sep=','):
"""
Concatenate values using the indicated separator (comma by default) to
produce a string
Parameters
----------
arg : array expression
sep : string, default ','
Returns
-------
concatenated : string scalar
"""
return _ops.GroupConcat(arg, sep).to_expr()
def _binop_expr(name, klass):
def f(self, other):
try:
other = as_value_expr(other)
op = klass(self, other)
return op.to_expr()
except _com.InputTypeError:
return NotImplemented
f.__name__ = name
return f
def _rbinop_expr(name, klass):
# For reflexive binary _ops, like radd, etc.
def f(self, other):
other = as_value_expr(other)
op = klass(other, self)
return op.to_expr()
f.__name__ = name
return f
def _boolean_binary_op(name, klass):
def f(self, other):
other = as_value_expr(other)
if not isinstance(other, BooleanValue):
raise TypeError(other)
op = klass(self, other)
return op.to_expr()
f.__name__ = name
return f
def _boolean_binary_rop(name, klass):
def f(self, other):
other = as_value_expr(other)
if not isinstance(other, BooleanValue):
raise TypeError(other)
op = klass(other, self)
return op.to_expr()
f.__name__ = name
return f
def _agg_function(name, klass, assign_default_name=True):
def f(self, where=None):
expr = klass(self, where).to_expr()
if assign_default_name:
expr = expr.name(name)
return expr
f.__name__ = name
return f
def _extract_field(name, klass):
def f(self):
return klass(self).to_expr()
f.__name__ = name
return f
# ---------------------------------------------------------------------
# Generic value API
def cast(arg, target_type):
# validate
op = _ops.Cast(arg, target_type)
if op.args[1] == arg.type():
# noop case if passed type is the same
return arg
else:
return op.to_expr()
cast.__doc__ = """
Cast value(s) to the indicated data type. Values that cannot be
successfully cast are returned as null (exact behavior may vary by backend).
Parameters
----------
target_type : data type name
Notes
-----
{0}
Returns
-------
cast_expr : ValueExpr
""".format(_data_type_docs)
def hash(arg, how='fnv'):
"""
Compute an integer hash value for the indicated value expression.
Parameters
----------
arg : value expression
how : {'fnv'}, default 'fnv'
Hash algorithm to use
Returns
-------
hash_value : int64 expression
"""
return _ops.Hash(arg, how).to_expr()
def fillna(arg, fill_value):
"""
Replace any null values with the indicated fill value
Parameters
----------
fill_value : scalar / array value or expression
Examples
--------
result = table.col.fillna(5)
result2 = table.col.fillna(table.other_col * 3)
Returns
-------
filled : type of caller
"""
return _ops.IfNull(arg, fill_value).to_expr()
def coalesce(*args):
"""
Compute the first non-null value(s) from the passed arguments in
left-to-right order. This is also known as "combine_first" in pandas.
Parameters
----------
*args : variable-length value list
Examples
--------
result = coalesce(expr1, expr2, 5)
Returns
-------
coalesced : type of first provided argument
"""
return _ops.Coalesce(*args).to_expr()
def greatest(*args):
"""
Compute the largest value (row-wise, if any arrays are present) among the
supplied arguments.
Returns
-------
greatest : type depending on arguments
"""
return _ops.Greatest(*args).to_expr()
def least(*args):
"""
Compute the smallest value (row-wise, if any arrays are present) among the
supplied arguments.
Returns
-------
least : type depending on arguments
"""
return _ops.Least(*args).to_expr()
def where(boolean_expr, true_expr, false_null_expr):
"""
Equivalent to the ternary expression: if X then Y else Z
Parameters
----------
boolean_expr : BooleanValue (array or scalar)
true_expr : value
Values for each True value
false_null_expr : value
Values for False or NULL values
Returns
-------
result : arity depending on inputs
Type of true_expr used to determine output type
"""
op = _ops.Where(boolean_expr, true_expr, false_null_expr)
return op.to_expr()
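# Illustrative sketch (not part of the original module): `where` as a
# row-wise ternary. The table and column names below are assumptions for
# demonstration only.
# t = table([('a', 'double'), ('b', 'double')], 'demo')
# expr = where(t.a > t.b, t.a, t.b)  # per row: a if a > b else b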
def over(expr, window):
"""
Turn an aggregation or full-sample analytic operation into a windowed
operation. See ibis.window for more details on window configuration
Parameters
----------
expr : value expression
window : ibis.Window
Returns
-------
expr : type of input
"""
prior_op = expr.op()
if isinstance(prior_op, _ops.WindowOp):
op = prior_op.over(window)
else:
op = _ops.WindowOp(expr, window)
result = op.to_expr()
try:
result = result.name(expr.get_name())
except:
pass
return result
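# Illustrative sketch (not part of the original module): turning a reduction
# into a windowed (analytic) expression. The window() keyword names used here
# (group_by/order_by) are assumptions based on ibis.expr.window.
# t = table([('g', 'string'), ('x', 'double')], 'demo')
# w = window(group_by=t.g, order_by=t.x)
# expr = t.x.sum().over(w)  # running sum of x within each group g
#                           # (frame semantics depend on backend defaults)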
def value_counts(arg, metric_name='count'):
"""
Compute a frequency table for this value expression
Parameters
----------
Returns
-------
counts : TableExpr
Aggregated table
"""
base = ir.find_base_table(arg)
metric = base.count().name(metric_name)
try:
arg.get_name()
except _com.ExpressionError:
arg = arg.name('unnamed')
return base.group_by(arg).aggregate(metric)
def nullif(value, null_if_expr):
"""
Set values to null if they match/equal a particular expression (scalar or
array-valued).
    Commonly used to avoid divide-by-zero problems (get NULL instead of INF on
divide-by-zero): 5 / expr.nullif(0)
Parameters
----------
value : value expression
Value to modify
null_if_expr : value expression (array or scalar)
Returns
-------
null_if : type of caller
"""
return _ops.NullIf(value, null_if_expr).to_expr()
def between(arg, lower, upper):
"""
Check if the input expr falls between the lower/upper bounds
passed. Bounds are inclusive. All arguments must be comparable.
Returns
-------
is_between : BooleanValue
"""
lower = _ops.as_value_expr(lower)
upper = _ops.as_value_expr(upper)
op = _ops.Between(arg, lower, upper)
return op.to_expr()
def isin(arg, values):
"""
Check whether the value expression is contained within the indicated
list of values.
Parameters
----------
values : list, tuple, or array expression
The values can be scalar or array-like. Each of them must be
comparable with the calling expression, or None (NULL).
Examples
--------
expr = table.strings.isin(['foo', 'bar', 'baz'])
expr2 = table.strings.isin(table2.other_string_col)
Returns
-------
contains : BooleanValue
"""
op = _ops.Contains(arg, values)
return op.to_expr()
def notin(arg, values):
"""
Like isin, but checks whether this expression's value(s) are not
contained in the passed values. See isin docs for full usage.
"""
op = _ops.NotContains(arg, values)
return op.to_expr()
add = _binop_expr('__add__', _ops.Add)
sub = _binop_expr('__sub__', _ops.Subtract)
mul = _binop_expr('__mul__', _ops.Multiply)
div = _binop_expr('__div__', _ops.Divide)
pow = _binop_expr('__pow__', _ops.Power)
mod = _binop_expr('__mod__', _ops.Modulus)
rsub = _rbinop_expr('__rsub__', _ops.Subtract)
rdiv = _rbinop_expr('__rdiv__', _ops.Divide)
_generic_value_methods = dict(
hash=hash,
cast=cast,
fillna=fillna,
nullif=nullif,
between=between,
isin=isin,
notin=notin,
isnull=_unary_op('isnull', _ops.IsNull),
notnull=_unary_op('notnull', _ops.NotNull),
over=over,
__add__=add,
add=add,
__sub__=sub,
sub=sub,
__mul__=mul,
mul=mul,
__div__=div,
div=div,
__rdiv__=rdiv,
rdiv=rdiv,
__pow__=pow,
pow=pow,
__radd__=add,
__rsub__=rsub,
rsub=rsub,
__rmul__=_rbinop_expr('__rmul__', _ops.Multiply),
__rpow__=_binop_expr('__rpow__', _ops.Power),
__mod__=mod,
__rmod__=_rbinop_expr('__rmod__', _ops.Modulus),
__eq__=_binop_expr('__eq__', _ops.Equals),
__ne__=_binop_expr('__ne__', _ops.NotEquals),
__ge__=_binop_expr('__ge__', _ops.GreaterEqual),
__gt__=_binop_expr('__gt__', _ops.Greater),
__le__=_binop_expr('__le__', _ops.LessEqual),
__lt__=_binop_expr('__lt__', _ops.Less)
)
approx_nunique = _agg_function('approx_nunique', _ops.HLLCardinality, True)
approx_median = _agg_function('approx_median', _ops.CMSMedian, True)
max = _agg_function('max', _ops.Max, True)
min = _agg_function('min', _ops.Min, True)
def lag(arg, offset=None, default=None):
return _ops.Lag(arg, offset, default).to_expr()
def lead(arg, offset=None, default=None):
return _ops.Lead(arg, offset, default).to_expr()
first = _unary_op('first', _ops.FirstValue)
last = _unary_op('last', _ops.LastValue)
rank = _unary_op('rank', _ops.MinRank)
dense_rank = _unary_op('dense_rank', _ops.DenseRank)
cumsum = _unary_op('cumsum', _ops.CumulativeSum)
cummean = _unary_op('cummean', _ops.CumulativeMean)
cummin = _unary_op('cummin', _ops.CumulativeMin)
cummax = _unary_op('cummax', _ops.CumulativeMax)
def nth(arg, k):
"""
Analytic operation computing nth value from start of sequence
Parameters
----------
arg : array expression
k : int
Desired rank value
Returns
-------
nth : type of argument
"""
return _ops.NthValue(arg, k).to_expr()
def distinct(arg):
"""
    Compute the set of unique values occurring in this array. Cannot be used
in conjunction with other array expressions from the same context
(because it's a cardinality-modifying pseudo-reduction).
"""
op = _ops.DistinctArray(arg)
return op.to_expr()
def nunique(arg):
"""
Shorthand for foo.distinct().count(); computing the number of unique
values in an array.
"""
return _ops.CountDistinct(arg).to_expr()
def topk(arg, k, by=None):
"""
    Produces a filter expression selecting the top k values of the array,
    ranked by frequency of occurrence or by the optional `by` metric.
Returns
-------
topk : TopK filter expression
"""
op = _ops.TopK(arg, k, by=by)
return op.to_expr()
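# Illustrative sketch (not part of the original module): the usual pattern is
# to use the TopK expression as a filter on the parent table (usage pattern
# and column names assumed here for demonstration).
# t = table([('city', 'string'), ('amount', 'double')], 'demo')
# top_cities = t[t.city.topk(5)]                       # 5 most frequent cities
# top_by_amt = t[t.city.topk(5, by=t.amount.sum())]    # top 5 by total amount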
def bottomk(arg, k, by=None):
raise NotImplementedError
def _case(arg):
"""
Create a new SimpleCaseBuilder to chain multiple if-else
statements. Add new search expressions with the .when method. These
must be comparable with this array expression. Conclude by calling
.end()
Examples
--------
case_expr = (expr.case()
.when(case1, output1)
.when(case2, output2)
.default(default_output)
.end())
Returns
-------
builder : CaseBuilder
"""
return _ops.SimpleCaseBuilder(arg)
def cases(arg, case_result_pairs, default=None):
"""
Create a case expression in one shot.
Returns
-------
case_expr : SimpleCase
"""
builder = arg.case()
for case, result in case_result_pairs:
builder = builder.when(case, result)
if default is not None:
builder = builder.else_(default)
return builder.end()
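# Illustrative sketch (not part of the original module): `cases` builds the
# same SimpleCase as chaining .when() calls, in a single call. Column names
# are assumptions for demonstration only.
# t = table([('size', 'string')], 'demo')
# expr = t.size.cases([('S', 1), ('M', 2), ('L', 3)], default=0)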
def _generic_summary(arg, exact_nunique=False, prefix=None):
"""
Compute a set of summary metrics from the input value expression
Parameters
----------
arg : value expression
exact_nunique : boolean, default False
Compute the exact number of distinct values (slower)
prefix : string, default None
String prefix for metric names
Returns
-------
summary : (count, # nulls, nunique)
"""
metrics = [
arg.count(),
arg.isnull().sum().name('nulls')
]
if exact_nunique:
unique_metric = arg.nunique().name('uniques')
else:
unique_metric = arg.approx_nunique().name('uniques')
metrics.append(unique_metric)
return _wrap_summary_metrics(metrics, prefix)
def _numeric_summary(arg, exact_nunique=False, prefix=None):
"""
Compute a set of summary metrics from the input numeric value expression
Parameters
----------
arg : numeric value expression
exact_nunique : boolean, default False
prefix : string, default None
String prefix for metric names
Returns
-------
summary : (count, # nulls, min, max, sum, mean, nunique)
"""
metrics = [
arg.count(),
arg.isnull().sum().name('nulls'),
arg.min(),
arg.max(),
arg.sum(),
arg.mean()
]
if exact_nunique:
unique_metric = arg.nunique().name('nunique')
else:
unique_metric = arg.approx_nunique().name('approx_nunique')
metrics.append(unique_metric)
return _wrap_summary_metrics(metrics, prefix)
def _wrap_summary_metrics(metrics, prefix):
result = expr_list(metrics)
if prefix is not None:
result = result.prefix(prefix)
return result
def expr_list(exprs):
for e in exprs:
e.get_name()
return ir.ExpressionList(exprs).to_expr()
_generic_array_methods = dict(
case=_case,
cases=cases,
bottomk=bottomk,
distinct=distinct,
nunique=nunique,
topk=topk,
summary=_generic_summary,
count=count,
min=min,
max=max,
approx_median=approx_median,
approx_nunique=approx_nunique,
group_concat=group_concat,
value_counts=value_counts,
first=first,
last=last,
dense_rank=dense_rank,
rank=rank,
# nth=nth,
lag=lag,
lead=lead,
cummin=cummin,
cummax=cummax,
)
_add_methods(ValueExpr, _generic_value_methods)
_add_methods(ArrayExpr, _generic_array_methods)
# ---------------------------------------------------------------------
# Numeric API
def round(arg, digits=None):
"""
Round values either to integer or indicated number of decimal places.
Returns
-------
rounded : type depending on digits argument
digits None or 0
decimal types: decimal
other numeric types: bigint
digits nonzero
decimal types: decimal
other numeric types: double
"""
op = _ops.Round(arg, digits)
return op.to_expr()
def log(arg, base=None):
"""
Perform the logarithm using a specified base
Parameters
----------
base : number, default None
If None, base e is used
Returns
-------
logarithm : double type
"""
op = _ops.Log(arg, base)
return op.to_expr()
def _integer_to_timestamp(arg, unit='s'):
"""
Convert integer UNIX timestamp (at some resolution) to a timestamp type
Parameters
----------
unit : {'s', 'ms', 'us'}
Second (s), millisecond (ms), or microsecond (us) resolution
Returns
-------
timestamp : timestamp value expression
"""
op = _ops.TimestampFromUNIX(arg, unit)
return op.to_expr()
abs = _unary_op('abs', _ops.Abs)
ceil = _unary_op('ceil', _ops.Ceil)
exp = _unary_op('exp', _ops.Exp)
floor = _unary_op('floor', _ops.Floor)
log2 = _unary_op('log2', _ops.Log2)
log10 = _unary_op('log10', _ops.Log10)
ln = _unary_op('ln', _ops.Ln)
sign = _unary_op('sign', _ops.Sign)
sqrt = _unary_op('sqrt', _ops.Sqrt)
_numeric_value_methods = dict(
__neg__=negate,
abs=abs,
ceil=ceil,
floor=floor,
sign=sign,
exp=exp,
sqrt=sqrt,
log=log,
ln=ln,
log2=log2,
log10=log10,
round=round,
zeroifnull=_unary_op('zeroifnull', _ops.ZeroIfNull),
)
def convert_base(arg, from_base, to_base):
"""
Convert number (as integer or string) from one base to another
Parameters
----------
arg : string or integer
from_base : integer
to_base : integer
Returns
-------
converted : string
"""
return _ops.BaseConvert(arg, from_base, to_base).to_expr()
_integer_value_methods = dict(
to_timestamp=_integer_to_timestamp,
convert_base=convert_base
)
mean = _agg_function('mean', _ops.Mean, True)
sum = _agg_function('sum', _ops.Sum, True)
_numeric_array_methods = dict(
mean=mean,
sum=sum,
cumsum=cumsum,
cummean=cummean,
bucket=bucket,
histogram=histogram,
summary=_numeric_summary,
)
_add_methods(NumericValue, _numeric_value_methods)
_add_methods(IntegerValue, _integer_value_methods)
_add_methods(NumericArray, _numeric_array_methods)
# ----------------------------------------------------------------------
# Boolean API
# TODO: logical binary operators for BooleanValue
def ifelse(arg, true_expr, false_expr):
"""
Shorthand for implementing ternary expressions
bool_expr.ifelse(0, 1)
e.g., in SQL: CASE WHEN bool_expr THEN 0 else 1 END
"""
# Result will be the result of promotion of true/false exprs. These
# might be conflicting types; same type resolution as case expressions
# must be used.
case = _ops.SearchedCaseBuilder()
return case.when(arg, true_expr).else_(false_expr).end()
_boolean_value_methods = dict(
ifelse=ifelse,
__and__=_boolean_binary_op('__and__', _ops.And),
__or__=_boolean_binary_op('__or__', _ops.Or),
__xor__=_boolean_binary_op('__xor__', _ops.Xor),
__rand__=_boolean_binary_rop('__rand__', _ops.And),
__ror__=_boolean_binary_rop('__ror__', _ops.Or),
__rxor__=_boolean_binary_rop('__rxor__', _ops.Xor)
)
_boolean_array_methods = dict(
any=_unary_op('any', _ops.Any),
notany=_unary_op('notany', _ops.NotAny),
all=_unary_op('all', _ops.All),
    notall=_unary_op('notall', _ops.NotAll),
cumany=_unary_op('cumany', _ops.CumulativeAny),
cumall=_unary_op('cumall', _ops.CumulativeAll)
)
_add_methods(BooleanValue, _boolean_value_methods)
_add_methods(BooleanArray, _boolean_array_methods)
# ---------------------------------------------------------------------
# String API
def _string_substr(self, start, length=None):
"""
Pull substrings out of each string value by position and maximum
length.
Parameters
----------
start : int
First character to start splitting, indices starting at 0 (like
Python)
length : int, optional
Maximum length of each substring. If not supplied, splits each string
to the end
Returns
-------
substrings : type of caller
"""
op = _ops.Substring(self, start, length)
return op.to_expr()
def _string_left(self, nchars):
"""
    Return up to nchars characters from the start (left side) of each
    string. Convenience wrapper around substr.
Returns
-------
substrings : type of caller
"""
return self.substr(0, length=nchars)
def _string_right(self, nchars):
"""
    Return up to nchars characters from the end (right side) of each string.
Returns
-------
substrings : type of caller
"""
return _ops.StrRight(self, nchars).to_expr()
def repeat(self, n):
"""
Returns the argument string repeated n times
Parameters
----------
n : int
Returns
-------
result : string
"""
return _ops.Repeat(self, n).to_expr()
def _translate(self, from_str, to_str):
"""
Returns string with set of 'from' characters replaced
by set of 'to' characters.
from_str[x] is replaced by to_str[x].
To avoid unexpected behavior, from_str should be
    shorter than or equal in length to to_str.
Parameters
----------
from_str : string
to_str : string
Examples
--------
expr = table.strings.translate('a', 'b')
expr = table.string.translate('a', 'bc')
Returns
-------
translated : string
"""
return _ops.Translate(self, from_str, to_str).to_expr()
def _string_find(self, substr, start=None, end=None):
"""
    Returns position (0 indexed) of first occurrence of substring,
optionally after a particular position (0 indexed)
Parameters
----------
substr : string
start : int, default None
end : int, default None
Not currently implemented
Returns
-------
position : int, 0 indexed
"""
if end is not None:
raise NotImplementedError
return _ops.StringFind(self, substr, start, end).to_expr()
def _lpad(self, length, pad=' '):
"""
Returns string of given length by truncating (on right)
or padding (on left) original string
Parameters
----------
length : int
pad : string, default is ' '
Examples
--------
table.strings.lpad(5, '-')
'a' becomes '----a'
'abcdefg' becomes 'abcde'
Returns
-------
padded : string
"""
return _ops.LPad(self, length, pad).to_expr()
def _rpad(self, length, pad=' '):
"""
Returns string of given length by truncating (on right)
or padding (on right) original string
Parameters
----------
length : int
pad : string, default is ' '
Examples
--------
table.strings.rpad(5, '-')
'a' becomes 'a----'
'abcdefg' becomes 'abcde'
Returns
-------
padded : string
"""
return _ops.RPad(self, length, pad).to_expr()
def _find_in_set(self, str_list):
"""
    Returns position (0 indexed) of first occurrence of the argument within
    a list of strings. No string in the list can contain a comma.
Returns -1 if search string isn't found or if search string contains ','
Parameters
----------
str_list : list of strings
Examples
--------
table.strings.find_in_set(['a', 'b'])
Returns
-------
position : int
"""
return _ops.FindInSet(self, str_list).to_expr()
def _string_join(self, strings):
"""
Joins a list of strings together using the calling string as a separator
Parameters
----------
strings : list of strings
Examples
--------
sep = ibis.literal(',')
sep.join(['a','b','c'])
Returns
-------
joined : string
"""
return _ops.StringJoin(self, strings).to_expr()
def _string_like(self, pattern):
"""
Wildcard fuzzy matching function equivalent to the SQL LIKE directive. Use
% as a multiple-character wildcard or _ (underscore) as a single-character
wildcard.
Use re_search or rlike for regex-based matching.
Parameters
----------
pattern : string
Returns
-------
matched : boolean value
"""
return _ops.StringSQLLike(self, pattern).to_expr()
def re_search(arg, pattern):
"""
Search string values using a regular expression. Returns True if the regex
matches a string and False otherwise.
Parameters
----------
pattern : string (regular expression string)
Returns
-------
searched : boolean value
"""
return _ops.RegexSearch(arg, pattern).to_expr()
def regex_extract(arg, pattern, index):
"""
    Returns the capture group at the specified index (0 indexed) from the
    string, based on the given regex pattern
Parameters
----------
pattern : string (regular expression string)
index : int, 0 indexed
Returns
-------
extracted : string
"""
return _ops.RegexExtract(arg, pattern, index).to_expr()
def regex_replace(arg, pattern, replacement):
"""
Replaces match found by regex with replacement string.
Replacement string can also be a regex
Parameters
----------
pattern : string (regular expression string)
replacement : string (can be regular expression string)
Examples
--------
table.strings.replace('(b+)', r'<\1>')
'aaabbbaa' becomes 'aaa<bbb>aaa'
Returns
-------
modified : string
"""
return _ops.RegexReplace(arg, pattern, replacement).to_expr()
def parse_url(arg, extract, key=None):
"""
Returns the portion of a URL corresponding to a part specified
    by 'extract'.
Can optionally specify a key to retrieve an associated value
if extract parameter is 'QUERY'
Parameters
----------
extract : one of {'PROTOCOL', 'HOST', 'PATH', 'REF',
'AUTHORITY', 'FILE', 'USERINFO', 'QUERY'}
key : string (optional)
Examples
--------
parse_url("https://www.youtube.com/watch?v=kEuEcWfewf8&t=10", 'QUERY', 'v')
yields 'kEuEcWfewf8'
Returns
-------
extracted : string
"""
return _ops.ParseURL(arg, extract, key).to_expr()
def _string_contains(arg, substr):
"""
Determine if indicated string is exactly contained in the calling string.
Parameters
----------
substr
Returns
-------
contains : boolean
"""
return arg.like('%{0}%'.format(substr))
def _string_dunder_contains(arg, substr):
raise TypeError('Use val.contains(arg)')
_string_value_methods = dict(
length=_unary_op('length', _ops.StringLength),
lower=_unary_op('lower', _ops.Lowercase),
upper=_unary_op('upper', _ops.Uppercase),
reverse=_unary_op('reverse', _ops.Reverse),
ascii_str=_unary_op('ascii', _ops.StringAscii),
strip=_unary_op('strip', _ops.Strip),
lstrip=_unary_op('lstrip', _ops.LStrip),
rstrip=_unary_op('rstrip', _ops.RStrip),
capitalize=_unary_op('initcap', _ops.Capitalize),
convert_base=convert_base,
__contains__=_string_dunder_contains,
contains=_string_contains,
like=_string_like,
rlike=re_search,
re_search=re_search,
re_extract=regex_extract,
re_replace=regex_replace,
parse_url=parse_url,
substr=_string_substr,
left=_string_left,
right=_string_right,
repeat=repeat,
find=_string_find,
translate=_translate,
find_in_set=_find_in_set,
join=_string_join,
lpad=_lpad,
rpad=_rpad,
)
_add_methods(StringValue, _string_value_methods)
# ---------------------------------------------------------------------
# Timestamp API
def _timestamp_truncate(arg, unit):
"""
Zero out smaller-size units beyond indicated unit. Commonly used for time
series resampling.
Parameters
----------
    unit : string, one of the values in the table below
'Y': year
'Q': quarter
'M': month
'D': day
'W': week
'H': hour
'MI': minute
Returns
-------
truncated : timestamp
"""
return _ops.Truncate(arg, unit).to_expr()
_timestamp_value_methods = dict(
year=_extract_field('year', _ops.ExtractYear),
month=_extract_field('month', _ops.ExtractMonth),
day=_extract_field('day', _ops.ExtractDay),
hour=_extract_field('hour', _ops.ExtractHour),
minute=_extract_field('minute', _ops.ExtractMinute),
second=_extract_field('second', _ops.ExtractSecond),
millisecond=_extract_field('millisecond', _ops.ExtractMillisecond),
truncate=_timestamp_truncate
)
_add_methods(TimestampValue, _timestamp_value_methods)
# ---------------------------------------------------------------------
# Decimal API
_decimal_value_methods = dict(
precision=_unary_op('precision', _ops.DecimalPrecision),
scale=_unary_op('scale', _ops.DecimalScale),
)
_add_methods(DecimalValue, _decimal_value_methods)
# ----------------------------------------------------------------------
# Category API
_category_value_methods = dict(
label=_analytics.category_label
)
_add_methods(CategoryValue, _category_value_methods)
# ---------------------------------------------------------------------
# Table API
_join_classes = {
'inner': _ops.InnerJoin,
'left': _ops.LeftJoin,
'outer': _ops.OuterJoin,
'left_semi': _ops.LeftSemiJoin,
'semi': _ops.LeftSemiJoin,
'anti': _ops.LeftAntiJoin,
'cross': _ops.CrossJoin
}
def join(left, right, predicates=(), how='inner'):
"""
Perform a relational join between two tables. Does not resolve resulting
table schema.
Parameters
----------
left : TableExpr
right : TableExpr
predicates : join expression(s)
how : string, default 'inner'
- 'inner': inner join
- 'left': left join
- 'outer': full outer join
- 'semi' or 'left_semi': left semi join
- 'anti': anti join
Returns
-------
joined : TableExpr
Note, schema is not materialized yet
"""
klass = _join_classes[how.lower()]
if isinstance(predicates, Expr):
predicates = _L.unwrap_ands(predicates)
op = klass(left, right, predicates)
return TableExpr(op)
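# Illustrative sketch (not part of the original module): joining two unbound
# tables on a key column. Since the joined schema is not materialized, a
# projection typically follows; the projection form shown is an assumption.
# left = table([('key', 'string'), ('x', 'double')], 'l')
# right = table([('key', 'string'), ('y', 'double')], 'r')
# joined = join(left, right, [left.key == right.key], how='inner')
# result = joined[left, right.y]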
def cross_join(*args, **kwargs):
"""
Perform a cross join (cartesian product) amongst a list of tables, with
optional set of prefixes to apply to overlapping column names
Parameters
----------
positional args: tables to join
prefixes keyword : prefixes for each table
Not yet implemented
Examples
--------
>>> joined1 = ibis.cross_join(a, b, c, d, e)
    >>> joined2 = ibis.cross_join(a, b, c, prefixes=['a_', 'b_', 'c_'])
Returns
-------
joined : TableExpr
If prefixes not provided, the result schema is not yet materialized
"""
op = _ops.CrossJoin(*args, **kwargs)
return TableExpr(op)
def _table_count(self):
"""
Returns the computed number of rows in the table expression
Returns
-------
count : Int64Scalar
"""
return _ops.Count(self, None).to_expr().name('count')
def _table_info(self, buf=None):
"""
Similar to pandas DataFrame.info. Show column names, types, and null
counts. Output to stdout by default
"""
metrics = [self.count().name('nrows')]
for col in self.columns:
metrics.append(self[col].count().name(col))
metrics = self.aggregate(metrics).execute().loc[0]
names = ['Column', '------'] + self.columns
types = ['Type', '----'] + [repr(x) for x in self.schema().types]
counts = ['Non-null #', '----------'] + [str(x) for x in metrics[1:]]
col_metrics = util.adjoin(2, names, types, counts)
if buf is None:
import sys
buf = sys.stdout
result = ('Table rows: {0}\n\n'
'{1}'
.format(metrics[0], col_metrics))
buf.write(result)
def _table_set_column(table, name, expr):
"""
Replace an existing column with a new expression
Parameters
----------
name : string
Column name to replace
expr : value expression
New data for column
Returns
-------
set_table : TableExpr
New table expression
"""
expr = table._ensure_expr(expr)
if expr._name != name:
expr = expr.name(name)
if name not in table:
raise KeyError('{0} is not in the table'.format(name))
# TODO: This assumes that projection is required; may be backend-dependent
proj_exprs = []
for key in table.columns:
if key == name:
proj_exprs.append(expr)
else:
proj_exprs.append(table[key])
return table.projection(proj_exprs)
def _regular_join_method(name, how, doc=None):
def f(self, other, predicates=()):
return self.join(other, predicates, how=how)
if doc:
f.__doc__ = doc
else:
# XXX
f.__doc__ = join.__doc__
f.__name__ = name
return f
def filter(table, predicates):
"""
Select rows from table based on boolean expressions
Parameters
----------
predicates : boolean array expressions, or list thereof
Returns
-------
filtered_expr : TableExpr
"""
if isinstance(predicates, Expr):
predicates = _L.unwrap_ands(predicates)
predicates = util.promote_list(predicates)
predicates = [ir.bind_expr(table, x) for x in predicates]
resolved_predicates = []
for pred in predicates:
if isinstance(pred, ir.AnalyticExpr):
pred = pred.to_filter()
resolved_predicates.append(pred)
op = _L.apply_filter(table, resolved_predicates)
return TableExpr(op)
def aggregate(table, metrics=None, by=None, having=None, **kwds):
"""
Aggregate a table with a given set of reductions, with grouping
expressions, and post-aggregation filters.
Parameters
----------
table : table expression
metrics : expression or expression list
by : optional, default None
Grouping expressions
having : optional, default None
Post-aggregation filters
Returns
-------
agg_expr : TableExpr
"""
if metrics is None:
metrics = []
for k, v in sorted(kwds.items()):
v = table._ensure_expr(v)
metrics.append(v.name(k))
op = _ops.Aggregation(table, metrics, by=by, having=having)
return TableExpr(op)
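# Illustrative sketch (not part of the original module): grouped aggregation
# with a post-aggregation filter. Column names and the having predicate form
# are assumptions for demonstration only.
# t = table([('g', 'string'), ('x', 'double')], 'demo')
# metric = t.x.sum().name('total')
# agg = aggregate(t, [metric], by=[t.g], having=[t.count() > 10])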
def _table_limit(table, n, offset=0):
"""
    Select the first n rows at the beginning of the table (may not be
    deterministic, depending on the implementation and the presence of a
    sort order).
Parameters
----------
n : int
Rows to include
offset : int, default 0
Number of rows to skip first
Returns
-------
limited : TableExpr
"""
op = _ops.Limit(table, n, offset=offset)
return TableExpr(op)
def _table_sort_by(table, sort_exprs):
"""
Sort table by the indicated column expressions and sort orders
(ascending/descending)
Parameters
----------
sort_exprs : sorting expressions
Must be one of:
- Column name or expression
- Sort key, e.g. desc(col)
- (column name, True (ascending) / False (descending))
Examples
--------
sorted = table.sort_by([('a', True), ('b', False)])
Returns
-------
sorted : TableExpr
"""
op = _ops.SortBy(table, sort_exprs)
return TableExpr(op)
def _table_union(left, right, distinct=False):
"""
Form the table set union of two table expressions having identical
schemas.
Parameters
----------
right : TableExpr
distinct : boolean, default False
Only union distinct rows not occurring in the calling table (this
can be very expensive, be careful)
Returns
-------
union : TableExpr
"""
op = _ops.Union(left, right, distinct=distinct)
return TableExpr(op)
def mutate(table, exprs=None, **kwds):
"""
Convenience function for table projections involving adding columns
Parameters
----------
exprs : list, default None
List of named expressions to add as columns
kwds : keywords for new columns
Examples
--------
expr = table.mutate(qux=table.foo + table.bar, baz=5)
Returns
-------
mutated : TableExpr
"""
if exprs is None:
exprs = []
else:
exprs = util.promote_list(exprs)
for k, v in sorted(kwds.items()):
if util.is_function(v):
v = v(table)
else:
v = as_value_expr(v)
exprs.append(v.name(k))
has_replacement = False
for expr in exprs:
if expr.get_name() in table:
has_replacement = True
if has_replacement:
by_name = dict((x.get_name(), x) for x in exprs)
used = set()
proj_exprs = []
for c in table.columns:
if c in by_name:
proj_exprs.append(by_name[c])
used.add(c)
else:
proj_exprs.append(c)
for x in exprs:
if x.get_name() not in used:
proj_exprs.append(x)
return table.projection(proj_exprs)
else:
return table.projection([table] + exprs)
_table_methods = dict(
aggregate=aggregate,
count=_table_count,
info=_table_info,
limit=_table_limit,
set_column=_table_set_column,
filter=filter,
mutate=mutate,
join=join,
cross_join=cross_join,
inner_join=_regular_join_method('inner_join', 'inner'),
left_join=_regular_join_method('left_join', 'left'),
outer_join=_regular_join_method('outer_join', 'outer'),
semi_join=_regular_join_method('semi_join', 'semi'),
anti_join=_regular_join_method('anti_join', 'anti'),
sort_by=_table_sort_by,
union=_table_union
)
_add_methods(TableExpr, _table_methods)
| apache-2.0 |
BiaDarkia/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 39 | 1856 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
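        # Classic perceptron update: for each misclassified sample, move the
        # weights toward y_i * x_i and adjust the bias by y_i.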
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(max_iter=100, tol=None, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(max_iter=2, shuffle=False, tol=None)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron(max_iter=100)
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |
olologin/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 47 | 2486 | # Author: Tom Dupre la Tour <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def assert_csr_equal(X, Y):
X.eliminate_zeros()
Y.eliminate_zeros()
assert_equal(X.shape[0], Y.shape[0])
assert_equal(X.shape[1], Y.shape[1])
assert_array_equal(X.data, Y.data)
assert_array_equal(X.indices, Y.indices)
assert_array_equal(X.indptr, Y.indptr)
def test_seq_dataset():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
for dataset in (dataset1, dataset2):
for i in range(5):
# next sample
xi_, yi, swi, idx = dataset._next_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
# random sample
xi_, yi, swi, idx = dataset._random_py()
xi = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
assert_csr_equal(xi, X_csr[idx])
assert_equal(yi, y[idx])
assert_equal(swi, sample_weight[idx])
def test_seq_dataset_shuffle():
dataset1 = ArrayDataset(X, y, sample_weight, seed=42)
dataset2 = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
y, sample_weight, seed=42)
# not shuffled
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, i)
assert_equal(idx2, i)
for i in range(5):
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
seed = 77
dataset1._shuffle_py(seed)
dataset2._shuffle_py(seed)
for i in range(5):
_, _, _, idx1 = dataset1._next_py()
_, _, _, idx2 = dataset2._next_py()
assert_equal(idx1, idx2)
_, _, _, idx1 = dataset1._random_py()
_, _, _, idx2 = dataset2._random_py()
assert_equal(idx1, idx2)
| bsd-3-clause |
gkioxari/RstarCNN | lib/fast_rcnn/test_stanford40.py | 1 | 10561 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from fast_rcnn.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
from utils.cython_nms import nms
import cPickle
import heapq
from utils.blob import im_list_to_blob
import os
import scipy.io as sio
import utils.cython_bbox
import pdb
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois, levels = _project_im_rois(im_rois, im_scale_factors)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None, 'secondary_rois': None}
blobs['data'], im_scale_factors = _get_image_blob(im)
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
blobs['secondary_rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def _bbox_pred(boxes, box_deltas):
"""Transform the set of class-agnostic boxes into class-specific boxes
by applying the predicted offsets (box_deltas)
"""
if boxes.shape[0] == 0:
return np.zeros((0, box_deltas.shape[1]))
boxes = boxes.astype(np.float, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + cfg.EPS
heights = boxes[:, 3] - boxes[:, 1] + cfg.EPS
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = box_deltas[:, 0::4]
dy = box_deltas[:, 1::4]
dw = box_deltas[:, 2::4]
dh = box_deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(box_deltas.shape)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def im_detect(net, im, boxes, gt_label):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, unused_im_scale_factors = _get_blobs(im, boxes)
base_shape = blobs['data'].shape
gt_inds = np.where(gt_label>-1)[0]
num_rois = len(gt_inds)
blobs_rois = blobs['rois'][gt_inds].astype(np.float32, copy=False)
blobs_rois = blobs_rois[:, :, np.newaxis, np.newaxis]
non_gt_inds = np.where(gt_label==-1)[0]
num_sec_rois = len(non_gt_inds)
blobs_sec_rois = blobs['secondary_rois'][non_gt_inds].astype(np.float32, copy=False)
blobs_sec_rois = blobs_sec_rois[:, :, np.newaxis, np.newaxis]
# reshape network inputs
net.blobs['data'].reshape(base_shape[0], base_shape[1],
base_shape[2], base_shape[3])
net.blobs['rois'].reshape(num_rois, 5, 1, 1)
net.blobs['secondary_rois'].reshape(num_sec_rois, 5, 1, 1)
blobs_out = net.forward(data=blobs['data'].astype(np.float32, copy=False),
rois=blobs_rois,
secondary_rois = blobs_sec_rois)
scores = blobs_out['cls_score']
secondary_scores = blobs_out['context_cls_score']
gt_boxes = boxes[gt_inds]
sec_boxes = boxes[non_gt_inds]
# Compute overlap
boxes_overlaps = \
utils.cython_bbox.bbox_overlaps(sec_boxes.astype(np.float),
gt_boxes.astype(np.float))
selected_boxes = np.zeros((scores.shape[1], 4, gt_boxes.shape[0]))
# Sum of Max
for i in xrange(gt_boxes.shape[0]):
keep_inds = np.where((boxes_overlaps[:,i]>=cfg.TEST.IOU_LB) &
(boxes_overlaps[:,i]<=cfg.TEST.IOU_UB))[0]
if keep_inds.size > 0:
this_scores = np.amax(secondary_scores[keep_inds,:], axis=0)
scores[i,:] = scores[i,:]+this_scores
winner_ind = np.argmax(secondary_scores[keep_inds,:], axis=0)
selected_boxes[:,:,i] = sec_boxes[keep_inds[winner_ind]]
# Softmax
scores = np.exp(scores-np.amax(scores))
scores = scores / np.array(np.sum(scores, axis=1), ndmin=2).T
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = _bbox_pred(gt_boxes, box_deltas)
pred_boxes = _clip_boxes(pred_boxes, im.shape)
return scores, secondary_scores, selected_boxes
def vis_detections(im, boxes, scores, classes):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
im = im[:, :, (2, 1, 0)]
for i in xrange(1):
pdb.set_trace()
bbox = boxes[i, :4]
sscore = scores[i, :]
cls_ind = sscore.argmax()
sscore = sscore.max()
#plt.cla()
plt.imshow(im)
plt.gca().add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='r', linewidth=3)
)
plt.title('{} {:.3f}'.format(classes[cls_ind], sscore))
plt.show()
def test_net(net, imdb):
"""Test a R*CNN network on an image database."""
num_images = len(imdb.image_index)
num_classes = imdb.num_classes
all_scores = {}
all_labels = {}
for a in xrange(num_classes):
all_scores[imdb.classes[a]] = np.zeros((num_images,1), dtype = np.float32)
all_labels[imdb.classes[a]] = -np.ones((num_images,1), dtype = np.int16)
output_dir = get_output_dir(imdb, net)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# timers
_t = {'im_detect' : Timer()}
roidb = imdb.roidb
for i in xrange(num_images):
im = cv2.imread(imdb.image_path_at(i))
gt = np.where(roidb[i]['gt_classes']>-1)[0]
gt_boxes = roidb[i]['boxes'][gt]
gt_class = roidb[i]['gt_classes'][gt]
assert (gt_boxes.shape[0]==1)
_t['im_detect'].tic()
scores, secondary_scores, selected_boxes = im_detect(net, im, roidb[i]['boxes'], roidb[i]['gt_classes'])
_t['im_detect'].toc()
# Visualize detections
# vis_detections(im, gt_boxes, scores, imdb.classes)
for a in xrange(num_classes):
all_scores[imdb.classes[a]][i] = scores[0,a]
all_labels[imdb.classes[a]][i] = gt_class
print 'im_detect: {:d}/{:d} {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time)
#f = {'images': imdb.image_index, 'scores': all_boxes, 'classes': imdb.classes, 'context_boxes': all_selected_boxes}
#output_dir = os.path.join(os.path.dirname(__file__), 'output',
# 'Action_MIL_wContextBoxes_'+ imdb.name)
#if not os.path.exists(output_dir):
# os.makedirs(output_dir)
#det_file = os.path.join(output_dir, 'res.mat')
#sio.savemat(det_file, {'data': f})
#print 'Saving in', det_file
imdb._ap(all_scores, all_labels)
| bsd-2-clause |
JoelDan192/TemporalReconstruction_SE | create_votes.py | 1 | 7142 | import pandas as pd
questions = pd.DataFrame.from_csv('question_simple.csv', index_col=None)
a_questions = pd.DataFrame.from_csv('question_votes.csv', index_col=None)
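# The lambdas below build running totals over a question's votes, ordered by
# vote-creation time; VoteType 2/3 are taken to be up/down votes (assumption
# based on the score arithmetic used throughout this script).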
get_votes_qv = lambda df: pd.Series((df.VoteType==2).cumsum() + (df.VoteType==3).cumsum(),name='QVotes')
get_score_qv = lambda df: pd.Series((df.VoteType==2).cumsum() - (df.VoteType==3).cumsum(),name='QScore')
predictors_qvotes = ['QuestionId','QuestionCreation','QuestionLastActivity','AcceptedAnsId','AcceptedDate','QVoteCreation']
f_q = lambda df: pd.concat([df[cname] for cname in df.columns.values.tolist() if cname in predictors_qvotes]+[get_score_qv(df),get_votes_qv(df)],axis=1)
a_questions = a_questions.sort_values(by='QVoteCreation').groupby(['QuestionId']).apply(f_q)
a_votes = pd.DataFrame.from_csv('votes-answers.csv', index_col=None)
a_votes = pd.merge(a_votes, a_questions, how='inner', on=['QuestionId'],suffixes=['_v', '_q'])
predictors_raw_votans =['VoteId','VoteCreation','AnsCreation','VoteType','AnsId','QuestionId','AnsWordCount','QuestionCreation','AcceptedAnsId','AcceptedDate']
valid_qavotes = lambda df: df[df.VoteCreation>=df.QVoteCreation]
#Use twice valid_qavotes, could use once to improve efficiency, but check correctness of index selection
get_max_qv = lambda df: valid_qavotes(df).loc[valid_qavotes(df).QVotes.idxmax(),['QScore','QVotes']].squeeze()
get_latest_qv = lambda df : pd.Series([0,0],index=['QScore','QVotes']) if not (df.VoteCreation>=df.QVoteCreation).any() else get_max_qv(df)
get_head = lambda df: [df[cname].iloc[0] for cname in df.columns.values.tolist() if cname in predictors_raw_votans]
get_qv = lambda df : pd.Series(get_head(df),index=predictors_raw_votans).append(get_latest_qv(df)).to_frame()
a_votes = a_votes.sort_values(by='VoteCreation').groupby(['VoteId']).apply(get_qv).unstack(level=-1).reset_index(level=[0],drop=True)
a_votes.drop(a_votes.columns[[0]], axis=1, inplace=True)
a_votes.columns = a_votes.columns.droplevel()
date_placeholder = '2016-07-20T00:00:00.000' #Date After Data Set Collection
#a_votes.loc[a_votes.AcceptedDate == 'None','AcceptedDate'] = pd.to_datetime(date_placeholder)
a_votes['AcceptedDate'].fillna(pd.to_datetime(date_placeholder),inplace=True)
a_votes['AcceptedAge'] = (pd.to_datetime(a_votes.AcceptedDate,format='%Y-%m-%d %H:%M:%S.%f')
-pd.to_datetime(a_votes.QuestionCreation,format='%Y-%m-%d %H:%M:%S.%f')).apply(lambda x: x.astype('timedelta64[D]').item().days)
a_votes['AcceptedAge'] = a_votes['AcceptedAge'] + 1
a_votes.loc[a_votes.AcceptedDate == pd.to_datetime(date_placeholder), 'AcceptedAge'] = -1
a_votes['Age'] = (pd.to_datetime(a_votes.VoteCreation,format='%Y-%m-%d %H:%M:%S.%f')
-pd.to_datetime(a_votes.QuestionCreation,format='%Y-%m-%d %H:%M:%S.%f')).apply(lambda x: x.astype('timedelta64[D]').item().days)
a_votes['Age'] = a_votes['Age'] + 1
a_votes.drop(a_votes.columns[[0, 1, 6, 8]], axis=1, inplace=True)
get_score = lambda df: sum(df.VoteType==2) - sum(df.VoteType==3)
get_votes = lambda df: sum(df.VoteType==2) + sum(df.VoteType==3)
predictors = ['QuestionId','AnsWordCount','AcceptedAnsId','AcceptedAge','QScore',
'QVotes','Score','Votes','Upvotes','Downvotes']
f = lambda df: pd.Series([df.QuestionId.iloc[0],df.AnsWordCount.iloc[0],df.AcceptedAnsId.iloc[0],df.AcceptedAge.iloc[0],
df.QScore.iloc[0],df.QVotes.iloc[0],get_score(df),get_votes(df),sum(df.VoteType==2),sum(df.VoteType==3)],index = predictors)
a_groups = a_votes.sort_values(by='Age').groupby(['AnsId','Age']).apply(f)
a_groups = a_groups.reset_index(level=[0,1],drop=False)
cum_votes = lambda df: pd.Series(df['Votes'].cumsum(),name='CumVotes')
cum_score = lambda df: pd.Series(df['Score'].cumsum(),name='CumScore')
get_cumulative =lambda df: pd.concat([df[cname] for cname in df.columns.values.tolist()] + [cum_votes(df),cum_score(df)],axis=1)
ff = lambda df: get_cumulative(df.sort_values(by='Age'))
a_groups_c = a_groups.groupby(['AnsId']).apply(ff).reset_index(level=[0],drop=True)
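# Global upvote fraction, used below as an additive prior (with +1 on the
# vote count) when re-scoring answers, i.e. a simple smoothed quality score.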
prior_quality = float(a_groups_c['Upvotes'].sum())/(a_groups_c['Upvotes'].sum() + a_groups_c['Downvotes'].sum())
a_groups_c['ReScore'] = (a_groups_c['CumScore']+prior_quality)/(a_groups_c['CumVotes']+1.0)
a_groups_c['QReScore'] = a_groups_c['QScore']/(a_groups_c['QVotes']+1.0)
votes_com_f = a_groups_c
from itertools import izip
def rank_ans(df,score_only,re_score):
rk_name = "ReScore_rank" if re_score else "AnsRank"
def rank_iter():
cache = {}
accepted = 0
for row in df.itertuples():
if re_score:
cache[row.AnsId] = row.ReScore
else :
cache[row.AnsId] = row.Score
# rank, nb_ans
if (not score_only) and row.AcceptedAge>-1 and (row.AnsId == row.AcceptedAnsId) and row.Age >=row.AcceptedAge:
accepted = 1
if row.AnsId in cache:
del cache[row.AnsId]
yield (1,len(cache)+accepted,row.Index)
else :
rank = sorted(cache, key= lambda k:cache[k],reverse=True).index(row.AnsId) + 1 + accepted
yield (rank,len(cache)+accepted,row.Index)
ranks, ans_counts, indices = izip(*list(rank_iter())) #TODO: optimize for the future
return [pd.Series(ranks,name=rk_name, index=indices), pd.Series(ans_counts,name="Ans_count", index=indices)]
predictors = ['QuestionId','AnsId','AnsWordCount','AcceptedAnsId','Age',
'Score','Votes','Upvotes','Downvotes','CumScore','CumVotes','QScore'
,'QVotes','ReScore','QReScore','AnsRank','ReScore_rank']
get_ranks = lambda df,score_only=False,re_score=False: pd.concat(
[df[cname] for cname in df.columns.values.tolist() if cname in predictors] + rank_ans(df,score_only,re_score),axis=1)
sort_age_score = lambda df: df.sort_values(by=['Age','Score'],ascending=[True,False])
votes_com_f = votes_com_f.groupby(['QuestionId']).apply(
lambda df: get_ranks(sort_age_score(df))).reset_index(drop=True)
votes_com_f = votes_com_f.groupby(['QuestionId']).apply(
lambda df: get_ranks(sort_age_score(df),score_only=True,re_score=True)).reset_index(drop=True)
votes_com_f['Pbias'] = 1.0/votes_com_f['AnsRank']
votes_com_f['DRank'] = votes_com_f['AnsRank'] - votes_com_f['ReScore_rank']
#AnsRank and Ans_count define unique EPbias
sum_by_rank = lambda df: df.groupby('AnsRank').apply(
lambda df: pd.Series([df.Votes.sum()],name='EPbias').to_frame()).unstack(level=-1).reset_index(level=0,drop=False)
get_ratio = lambda df: sum_by_rank(df).EPbias/(sum_by_rank(df).EPbias.sum())
ratio_per_rank = lambda df: pd.concat([sum_by_rank(df).AnsRank, get_ratio(df)],axis=1)
get_position_bias = lambda df: pd.merge(df,ratio_per_rank(df),how='inner',on=['AnsRank'])
votes = votes_com_f.groupby(['Ans_count']).apply(get_position_bias).reset_index(level=[0,1],drop=True)
votes.columns.values[-1] = "EPbias"
test_epbias = votes.groupby(['Ans_count','AnsRank']).first().reset_index(
level=[0,1],drop=False)[['Ans_count','AnsRank','EPbias']]
test_epbias.to_csv('EPbiasbyAnsCountRank.csv')
votes.to_csv(path_or_buf='AnsVotes_TSeries.csv')
| mit |
Adai0808/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
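# Optional sanity check (illustrative): for a hard-margin linear SVM the distance
# between the two dashed margin lines plotted above equals 2 / ||w||, which can be
# computed directly from the fitted coefficients.
margin_width = 2.0 / np.linalg.norm(w)
print("margin width: %.3f" % margin_width)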
| bsd-3-clause |
probml/pyprobml | scripts/kmeans_silhouette_old.py | 1 | 5953 |
# K-means clustering in 2d
# Code is from chapter 9 of
# https://github.com/ageron/handson-ml2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
# To plot pretty figures
#%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
if 0:
blob_centers = np.array(
[[ 0.2, 2.3],
[-1.5 , 2.3],
[-2.8, 1.8],
[-2.8, 2.8],
[-2.8, 1.3]])
blob_std = np.array([0.4, 0.3, 0.1, 0.1, 0.1])
X, y = make_blobs(n_samples=2000, centers=blob_centers,
cluster_std=blob_std, random_state=7)
geron_data = True
if 1:
# two off-diagonal blobs
X1, _ = make_blobs(n_samples=1000, centers=((4, -4), (0, 0)), random_state=42)
X1 = X1.dot(np.array([[0.374, 0.95], [0.732, 0.598]]))
# three spherical blobs
blob_centers = np.array(
[[ -4, 1],
[-4 , 3],
[-4, -2]])
s = 0.5
blob_std = np.array([s, s, s])
X2, _ = make_blobs(n_samples=1000, centers=blob_centers,
cluster_std=blob_std, random_state=7)
X = np.r_[X1, X2]
geron_data= False
kmeans_per_k = [KMeans(n_clusters=k, random_state=42).fit(X)
for k in range(1, 10)]
inertias = [model.inertia_ for model in kmeans_per_k[1:]]
plt.figure(figsize=(8, 3))
plt.plot(range(2, 10), inertias, "bo-")
plt.xlabel("$k$", fontsize=14)
plt.ylabel("Distortion", fontsize=14)
if geron_data:
plt.annotate('Elbow',
xy=(4, inertias[3]),
xytext=(0.55, 0.55),
textcoords='figure fraction',
fontsize=16,
arrowprops=dict(facecolor='black', shrink=0.1)
)
#plt.axis([1, 8.5, 0, 1300])
plt.tight_layout()
plt.savefig("../figures/kmeans_distortion_vs_k.pdf", dpi=300)
plt.show()
silhouette_scores = [silhouette_score(X, model.labels_)
for model in kmeans_per_k[1:]]
plt.figure(figsize=(8, 3))
plt.plot(range(2, 10), silhouette_scores, "bo-")
plt.xlabel("$k$", fontsize=14)
plt.ylabel("Silhouette score", fontsize=14)
#plt.axis([1.8, 8.5, 0.55, 0.7])
plt.tight_layout()
plt.savefig("../figures/kmeans_silhouette_vs_k.pdf", dpi=300)
plt.show()
##########
from sklearn.metrics import silhouette_samples
from matplotlib.ticker import FixedLocator, FixedFormatter
plt.figure(figsize=(11, 9))
for k in (3, 4, 5, 6):
plt.subplot(2, 2, k - 2)
y_pred = kmeans_per_k[k - 1].labels_
silhouette_coefficients = silhouette_samples(X, y_pred)
padding = len(X) // 30
pos = padding
ticks = []
cmap = cm.get_cmap("Pastel2")
colors = [cmap(i) for i in range(k)]
for i in range(k):
coeffs = silhouette_coefficients[y_pred == i]
coeffs.sort()
color = mpl.cm.Spectral(i / k)
#color = colors[i]
plt.fill_betweenx(np.arange(pos, pos + len(coeffs)), 0, coeffs,
facecolor=color, edgecolor=color, alpha=0.7)
#cmap = "Pastel2"
#plt.fill_betweenx(np.arange(pos, pos + len(coeffs)), 0, coeffs,
# cmap=cmap, alpha=0.7)
ticks.append(pos + len(coeffs) // 2)
pos += len(coeffs) + padding
plt.gca().yaxis.set_major_locator(FixedLocator(ticks))
plt.gca().yaxis.set_major_formatter(FixedFormatter(range(k)))
if k in (3, 5):
plt.ylabel("Cluster")
if k in (5, 6):
plt.gca().set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
plt.xlabel("Silhouette Coefficient")
else:
plt.tick_params(labelbottom=False)
score = silhouette_scores[k - 2]
plt.axvline(x=score, color="red", linestyle="--")
plt.title("$k={}, score={:0.2f}$".format(k, score), fontsize=16)
plt.tight_layout()
plt.savefig("../figures/kmeans_silhouette_diagram.pdf", dpi=300)
plt.show()
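# Background check (illustrative): each sample's silhouette is s = (b - a) / max(a, b),
# where a is the mean intra-cluster distance and b is the mean distance to the nearest
# other cluster; silhouette_score is simply the mean of the per-sample values.
# The lines below verify this for the (arbitrary) choice k=5.
sil_samples_k5 = silhouette_samples(X, kmeans_per_k[5 - 1].labels_)
assert np.isclose(sil_samples_k5.mean(),
                  silhouette_score(X, kmeans_per_k[5 - 1].labels_))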
##########
def plot_data(X):
plt.plot(X[:, 0], X[:, 1], 'k.', markersize=2)
def plot_centroids(centroids, weights=None, circle_color='w', cross_color='k'):
if weights is not None:
centroids = centroids[weights > weights.max() / 10]
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='o', s=30, linewidths=8,
color=circle_color, zorder=10, alpha=0.9)
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=50, linewidths=50,
color=cross_color, zorder=11, alpha=1)
def plot_decision_boundaries(clusterer, X, K, resolution=1000):
mins = X.min(axis=0) - 0.1
maxs = X.max(axis=0) + 0.1
xx, yy = np.meshgrid(np.linspace(mins[0], maxs[0], resolution),
np.linspace(mins[1], maxs[1], resolution))
Z = clusterer.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
print(np.unique(Z))
#cmap = [mpl.cm.Spectral( (i / K)) for i in range(K)]
cmap ="Pastel2"
#cmap = mpl.cm.Spectral(K)
plt.contourf(Z, extent=(mins[0], maxs[0], mins[1], maxs[1]),cmap=cmap)
##cmap = cm.get_cmap("Pastel2")
#colors = [cmap(i) for i in range(K)]
#print(colors)
#plt.contourf(Z, extent=(mins[0], maxs[0], mins[1], maxs[1]),colors=colors)
plt.contour(Z, extent=(mins[0], maxs[0], mins[1], maxs[1]),
linewidths=1, colors='k')
plot_data(X)
plot_centroids(clusterer.cluster_centers_)
#K, D = clusterer.cluster_centers_.shape
plt.title(f'K={K}')
plt.figure(figsize=(11, 9))
for k in (3, 4, 5, 6):
plt.subplot(2, 2, k - 2)
plot_decision_boundaries(kmeans_per_k[k-1], X, k)
plt.tight_layout()
plt.savefig("../figures/kmeans_silhouette_voronoi.pdf", dpi=300)
plt.show()
X_new = np.array([[0, 2], [3, 2], [-3, 3], [-3, 2.5]])
clusterer = kmeans_per_k[3-1]
Z = clusterer.predict(X_new)
print(Z)
| mit |
RachitKansal/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
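# Optional extra statistic (illustrative): samples labelled -1 by DBSCAN are noise,
# i.e. points that are neither core samples nor within eps of a core sample.
n_noise_ = list(labels).count(-1)
print('Estimated number of noise points: %d' % n_noise_)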
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/tseries/tools.py | 7 | 27601 | from datetime import datetime, timedelta, time
import numpy as np
from collections import MutableMapping
import pandas.lib as lib
import pandas.tslib as tslib
from pandas.types.common import (_ensure_object,
is_datetime64_ns_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_integer_dtype,
is_list_like)
from pandas.types.generic import (ABCIndexClass, ABCSeries,
ABCDataFrame)
from pandas.types.missing import notnull
import pandas.compat as compat
from pandas.util.decorators import deprecate_kwarg
_DATEUTIL_LEXER_SPLIT = None
try:
# Since these are private methods from dateutil, it is safely imported
# here so in case this interface changes, pandas will just fallback
# to not using the functionality
from dateutil.parser import _timelex
if hasattr(_timelex, 'split'):
def _lexer_split_from_str(dt_str):
# The StringIO(str(_)) is for dateutil 2.2 compatibility
return _timelex.split(compat.StringIO(str(dt_str)))
_DATEUTIL_LEXER_SPLIT = _lexer_split_from_str
except (ImportError, AttributeError):
pass
def _infer_tzinfo(start, end):
def _infer(a, b):
tz = a.tzinfo
if b and b.tzinfo:
if not (tslib.get_timezone(tz) == tslib.get_timezone(b.tzinfo)):
raise AssertionError('Inputs must both have the same timezone,'
' {0} != {1}'.format(tz, b.tzinfo))
return tz
tz = None
if start is not None:
tz = _infer(start, end)
elif end is not None:
tz = _infer(end, start)
return tz
def _guess_datetime_format(dt_str, dayfirst=False,
dt_str_parse=compat.parse_date,
dt_str_split=_DATEUTIL_LEXER_SPLIT):
"""
Guess the datetime format of a given datetime string.
Parameters
----------
dt_str : string, datetime string to guess the format of
dayfirst : boolean, default False
If True parses dates with the day first, eg 20/01/2005
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug).
dt_str_parse : function, defaults to `compat.parse_date` (dateutil)
This function should take in a datetime string and return
a `datetime.datetime` guess that the datetime string represents
dt_str_split : function, defaults to `_DATEUTIL_LEXER_SPLIT` (dateutil)
This function should take in a datetime string and return
a list of strings, the guess of the various specific parts
e.g. '2011/12/30' -> ['2011', '/', '12', '/', '30']
Returns
-------
ret : datetime format string (for `strftime` or `strptime`)
"""
if dt_str_parse is None or dt_str_split is None:
return None
if not isinstance(dt_str, compat.string_types):
return None
day_attribute_and_format = (('day',), '%d', 2)
# attr name, format, padding (if any)
datetime_attrs_to_format = [
(('year', 'month', 'day'), '%Y%m%d', 0),
(('year',), '%Y', 0),
(('month',), '%B', 0),
(('month',), '%b', 0),
(('month',), '%m', 2),
day_attribute_and_format,
(('hour',), '%H', 2),
(('minute',), '%M', 2),
(('second',), '%S', 2),
(('microsecond',), '%f', 6),
(('second', 'microsecond'), '%S.%f', 0),
]
if dayfirst:
datetime_attrs_to_format.remove(day_attribute_and_format)
datetime_attrs_to_format.insert(0, day_attribute_and_format)
try:
parsed_datetime = dt_str_parse(dt_str, dayfirst=dayfirst)
except:
# In case the datetime can't be parsed, its format cannot be guessed
return None
if parsed_datetime is None:
return None
try:
tokens = dt_str_split(dt_str)
except:
# In case the datetime string can't be split, its format cannot
# be guessed
return None
format_guess = [None] * len(tokens)
found_attrs = set()
for attrs, attr_format, padding in datetime_attrs_to_format:
# If a given attribute has been placed in the format string, skip
        # over other formats for that same underlying attribute (i.e., month
# can be represented in multiple different ways)
if set(attrs) & found_attrs:
continue
if all(getattr(parsed_datetime, attr) is not None for attr in attrs):
for i, token_format in enumerate(format_guess):
token_filled = tokens[i].zfill(padding)
if (token_format is None and
token_filled == parsed_datetime.strftime(attr_format)):
format_guess[i] = attr_format
tokens[i] = token_filled
found_attrs.update(attrs)
break
# Only consider it a valid guess if we have a year, month and day
if len(set(['year', 'month', 'day']) & found_attrs) != 3:
return None
output_format = []
for i, guess in enumerate(format_guess):
if guess is not None:
# Either fill in the format placeholder (like %Y)
output_format.append(guess)
else:
            # Or just the token separator (i.e., the dashes in "01-01-2013")
try:
# If the token is numeric, then we likely didn't parse it
# properly, so our guess is wrong
float(tokens[i])
return None
except ValueError:
pass
output_format.append(tokens[i])
guessed_format = ''.join(output_format)
# rebuild string, capturing any inferred padding
dt_str = ''.join(tokens)
if parsed_datetime.strftime(guessed_format) == dt_str:
return guessed_format
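# Illustrative behaviour (a sketch, not an exhaustive spec): with the dateutil-backed
# helpers above, a string such as '2011-12-30 00:00:00' is expected to yield the guess
# '%Y-%m-%d %H:%M:%S', while a string whose year, month and day cannot all be matched
# makes the function return None.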
def _guess_datetime_format_for_array(arr, **kwargs):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notnull(arr).nonzero()[0]
if len(non_nan_elements):
return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)
@deprecate_kwarg(old_arg_name='coerce', new_arg_name='errors',
mapping={True: 'coerce', False: 'raise'})
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True, coerce=None,
unit=None, infer_datetime_format=False):
"""
Convert argument to datetime.
Parameters
----------
    arg : string, datetime, list, tuple, 1-d array, Series,
        or DataFrame/dict-like
        .. versionadded: 0.18.1 (DataFrame/dict-like input)
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
dayfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
yearfirst : boolean, default False
Specify a date parse order if `arg` is str or its list-likes.
- If True parses dates with the year first, eg 10/11/12 is parsed as
2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst takes precedence
          (the same as with dateutil).
Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil behavior).
.. versionadded: 0.16.1
utc : boolean, default None
Return UTC DatetimeIndex if True (converting any tz-aware
datetime.datetime objects as well).
box : boolean, default True
- If True returns a DatetimeIndex
- If False returns ndarray of values.
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
exact : boolean, True by default
- If True, require an exact format match.
- If False, allow the format to match anywhere in the target string.
unit : string, default 'ns'
        unit of the arg (D,s,ms,us,ns), denoting the unit of an epoch time
        (e.g. a unix timestamp) given as an integer/float number.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
    return will have datetime.datetime type (or corresponding array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns'] or plurals of the same
>>> df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
>>> pd.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
    Passing infer_datetime_format=True can often speed up parsing
    if the strings are not exactly in ISO8601 format but follow a regular format.
>>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000']*1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> %timeit pd.to_datetime(s,infer_datetime_format=True)
100 loops, best of 3: 10.4 ms per loop
>>> %timeit pd.to_datetime(s,infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop
"""
from pandas.tseries.index import DatetimeIndex
tz = 'utc' if utc else None
def _convert_listlike(arg, box, format, name=None, tz=tz):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
# these are shortcutable
if is_datetime64_ns_dtype(arg):
if box and not isinstance(arg, DatetimeIndex):
try:
return DatetimeIndex(arg, tz=tz, name=name)
except ValueError:
pass
return arg
elif is_datetime64tz_dtype(arg):
if not isinstance(arg, DatetimeIndex):
return DatetimeIndex(arg, tz=tz, name=name)
if utc:
arg = arg.tz_convert(None).tz_localize('UTC')
return arg
elif unit is not None:
if format is not None:
raise ValueError("cannot specify both format and unit")
arg = getattr(arg, 'values', arg)
result = tslib.array_with_unit_to_datetime(arg, unit,
errors=errors)
if box:
if errors == 'ignore':
from pandas import Index
return Index(result)
return DatetimeIndex(result, tz=tz, name=name)
return result
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, datetime, list, tuple, '
'1-d array, or Series')
arg = _ensure_object(arg)
require_iso8601 = False
if infer_datetime_format and format is None:
format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)
if format is not None:
# There is a special fast-path for iso8601 formatted
# datetime strings, so in those cases don't use the inferred
# format because this path makes process slower in this
# special case
format_is_iso8601 = _format_is_iso(format)
if format_is_iso8601:
require_iso8601 = not infer_datetime_format
format = None
try:
result = None
if format is not None:
# shortcut formatting here
if format == '%Y%m%d':
try:
result = _attempt_YYYYMMDD(arg, errors=errors)
except:
raise ValueError("cannot convert the input to "
"'%Y%m%d' date format")
# fallback
if result is None:
try:
result = tslib.array_strptime(arg, format, exact=exact,
errors=errors)
except tslib.OutOfBoundsDatetime:
if errors == 'raise':
raise
result = arg
except ValueError:
# if format was inferred, try falling back
# to array_to_datetime - terminate here
# for specified formats
if not infer_datetime_format:
if errors == 'raise':
raise
result = arg
if result is None and (format is None or infer_datetime_format):
result = tslib.array_to_datetime(
arg,
errors=errors,
utc=utc,
dayfirst=dayfirst,
yearfirst=yearfirst,
require_iso8601=require_iso8601
)
if is_datetime64_dtype(result) and box:
result = DatetimeIndex(result, tz=tz, name=name)
return result
except ValueError as e:
try:
values, tz = tslib.datetime_to_datetime64(arg)
return DatetimeIndex._simple_new(values, name=name, tz=tz)
except (ValueError, TypeError):
raise e
if arg is None:
return arg
elif isinstance(arg, tslib.Timestamp):
return arg
elif isinstance(arg, ABCSeries):
from pandas import Series
values = _convert_listlike(arg._values, False, format)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, (ABCDataFrame, MutableMapping)):
return _assemble_from_unit_mappings(arg, errors=errors)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, box, format, name=arg.name)
elif is_list_like(arg):
return _convert_listlike(arg, box, format)
return _convert_listlike(np.array([arg]), box, format)[0]
# mappings for assembling units
_unit_map = {'year': 'year',
'years': 'year',
'month': 'month',
'months': 'month',
'day': 'day',
'days': 'day',
'hour': 'h',
'hours': 'h',
'minute': 'm',
'minutes': 'm',
'second': 's',
'seconds': 's',
'ms': 'ms',
'millisecond': 'ms',
'milliseconds': 'ms',
'us': 'us',
'microsecond': 'us',
'microseconds': 'us',
'ns': 'ns',
'nanosecond': 'ns',
'nanoseconds': 'ns'
}
def _assemble_from_unit_mappings(arg, errors):
"""
    assemble the unit-specified fields from the arg (DataFrame)
Return a Series for actual parsing
Parameters
----------
arg : DataFrame
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
Returns
-------
Series
"""
from pandas import to_timedelta, to_numeric, DataFrame
arg = DataFrame(arg)
if not arg.columns.is_unique:
raise ValueError("cannot assemble with duplicate keys")
# replace passed unit with _unit_map
def f(value):
if value in _unit_map:
return _unit_map[value]
        # 'm' is case significant, so try an exact (case-sensitive) match first
if value.lower() in _unit_map:
return _unit_map[value.lower()]
return value
unit = {k: f(k) for k in arg.keys()}
unit_rev = {v: k for k, v in unit.items()}
# we require at least Ymd
required = ['year', 'month', 'day']
req = sorted(list(set(required) - set(unit_rev.keys())))
if len(req):
raise ValueError("to assemble mappings requires at "
"least that [year, month, day] be specified: "
"[{0}] is missing".format(','.join(req)))
# keys we don't recognize
excess = sorted(list(set(unit_rev.keys()) - set(_unit_map.values())))
if len(excess):
raise ValueError("extra keys have been passed "
"to the datetime assemblage: "
"[{0}]".format(','.join(excess)))
def coerce(values):
        # we allow coercion if errors allows it
values = to_numeric(values, errors=errors)
# prevent overflow in case of int8 or int16
if is_integer_dtype(values):
values = values.astype('int64', copy=False)
return values
values = (coerce(arg[unit_rev['year']]) * 10000 +
coerce(arg[unit_rev['month']]) * 100 +
coerce(arg[unit_rev['day']]))
try:
values = to_datetime(values, format='%Y%m%d', errors=errors)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the "
"datetimes: {0}".format(e))
for u in ['h', 'm', 's', 'ms', 'us', 'ns']:
value = unit_rev.get(u)
if value is not None and value in arg:
try:
values += to_timedelta(coerce(arg[value]),
unit=u,
errors=errors)
except (TypeError, ValueError) as e:
raise ValueError("cannot assemble the datetimes "
"[{0}]: {1}".format(value, e))
return values
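# Worked example (illustrative): the year/month/day columns are first collapsed to a
# YYYYMMDD integer via year * 10000 + month * 100 + day and parsed with '%Y%m%d';
# the sub-day fields are then added as timedeltas, so a frame like
# {'year': [2015], 'month': [2], 'day': [4], 'ms': [10]} is expected to assemble to
# Timestamp('2015-02-04 00:00:00.010').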
def _attempt_YYYYMMDD(arg, errors):
""" try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
    arg is passed in as an object dtype, but could really be ints/strings
    with nan-like values and/or floats (e.g. with nan)
Parameters
----------
arg : passed value
errors : 'raise','ignore','coerce'
"""
def calc(carg):
# calculate the actual result
carg = carg.astype(object)
parsed = lib.try_parse_year_month_day(carg / 10000,
carg / 100 % 100,
carg % 100)
return tslib.array_to_datetime(parsed, errors=errors)
def calc_with_mask(carg, mask):
result = np.empty(carg.shape, dtype='M8[ns]')
iresult = result.view('i8')
iresult[~mask] = tslib.iNaT
result[mask] = calc(carg[mask].astype(np.float64).astype(np.int64)). \
astype('M8[ns]')
return result
# try intlike / strings that are ints
try:
return calc(arg.astype(np.int64))
except:
pass
# a float with actual np.nan
try:
carg = arg.astype(np.float64)
return calc_with_mask(carg, notnull(carg))
except:
pass
# string with NaN-like
try:
mask = ~lib.ismember(arg, tslib._nat_strings)
return calc_with_mask(arg, mask)
except:
pass
return None
def _format_is_iso(f):
"""
Does format match the iso8601 set that can be handled by the C parser?
Generally of form YYYY-MM-DDTHH:MM:SS - date separator can be different
but must be consistent. Leading 0s in dates and times are optional.
"""
iso_template = '%Y{date_sep}%m{date_sep}%d{time_sep}%H:%M:%S.%f'.format
excluded_formats = ['%Y%m%d', '%Y%m', '%Y']
for date_sep in [' ', '/', '\\', '-', '.', '']:
for time_sep in [' ', 'T']:
if (iso_template(date_sep=date_sep,
time_sep=time_sep
).startswith(f) and f not in excluded_formats):
return True
return False
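# For instance (illustrative): '%Y-%m-%d' and '%Y/%m/%dT%H:%M:%S.%f' are prefixes of
# the template above and count as ISO-like, whereas '%d-%m-%Y' and the excluded compact
# forms '%Y%m%d', '%Y%m', '%Y' do not.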
def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None):
"""
Try hard to parse datetime string, leveraging dateutil plus some extra
goodies like quarter recognition.
Parameters
----------
arg : compat.string_types
freq : str or DateOffset, default None
Helps with interpreting time string if supplied
dayfirst : bool, default None
If None uses default from print_config
yearfirst : bool, default None
If None uses default from print_config
Returns
-------
datetime, datetime/dateutil.parser._result, str
"""
from pandas.core.config import get_option
if not isinstance(arg, compat.string_types):
return arg
from pandas.tseries.offsets import DateOffset
if isinstance(freq, DateOffset):
freq = freq.rule_code
if dayfirst is None:
dayfirst = get_option("display.date_dayfirst")
if yearfirst is None:
yearfirst = get_option("display.date_yearfirst")
return tslib.parse_datetime_string_with_reso(arg, freq=freq,
dayfirst=dayfirst,
yearfirst=yearfirst)
DateParseError = tslib.DateParseError
normalize_date = tslib.normalize_date
# Fixed time formats for time parsing
_time_formats = ["%H:%M", "%H%M", "%I:%M%p", "%I%M%p",
"%H:%M:%S", "%H%M%S", "%I:%M:%S%p", "%I%M%S%p"]
def _guess_time_format_for_array(arr):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notnull(arr).nonzero()[0]
if len(non_nan_elements):
element = arr[non_nan_elements[0]]
for time_format in _time_formats:
try:
datetime.strptime(element, time_format)
return time_format
except ValueError:
pass
return None
def to_time(arg, format=None, infer_time_format=False, errors='raise'):
"""
Parse time strings to time objects using fixed strptime formats ("%H:%M",
"%H%M", "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
"%I%M%S%p")
Use infer_time_format if all the strings are in the same format to speed
up conversion.
Parameters
----------
arg : string in time format, datetime.time, list, tuple, 1-d array, Series
format : str, default None
Format used to convert arg into a time object. If None, fixed formats
are used.
infer_time_format: bool, default False
Infer the time format based on the first non-NaN element. If all
strings are in the same format, this will speed up conversion.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as None
- If 'ignore', then invalid parsing will return the input
Returns
-------
datetime.time
"""
from pandas.core.series import Series
def _convert_listlike(arg, format):
if isinstance(arg, (list, tuple)):
arg = np.array(arg, dtype='O')
elif getattr(arg, 'ndim', 1) > 1:
raise TypeError('arg must be a string, datetime, list, tuple, '
'1-d array, or Series')
arg = _ensure_object(arg)
if infer_time_format and format is None:
format = _guess_time_format_for_array(arg)
times = []
if format is not None:
for element in arg:
try:
times.append(datetime.strptime(element, format).time())
except (ValueError, TypeError):
if errors == 'raise':
raise ValueError("Cannot convert %s to a time with "
"given format %s" % (element, format))
elif errors == 'ignore':
return arg
else:
times.append(None)
else:
formats = _time_formats[:]
format_found = False
for element in arg:
time_object = None
for time_format in formats:
try:
time_object = datetime.strptime(element,
time_format).time()
if not format_found:
# Put the found format in front
fmt = formats.pop(formats.index(time_format))
formats.insert(0, fmt)
format_found = True
break
except (ValueError, TypeError):
continue
if time_object is not None:
times.append(time_object)
elif errors == 'raise':
raise ValueError("Cannot convert arg {arg} to "
"a time".format(arg=arg))
elif errors == 'ignore':
return arg
else:
times.append(None)
return times
if arg is None:
return arg
elif isinstance(arg, time):
return arg
elif isinstance(arg, Series):
values = _convert_listlike(arg._values, format)
return Series(values, index=arg.index, name=arg.name)
elif isinstance(arg, ABCIndexClass):
return _convert_listlike(arg, format)
elif is_list_like(arg):
return _convert_listlike(arg, format)
return _convert_listlike(np.array([arg]), format)[0]
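# Usage sketch (illustrative): to_time('14:30') and to_time('2:30PM') are both expected
# to give datetime.time(14, 30) via the fixed formats above; list-like input returns a
# list of time objects, with None entries under errors='coerce'.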
def format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime('%Y%m%d')
OLE_TIME_ZERO = datetime(1899, 12, 30, 0, 0, 0)
def ole2datetime(oledt):
"""function for converting excel date to normal date format"""
val = float(oledt)
# Excel has a bug where it thinks the date 2/29/1900 exists
# we just reject any date before 3/1/1900.
if val < 61:
raise ValueError("Value is outside of acceptable range: %s " % val)
return OLE_TIME_ZERO + timedelta(days=val)
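# Example (illustrative): OLE serial day 0 is 1899-12-30, so ole2datetime(61) is
# expected to give datetime(1900, 3, 1, 0, 0); values below 61 are rejected because of
# Excel's fictitious 1900-02-29.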
| mit |
ominux/scikit-learn | sklearn/tests/test_multiclass.py | 2 | 3397 |
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_true
from nose.tools import assert_raises
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multiclass import OneVsOneClassifier
from sklearn.multiclass import OutputCodeClassifier
from sklearn.svm import LinearSVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.grid_search import GridSearchCV
from sklearn import datasets
iris = datasets.load_iris()
perm = np.random.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
n_classes = 3
def test_ovr_exceptions():
ovr = OneVsRestClassifier(LinearSVC())
assert_raises(ValueError, ovr.predict, [])
def test_ovr_fit_predict():
# A classifier which implements decision_function.
ovr = OneVsRestClassifier(LinearSVC())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovr.estimators_), n_classes)
pred2 = LinearSVC().fit(iris.data, iris.target).predict(iris.data)
assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2))
# A classifier which implements predict_proba.
ovr = OneVsRestClassifier(MultinomialNB())
pred = ovr.fit(iris.data, iris.target).predict(iris.data)
assert_true(np.mean(iris.target == pred) >= 0.65)
def test_ovr_gridsearch():
ovr = OneVsRestClassifier(LinearSVC())
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovr, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator.estimators_[0].C
assert_true(best_C in Cs)
def test_ovo_exceptions():
ovo = OneVsOneClassifier(LinearSVC())
assert_raises(ValueError, ovo.predict, [])
def test_ovo_fit_predict():
# A classifier which implements decision_function.
ovo = OneVsOneClassifier(LinearSVC())
pred = ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
# A classifier which implements predict_proba.
ovo = OneVsOneClassifier(MultinomialNB())
pred = ovo.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2)
def test_ovo_gridsearch():
ovo = OneVsOneClassifier(LinearSVC())
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ovo, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator.estimators_[0].C
assert_true(best_C in Cs)
def test_ecoc_exceptions():
ecoc = OutputCodeClassifier(LinearSVC())
assert_raises(ValueError, ecoc.predict, [])
def test_ecoc_fit_predict():
# A classifier which implements decision_function.
ecoc = OutputCodeClassifier(LinearSVC(), code_size=2)
pred = ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
# A classifier which implements predict_proba.
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2)
pred = ecoc.fit(iris.data, iris.target).predict(iris.data)
assert_equal(len(ecoc.estimators_), n_classes * 2)
def test_ecoc_gridsearch():
ecoc = OutputCodeClassifier(LinearSVC())
Cs = [0.1, 0.5, 0.8]
cv = GridSearchCV(ecoc, {'estimator__C': Cs})
cv.fit(iris.data, iris.target)
best_C = cv.best_estimator.estimators_[0].C
assert_true(best_C in Cs)
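# Summary of the estimator counts asserted above (with n_classes = 3): one-vs-rest fits
# n_classes = 3 binary models, one-vs-one fits n_classes * (n_classes - 1) / 2 = 3, and
# the output-code classifier fits roughly code_size * n_classes = 6.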
| bsd-3-clause |